AArch64: Add gdbserver MTE support
[deliverable/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2021 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31 #include "elf32-arm.h"
32 #include "cpu-arm.h"
33
/* Return the name of the relocation section associated with NAME,
   e.g. ".rel.text" or ".rela.text".  HTAB is the bfd's
   elf32_arm_link_hash_table (not a hash entry): its use_rel flag
   selects REL versus RELA style relocations.  NAME must be a string
   literal, since it is pasted onto the prefix by the preprocessor.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
38
/* Return the on-disk size of one relocation entry (Elf32_External_Rel
   or Elf32_External_Rela).  HTAB is the bfd's
   elf32_arm_link_hash_table; its use_rel flag selects which.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
45
/* Return the function used to swap relocations in (external to
   internal form).  HTAB is the bfd's elf32_arm_link_hash_table; its
   use_rel flag selects the REL or RELA variant.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
52
/* Return the function used to swap relocations out (internal to
   external form).  HTAB is the bfd's elf32_arm_link_hash_table; its
   use_rel flag selects the REL or RELA variant.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
59
/* ARM ELF uses REL-style relocations, so only the REL decoding hook
   is provided; the RELA hook is left NULL.  */
#define elf_info_to_howto		NULL
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

/* ABI version emitted in the ELF header.  NOTE(review): the OS/ABI
   value is set to ELFOSABI_ARM; where it is consumed is outside this
   chunk -- confirm against the backend's final_write_processing.  */
#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
65
/* The Adjusted Place, as defined by AAELF: the place (address) with
   the low two bits masked off, i.e. rounded down to a 4-byte (ARM
   word) boundary.  */
#define Pa(X) ((X) & 0xfffffffc)
68
/* Forward declaration; the definition appears later in this file
   (outside this chunk).  Writes SEC's CONTENTS to OUTPUT_BFD for the
   link described by LINK_INFO, returning FALSE on failure --
   presumably applying ARM-specific section editing; verify against
   the definition.  */
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
73
74 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
75 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76 in that slot. */
77
78 static reloc_howto_type elf32_arm_howto_table_1[] =
79 {
80 /* No relocation. */
81 HOWTO (R_ARM_NONE, /* type */
82 0, /* rightshift */
83 3, /* size (0 = byte, 1 = short, 2 = long) */
84 0, /* bitsize */
85 FALSE, /* pc_relative */
86 0, /* bitpos */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 FALSE, /* partial_inplace */
91 0, /* src_mask */
92 0, /* dst_mask */
93 FALSE), /* pcrel_offset */
94
95 HOWTO (R_ARM_PC24, /* type */
96 2, /* rightshift */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
98 24, /* bitsize */
99 TRUE, /* pc_relative */
100 0, /* bitpos */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 FALSE, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 TRUE), /* pcrel_offset */
108
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
111 0, /* rightshift */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
113 32, /* bitsize */
114 FALSE, /* pc_relative */
115 0, /* bitpos */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 FALSE, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 FALSE), /* pcrel_offset */
123
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
126 0, /* rightshift */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
128 32, /* bitsize */
129 TRUE, /* pc_relative */
130 0, /* bitpos */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 FALSE, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 TRUE), /* pcrel_offset */
138
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* rightshift */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
143 32, /* bitsize */
144 TRUE, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 FALSE, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 TRUE), /* pcrel_offset */
153
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
156 0, /* rightshift */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
158 16, /* bitsize */
159 FALSE, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 FALSE, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 FALSE), /* pcrel_offset */
168
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
171 0, /* rightshift */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
173 12, /* bitsize */
174 FALSE, /* pc_relative */
175 0, /* bitpos */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 FALSE, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 FALSE), /* pcrel_offset */
183
184 HOWTO (R_ARM_THM_ABS5, /* type */
185 6, /* rightshift */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
187 5, /* bitsize */
188 FALSE, /* pc_relative */
189 0, /* bitpos */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 FALSE, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 FALSE), /* pcrel_offset */
197
198 /* 8 bit absolute */
199 HOWTO (R_ARM_ABS8, /* type */
200 0, /* rightshift */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
202 8, /* bitsize */
203 FALSE, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 FALSE, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 FALSE), /* pcrel_offset */
212
213 HOWTO (R_ARM_SBREL32, /* type */
214 0, /* rightshift */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
216 32, /* bitsize */
217 FALSE, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 FALSE, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 FALSE), /* pcrel_offset */
226
227 HOWTO (R_ARM_THM_CALL, /* type */
228 1, /* rightshift */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
230 24, /* bitsize */
231 TRUE, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 FALSE, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 TRUE), /* pcrel_offset */
240
241 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* rightshift */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
244 8, /* bitsize */
245 TRUE, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 FALSE, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 TRUE), /* pcrel_offset */
254
255 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* rightshift */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
258 32, /* bitsize */
259 FALSE, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 FALSE, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 FALSE), /* pcrel_offset */
268
269 HOWTO (R_ARM_TLS_DESC, /* type */
270 0, /* rightshift */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
272 32, /* bitsize */
273 FALSE, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 FALSE, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 FALSE), /* pcrel_offset */
282
283 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* rightshift */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
286 0, /* bitsize */
287 FALSE, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 FALSE, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 FALSE), /* pcrel_offset */
296
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
299 2, /* rightshift */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
301 24, /* bitsize */
302 TRUE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 FALSE, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 TRUE), /* pcrel_offset */
311
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* rightshift */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
316 24, /* bitsize */
317 TRUE, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 FALSE, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 TRUE), /* pcrel_offset */
326
327 /* Dynamic TLS relocations. */
328
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 0, /* rightshift */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
332 32, /* bitsize */
333 FALSE, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 TRUE, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 FALSE), /* pcrel_offset */
342
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 TRUE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 0, /* rightshift */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
360 32, /* bitsize */
361 FALSE, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 TRUE, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 FALSE), /* pcrel_offset */
370
371 /* Relocs used in ARM Linux */
372
373 HOWTO (R_ARM_COPY, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 32, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 TRUE, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_ARM_GLOB_DAT, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 32, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 TRUE, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 0, /* rightshift */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
404 32, /* bitsize */
405 FALSE, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 TRUE, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 FALSE), /* pcrel_offset */
414
415 HOWTO (R_ARM_RELATIVE, /* type */
416 0, /* rightshift */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
418 32, /* bitsize */
419 FALSE, /* pc_relative */
420 0, /* bitpos */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 TRUE, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 FALSE), /* pcrel_offset */
428
429 HOWTO (R_ARM_GOTOFF32, /* type */
430 0, /* rightshift */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
432 32, /* bitsize */
433 FALSE, /* pc_relative */
434 0, /* bitpos */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 TRUE, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 FALSE), /* pcrel_offset */
442
443 HOWTO (R_ARM_GOTPC, /* type */
444 0, /* rightshift */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
446 32, /* bitsize */
447 TRUE, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 TRUE, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 TRUE), /* pcrel_offset */
456
457 HOWTO (R_ARM_GOT32, /* type */
458 0, /* rightshift */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
460 32, /* bitsize */
461 FALSE, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 TRUE, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 FALSE), /* pcrel_offset */
470
471 HOWTO (R_ARM_PLT32, /* type */
472 2, /* rightshift */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
474 24, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 FALSE, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 HOWTO (R_ARM_CALL, /* type */
486 2, /* rightshift */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
488 24, /* bitsize */
489 TRUE, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 FALSE, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 TRUE), /* pcrel_offset */
498
499 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* rightshift */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
502 24, /* bitsize */
503 TRUE, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 FALSE, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 TRUE), /* pcrel_offset */
512
513 HOWTO (R_ARM_THM_JUMP24, /* type */
514 1, /* rightshift */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
516 24, /* bitsize */
517 TRUE, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 FALSE, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 TRUE), /* pcrel_offset */
526
527 HOWTO (R_ARM_BASE_ABS, /* type */
528 0, /* rightshift */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
530 32, /* bitsize */
531 FALSE, /* pc_relative */
532 0, /* bitpos */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 FALSE, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 FALSE), /* pcrel_offset */
540
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 0, /* rightshift */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
544 12, /* bitsize */
545 TRUE, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 FALSE, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 TRUE), /* pcrel_offset */
554
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 0, /* rightshift */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
558 12, /* bitsize */
559 TRUE, /* pc_relative */
560 8, /* bitpos */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 FALSE, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 TRUE), /* pcrel_offset */
568
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 0, /* rightshift */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
572 12, /* bitsize */
573 TRUE, /* pc_relative */
574 16, /* bitpos */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 FALSE, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 TRUE), /* pcrel_offset */
582
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 0, /* rightshift */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
586 12, /* bitsize */
587 FALSE, /* pc_relative */
588 0, /* bitpos */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 FALSE, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 FALSE), /* pcrel_offset */
596
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 0, /* rightshift */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
600 8, /* bitsize */
601 FALSE, /* pc_relative */
602 12, /* bitpos */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 FALSE, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 FALSE), /* pcrel_offset */
610
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 0, /* rightshift */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
614 8, /* bitsize */
615 FALSE, /* pc_relative */
616 20, /* bitpos */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 FALSE, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 FALSE), /* pcrel_offset */
624
625 HOWTO (R_ARM_TARGET1, /* type */
626 0, /* rightshift */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
628 32, /* bitsize */
629 FALSE, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 FALSE, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 FALSE), /* pcrel_offset */
638
639 HOWTO (R_ARM_ROSEGREL32, /* type */
640 0, /* rightshift */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
642 32, /* bitsize */
643 FALSE, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 FALSE, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 FALSE), /* pcrel_offset */
652
653 HOWTO (R_ARM_V4BX, /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 32, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 FALSE, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 HOWTO (R_ARM_TARGET2, /* type */
668 0, /* rightshift */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
670 32, /* bitsize */
671 FALSE, /* pc_relative */
672 0, /* bitpos */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 FALSE, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 TRUE), /* pcrel_offset */
680
681 HOWTO (R_ARM_PREL31, /* type */
682 0, /* rightshift */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
684 31, /* bitsize */
685 TRUE, /* pc_relative */
686 0, /* bitpos */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 FALSE, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 TRUE), /* pcrel_offset */
694
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 0, /* rightshift */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
698 16, /* bitsize */
699 FALSE, /* pc_relative */
700 0, /* bitpos */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 FALSE, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 FALSE), /* pcrel_offset */
708
709 HOWTO (R_ARM_MOVT_ABS, /* type */
710 0, /* rightshift */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
712 16, /* bitsize */
713 FALSE, /* pc_relative */
714 0, /* bitpos */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 FALSE, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 FALSE), /* pcrel_offset */
722
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 0, /* rightshift */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
726 16, /* bitsize */
727 TRUE, /* pc_relative */
728 0, /* bitpos */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 FALSE, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 TRUE), /* pcrel_offset */
736
737 HOWTO (R_ARM_MOVT_PREL, /* type */
738 0, /* rightshift */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
740 16, /* bitsize */
741 TRUE, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 FALSE, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 TRUE), /* pcrel_offset */
750
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 16, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 FALSE, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 0, /* rightshift */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
768 16, /* bitsize */
769 FALSE, /* pc_relative */
770 0, /* bitpos */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 FALSE, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 FALSE), /* pcrel_offset */
778
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 0, /* rightshift */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
782 16, /* bitsize */
783 TRUE, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 FALSE, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 TRUE), /* pcrel_offset */
792
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 0, /* rightshift */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
796 16, /* bitsize */
797 TRUE, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 FALSE, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 TRUE), /* pcrel_offset */
806
807 HOWTO (R_ARM_THM_JUMP19, /* type */
808 1, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 19, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 FALSE, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* rightshift */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
824 6, /* bitsize */
825 TRUE, /* pc_relative */
826 0, /* bitpos */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 FALSE, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 TRUE), /* pcrel_offset */
834
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 versa. */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 0, /* rightshift */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
841 13, /* bitsize */
842 TRUE, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 FALSE, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 TRUE), /* pcrel_offset */
851
852 HOWTO (R_ARM_THM_PC12, /* type */
853 0, /* rightshift */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
855 13, /* bitsize */
856 TRUE, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 FALSE, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 TRUE), /* pcrel_offset */
865
866 HOWTO (R_ARM_ABS32_NOI, /* type */
867 0, /* rightshift */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
869 32, /* bitsize */
870 FALSE, /* pc_relative */
871 0, /* bitpos */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 FALSE, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 FALSE), /* pcrel_offset */
879
880 HOWTO (R_ARM_REL32_NOI, /* type */
881 0, /* rightshift */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
883 32, /* bitsize */
884 TRUE, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 FALSE, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 FALSE), /* pcrel_offset */
893
894 /* Group relocations. */
895
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 0, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 32, /* bitsize */
900 TRUE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 FALSE, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 TRUE), /* pcrel_offset */
909
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 32, /* bitsize */
914 TRUE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 FALSE, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 TRUE), /* pcrel_offset */
923
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 0, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 32, /* bitsize */
928 TRUE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 FALSE, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 TRUE), /* pcrel_offset */
937
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 0, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 32, /* bitsize */
942 TRUE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 FALSE, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 TRUE), /* pcrel_offset */
951
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 0, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 32, /* bitsize */
956 TRUE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 FALSE, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 TRUE), /* pcrel_offset */
965
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 0, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 32, /* bitsize */
970 TRUE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 FALSE, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 TRUE), /* pcrel_offset */
979
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 0, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 32, /* bitsize */
984 TRUE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 FALSE, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 TRUE), /* pcrel_offset */
993
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 0, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 32, /* bitsize */
998 TRUE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 FALSE, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 TRUE), /* pcrel_offset */
1007
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 0, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 32, /* bitsize */
1012 TRUE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 FALSE, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 TRUE), /* pcrel_offset */
1021
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 32, /* bitsize */
1026 TRUE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 TRUE), /* pcrel_offset */
1035
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 TRUE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 TRUE), /* pcrel_offset */
1049
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 0, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 32, /* bitsize */
1054 TRUE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 FALSE, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 TRUE), /* pcrel_offset */
1063
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 32, /* bitsize */
1068 TRUE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 FALSE, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 TRUE), /* pcrel_offset */
1077
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 32, /* bitsize */
1082 TRUE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 FALSE, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 TRUE), /* pcrel_offset */
1091
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 0, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 32, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 32, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 0, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 32, /* bitsize */
1124 TRUE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 TRUE), /* pcrel_offset */
1133
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 32, /* bitsize */
1138 TRUE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 TRUE), /* pcrel_offset */
1147
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 32, /* bitsize */
1152 TRUE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 TRUE), /* pcrel_offset */
1161
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 0, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 32, /* bitsize */
1166 TRUE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 FALSE, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 TRUE), /* pcrel_offset */
1175
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 32, /* bitsize */
1180 TRUE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 FALSE, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 TRUE), /* pcrel_offset */
1189
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 32, /* bitsize */
1194 TRUE, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 FALSE, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 TRUE), /* pcrel_offset */
1203
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 0, /* rightshift */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 32, /* bitsize */
1208 TRUE, /* pc_relative */
1209 0, /* bitpos */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 FALSE, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 TRUE), /* pcrel_offset */
1217
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 0, /* rightshift */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 32, /* bitsize */
1222 TRUE, /* pc_relative */
1223 0, /* bitpos */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 FALSE, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 TRUE), /* pcrel_offset */
1231
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 0, /* rightshift */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 32, /* bitsize */
1236 TRUE, /* pc_relative */
1237 0, /* bitpos */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 FALSE, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 TRUE), /* pcrel_offset */
1245
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 0, /* rightshift */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 32, /* bitsize */
1250 TRUE, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 FALSE, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 TRUE), /* pcrel_offset */
1259
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 0, /* rightshift */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 32, /* bitsize */
1264 TRUE, /* pc_relative */
1265 0, /* bitpos */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 FALSE, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 TRUE), /* pcrel_offset */
1273
1274 /* End of group relocations. */
1275
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 0, /* rightshift */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 16, /* bitsize */
1280 FALSE, /* pc_relative */
1281 0, /* bitpos */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 FALSE, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 FALSE), /* pcrel_offset */
1289
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 0, /* rightshift */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 16, /* bitsize */
1294 FALSE, /* pc_relative */
1295 0, /* bitpos */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 FALSE, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 FALSE), /* pcrel_offset */
1303
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 0, /* rightshift */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 16, /* bitsize */
1308 FALSE, /* pc_relative */
1309 0, /* bitpos */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 FALSE, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 FALSE), /* pcrel_offset */
1317
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 0, /* rightshift */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 16, /* bitsize */
1322 FALSE, /* pc_relative */
1323 0, /* bitpos */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 FALSE, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 FALSE), /* pcrel_offset */
1331
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 0, /* rightshift */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 16, /* bitsize */
1336 FALSE, /* pc_relative */
1337 0, /* bitpos */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 FALSE, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 FALSE), /* pcrel_offset */
1345
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 0, /* rightshift */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 16, /* bitsize */
1350 FALSE, /* pc_relative */
1351 0, /* bitpos */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 FALSE, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 FALSE), /* pcrel_offset */
1359
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 TRUE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 24, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 FALSE, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 0, /* bitsize */
1392 FALSE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 FALSE, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 FALSE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 24, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 FALSE, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 32, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 FALSE, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1431 0, /* rightshift */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 32, /* bitsize */
1434 FALSE, /* pc_relative */
1435 0, /* bitpos */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 FALSE, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 FALSE), /* pcrel_offset */
1443
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1445 0, /* rightshift */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 32, /* bitsize */
1448 TRUE, /* pc_relative */
1449 0, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 FALSE, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 TRUE), /* pcrel_offset */
1457
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 0, /* rightshift */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 12, /* bitsize */
1462 FALSE, /* pc_relative */
1463 0, /* bitpos */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 FALSE, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1471
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1473 0, /* rightshift */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 12, /* bitsize */
1476 FALSE, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 FALSE, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1485
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1487
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 0, /* rightshift */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 0, /* bitsize */
1493 FALSE, /* pc_relative */
1494 0, /* bitpos */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 FALSE, /* partial_inplace */
1499 0, /* src_mask */
1500 0, /* dst_mask */
1501 FALSE), /* pcrel_offset */
1502
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 0, /* rightshift */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 0, /* bitsize */
1508 FALSE, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 FALSE, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 FALSE), /* pcrel_offset */
1517
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* rightshift */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 11, /* bitsize */
1522 TRUE, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 FALSE, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 TRUE), /* pcrel_offset */
1531
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* rightshift */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 8, /* bitsize */
1536 TRUE, /* pc_relative */
1537 0, /* bitpos */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 FALSE, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 TRUE), /* pcrel_offset */
1545
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 32, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 TRUE, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 32, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 TRUE, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 32, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 TRUE, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1604 0, /* rightshift */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 32, /* bitsize */
1607 FALSE, /* pc_relative */
1608 0, /* bitpos */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 NULL, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 TRUE, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 FALSE), /* pcrel_offset */
1616
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 0, /* rightshift */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 12, /* bitsize */
1621 FALSE, /* pc_relative */
1622 0, /* bitpos */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 FALSE, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 FALSE), /* pcrel_offset */
1630
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1632 0, /* rightshift */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 12, /* bitsize */
1635 FALSE, /* pc_relative */
1636 0, /* bitpos */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 FALSE, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 FALSE), /* pcrel_offset */
1644
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 0, /* rightshift */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 12, /* bitsize */
1649 FALSE, /* pc_relative */
1650 0, /* bitpos */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 FALSE, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 FALSE), /* pcrel_offset */
1658
1659 /* 112-127 private relocations. */
1660 EMPTY_HOWTO (112),
1661 EMPTY_HOWTO (113),
1662 EMPTY_HOWTO (114),
1663 EMPTY_HOWTO (115),
1664 EMPTY_HOWTO (116),
1665 EMPTY_HOWTO (117),
1666 EMPTY_HOWTO (118),
1667 EMPTY_HOWTO (119),
1668 EMPTY_HOWTO (120),
1669 EMPTY_HOWTO (121),
1670 EMPTY_HOWTO (122),
1671 EMPTY_HOWTO (123),
1672 EMPTY_HOWTO (124),
1673 EMPTY_HOWTO (125),
1674 EMPTY_HOWTO (126),
1675 EMPTY_HOWTO (127),
1676
1677 /* R_ARM_ME_TOO, obsolete. */
1678 EMPTY_HOWTO (128),
1679
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 0, /* rightshift */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 0, /* bitsize */
1684 FALSE, /* pc_relative */
1685 0, /* bitpos */
1686 complain_overflow_dont,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 FALSE, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 FALSE), /* pcrel_offset */
1693 EMPTY_HOWTO (130),
1694 EMPTY_HOWTO (131),
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1696 0, /* rightshift. */
1697 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 16, /* bitsize. */
1699 FALSE, /* pc_relative. */
1700 0, /* bitpos. */
1701 complain_overflow_bitfield,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 FALSE, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 FALSE), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1709 0, /* rightshift. */
1710 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 16, /* bitsize. */
1712 FALSE, /* pc_relative. */
1713 0, /* bitpos. */
1714 complain_overflow_bitfield,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 FALSE, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 FALSE), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1722 0, /* rightshift. */
1723 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 16, /* bitsize. */
1725 FALSE, /* pc_relative. */
1726 0, /* bitpos. */
1727 complain_overflow_bitfield,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 FALSE, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 FALSE), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1735 0, /* rightshift. */
1736 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 16, /* bitsize. */
1738 FALSE, /* pc_relative. */
1739 0, /* bitpos. */
1740 complain_overflow_bitfield,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 FALSE, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 FALSE), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16, /* type. */
1749 0, /* rightshift. */
1750 1, /* size (0 = byte, 1 = short, 2 = long). */
1751 16, /* bitsize. */
1752 TRUE, /* pc_relative. */
1753 0, /* bitpos. */
1754 complain_overflow_dont,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 FALSE, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 TRUE), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12, /* type. */
1762 0, /* rightshift. */
1763 1, /* size (0 = byte, 1 = short, 2 = long). */
1764 12, /* bitsize. */
1765 TRUE, /* pc_relative. */
1766 0, /* bitpos. */
1767 complain_overflow_dont,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 FALSE, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 TRUE), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18, /* type. */
1775 0, /* rightshift. */
1776 1, /* size (0 = byte, 1 = short, 2 = long). */
1777 18, /* bitsize. */
1778 TRUE, /* pc_relative. */
1779 0, /* bitpos. */
1780 complain_overflow_dont,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 FALSE, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 TRUE), /* pcrel_offset. */
1787 };
1788
/* Relocations 160 (R_ARM_IRELATIVE) onwards.  This table is indexed by
   r_type - R_ARM_IRELATIVE in elf32_arm_howto_from_type, so entries must
   stay contiguous and in relocation-number order.  */
static reloc_howto_type elf32_arm_howto_table_2[8] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTFUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTOFFFUNCDESC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_FUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* NOTE(review): bitsize is 64 here while size is 2 (one 32-bit word)
     and dst_mask only covers the low word — confirm this mismatch is
     intentional for two-word function-descriptor values.  */
  HOWTO (R_ARM_FUNCDESC_VALUE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 64,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC_VALUE",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_GD32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_GD32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_LDM32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_LDM32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_IE32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_IE32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
};
1897
/* Relocations at the top of the relocation number space (the "249-255
   extended" range; currently unused): R_ARM_RREL32 onwards.  Indexed by
   r_type - R_ARM_RREL32 in elf32_arm_howto_from_type, so entries must
   stay contiguous.  All masks are zero, so applying any of these is
   effectively a no-op.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1957
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1960 {
1961 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962 return &elf32_arm_howto_table_1[r_type];
1963
1964 if (r_type >= R_ARM_IRELATIVE
1965 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1967
1968 if (r_type >= R_ARM_RREL32
1969 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1971
1972 return NULL;
1973 }
1974
1975 static bfd_boolean
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 Elf_Internal_Rela * elf_reloc)
1978 {
1979 unsigned int r_type;
1980
1981 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1983 {
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986 abfd, r_type);
1987 bfd_set_error (bfd_error_bad_value);
1988 return FALSE;
1989 }
1990 return TRUE;
1991 }
1992
/* One entry mapping a generic BFD relocation code onto the
   corresponding ARM ELF relocation number.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD code.  */
  unsigned char elf_reloc_val;			/* R_ARM_* number; all mapped
						   values fit in a byte.  */
};
1998
1999 /* All entries in this list must also be present in elf32_arm_howto_table. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2001 {
2002 {BFD_RELOC_NONE, R_ARM_NONE},
2003 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
2004 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
2005 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
2006 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
2007 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
2008 {BFD_RELOC_32, R_ARM_ABS32},
2009 {BFD_RELOC_32_PCREL, R_ARM_REL32},
2010 {BFD_RELOC_8, R_ARM_ABS8},
2011 {BFD_RELOC_16, R_ARM_ABS16},
2012 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
2013 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
2020 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
2021 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
2022 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
2023 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
2024 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
2025 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
2026 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
2027 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2028 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2029 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2030 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2031 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2032 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2033 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2034 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2035 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2036 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2039 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2040 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2041 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2042 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2045 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2046 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2047 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2048 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2049 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2051 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2056 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2057 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2058 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2059 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2060 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2061 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2062 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2063 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100 {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101 {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2102 };
2103
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 bfd_reloc_code_real_type code)
2107 {
2108 unsigned int i;
2109
2110 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2113
2114 return NULL;
2115 }
2116
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119 const char *r_name)
2120 {
2121 unsigned int i;
2122
2123 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124 if (elf32_arm_howto_table_1[i].name != NULL
2125 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126 return &elf32_arm_howto_table_1[i];
2127
2128 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129 if (elf32_arm_howto_table_2[i].name != NULL
2130 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131 return &elf32_arm_howto_table_2[i];
2132
2133 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134 if (elf32_arm_howto_table_3[i].name != NULL
2135 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136 return &elf32_arm_howto_table_3[i];
2137
2138 return NULL;
2139 }
2140
2141 /* Support for core dump NOTE sections. */
2142
2143 static bfd_boolean
2144 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2145 {
2146 int offset;
2147 size_t size;
2148
2149 switch (note->descsz)
2150 {
2151 default:
2152 return FALSE;
2153
2154 case 148: /* Linux/ARM 32-bit. */
2155 /* pr_cursig */
2156 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2157
2158 /* pr_pid */
2159 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2160
2161 /* pr_reg */
2162 offset = 72;
2163 size = 72;
2164
2165 break;
2166 }
2167
2168 /* Make a ".reg/999" section. */
2169 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2170 size, note->descpos + offset);
2171 }
2172
2173 static bfd_boolean
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2175 {
2176 switch (note->descsz)
2177 {
2178 default:
2179 return FALSE;
2180
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd)->core->pid
2183 = bfd_get_32 (abfd, note->descdata + 12);
2184 elf_tdata (abfd)->core->program
2185 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 elf_tdata (abfd)->core->command
2187 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2188 }
2189
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some (at least one anyway)
2192 implementations, so strip it off if it exists. */
2193 {
2194 char *command = elf_tdata (abfd)->core->command;
2195 int n = strlen (command);
2196
2197 if (0 < n && command[n - 1] == ' ')
2198 command[n - 1] = '\0';
2199 }
2200
2201 return TRUE;
2202 }
2203
/* Write a Linux/ARM-layout core note of NOTE_TYPE into BUF/BUFSIZ.
   The variadic arguments depend on NOTE_TYPE:
     NT_PRPSINFO: (const char *fname, const char *psargs)
     NT_PRSTATUS: (long pid, int cursig, const void *gregs)
   The fixed offsets mirror those read back by the grok functions
   above.  Returns NULL for an unsupported NOTE_TYPE.  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_fname: 16 bytes at offset 28; deliberately not guaranteed
	   NUL-terminated, matching the kernel structure.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
	 */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	/* pr_psargs: 80 bytes at offset 44.  */
	strncpy (data + 44, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);	/* pr_pid */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);	/* pr_cursig */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);		/* pr_reg */
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2262
/* BFD target vector names for little- and big-endian ARM ELF.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

/* Hook up the core-note readers/writer defined above.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Containers for 32-bit ARM and 16-bit Thumb instruction words.  */
typedef unsigned long int	insn32;
typedef unsigned short int	insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* Prefix on the name of the original function protected by an
   Armv8-M Security Extensions (CMSE) entry stub.  */
#define CMSE_PREFIX "__acle_se_"

/* Section holding the CMSE secure gateway stubs.  */
#define CMSE_STUB_NAME ".gnu.sgstubs"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"

/* FDPIC default stack size.  */
#define DEFAULT_STACK_SIZE 0x8000
2313
/* TLS descriptor trampoline: jump through the resolver function
   stored 4 bytes past the address formed from lr + r0.  */
static const unsigned long tls_trampoline [] =
  {
    0xe08e0000,		/* add r0, lr, r0 */
    0xe5901004,		/* ldr r1, [r0,#4] */
    0xe12fff11,		/* bx  r1 */
  };

/* Lazy TLS descriptor resolution trampoline; tail-calls the dynamic
   linker's dl_tlsdesc_lazy_resolver found via the GOT.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
  {
    0xe52d2004, /* push {r2}			*/
    0xe59f200c, /* ldr  r2, [pc, #3f - . - 8]	*/
    0xe59f100c, /* ldr  r1, [pc, #4f - . - 8]	*/
    0xe79f2002, /* 1:   ldr  r2, [pc, r2]	*/
    0xe081100f, /* 2:   add  r1, pc		*/
    0xe12fff12, /*      bx   r2			*/
    0x00000014, /* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
				+ dl_tlsdesc_lazy_resolver(GOT)   */
    0x00000018, /* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8	  */
  };
2333
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm the instruction that
2336 should be used for the alignment padding should be 0xe7fd (b .-2) instead of
2337 a nop for performance reasons. */
2338
/* ARM FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_plt_entry [] =
  {
    0xe59fc008,    /* ldr     r12, .L1 */
    0xe08cc009,    /* add     r12, r12, r9 */
    0xe59c9004,    /* ldr     r9, [r12, #4] */
    0xe59cf000,    /* ldr     pc, [r12] */
    0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* L2.     .word   foo(funcdesc_value_reloc_offset) */
    0xe51fc00c,    /* ldr     r12, [pc, #-12] */
    0xe92d1000,    /* push    {r12} */
    0xe599c004,    /* ldr     r12, [r9, #4] */
    0xe599f000,    /* ldr     pc, [r9] */
  };

/* Thumb FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
  {
    0xc00cf8df,    /* ldr.w   r12, .L1 */
    0x0c09eb0c,    /* add.w   r12, r12, r9 */
    0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
    0xf000f8dc,    /* ldr.w   pc, [r12] */
    0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
    0xc008f85f,    /* ldr.w   r12, .L2 */
    0xcd04f84d,    /* push    {r12} */
    0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
    0xf000f8d9,    /* ldr.w   pc, [r9] */
  };
2370
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str   lr, [sp, #-4]! */
    0xe59fe010,		/* ldr   lr, [pc, #16]  */
    0xe08fe00e,		/* add   lr, pc, lr     */
    0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  };

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
  {
    0xe28fc600,		/* add   ip, pc, #NN	*/
    0xe28cca00,		/* add	 ip, ip, #NN	*/
    0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
    0x00000000,		/* unused		*/
  };

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str	 lr, [sp, #-4]! */
    0xe59fe004,		/* ldr	 lr, [pc, #4]	*/
    0xe08fe00e,		/* add	 lr, pc, lr	*/
    0xe5bef008,		/* ldr	 pc, [lr, #8]!	*/
    0x00000000,		/* &GOT[0] - .		*/
  };

/* By default subsequent entries in a procedure linkage table look like
   this. Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
  {
    0xe28fc600,		/* add   ip, pc, #0xNN00000 */
    0xe28cca00,		/* add	 ip, ip, #0xNN000   */
    0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
  };

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
  {
    0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
    0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
    0xe28cca00,		/* add	 ip, ip, #0xNN000    */
    0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
  };

/* TRUE when the user asked for the long PLT entry form above.  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2432
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
  {
    /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
       an instruction maybe encoded to one or two array elements.  */
    0xf8dfb500,		/* push	   {lr}		 */
    0x44fee008,		/* ldr.w   lr, [pc, #8]  */
			/* add	   lr, pc	 */
    0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
    0x00000000,		/* &GOT[0] - .		 */
  };

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
  {
    /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
       an instruction maybe encoded to one or two array elements.  */
    0x0c00f240,		/* movw    ip, #0xNNNN	  */
    0x0c00f2c0,		/* movt    ip, #0xNNNN	  */
    0xf8dc44fc,		/* add     ip, pc	  */
    0xe7fcf000		/* ldr.w   pc, [ip]	  */
			/* b      .-4		  */
  };
2459
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
  {
    0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
    0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
  };

/* The format of subsequent entries in a VxWorks executable.
   First three words are the eager (resolved) path; the last three
   the lazy path which branches to the PLT header.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
  {
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe59cf000,		/* ldr	  pc,[ip]			*/
    0x00000000,		/* .long  @got				*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xea000000,		/* b	  _PLT				*/
    0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };

/* The format of entries in a VxWorks shared library.
   GOT accesses are indirected through r9 (the PIC base).  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
  {
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe79cf009,		/* ldr	  pc,[ip,r9]			*/
    0x00000000,		/* .long  @got				*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe599f008,		/* ldr	  pc,[r9,#8]			*/
    0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };

/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
  {
    0x4778,		/* bx pc */
    0xe7fd		/* b .-2 */
  };
2499
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  NaCl code is grouped into 16-byte "bundles"; the
   bic instructions enforce the sandbox's address masking.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
  {
    /* First bundle: */
    0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
    0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
    0xe08cc00f,		/* add	ip, ip, pc			*/
    0xe52dc008,		/* str	ip, [sp, #-8]!			*/
    /* Second bundle: */
    0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
    0xe59cc000,		/* ldr	ip, [ip]			*/
    0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
    0xe12fff1c,		/* bx	ip				*/
    /* Third bundle: */
    0xe320f000,		/* nop					*/
    0xe320f000,		/* nop					*/
    0xe320f000,		/* nop					*/
    /* .Lplt_tail: */
    0xe50dc004,		/* str	ip, [sp, #-4]			*/
    /* Fourth bundle: */
    0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
    0xe59cc000,		/* ldr	ip, [ip]			*/
    0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
    0xe12fff1c,		/* bx	ip				*/
  };
/* Byte offset of the .Lplt_tail label above within plt0.  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
  {
    0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
    0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
    0xe08cc00f,		/* add	ip, ip, pc			*/
    0xea000000,		/* b	.Lplt_tail			*/
  };
2538
/* Maximum forward/backward reach of the various branch encodings,
   measured from the branch instruction's own address (hence the +8 for
   ARM and +4 for Thumb pipeline offsets).  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23)) << 2) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2547
/* Kind of element appearing in an insn_sequence (stub template):
   a 16-bit Thumb insn, a 32-bit Thumb-2 insn, a 32-bit ARM insn,
   or a literal data word.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Constructors for insn_sequence elements; the last field is the
   relocation addend (or branch condition, below).  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: an instruction or data word, its
   kind, the relocation to apply to it, and the relocation addend.  */
typedef struct
{
  bfd_vma	       data;
  enum stub_insn_type  type;
  unsigned int	       r_type;
  int		       reloc_addend;
}  insn_sequence;
2575
/* See note [Thumb nop sequence] when adding a veneer.  */

/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
  {
    ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
  {
    ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
    ARM_INSN (0xe12fff1c),	    /* bx    ip */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd   R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
  {
    THUMB16_INSN (0xb401),	     /* push {r0} */
    THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
    THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
    THUMB16_INSN (0xbc01),	     /* pop  {r0} */
    THUMB16_INSN (0x4760),	     /* bx   ip */
    THUMB16_INSN (0xbf00),	     /* nop */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd  R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
  {
    THUMB32_INSN (0xf85ff000),	     /* ldr.w  pc, [pc, #-0] */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd  R_ARM_ABS32(x) */
  };

/* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
   M-profile architectures.  Contains no data words, so it is usable in
   execute-only (purecode) memory.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
  {
    THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
    THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
    THUMB16_INSN (0x4760),	     /* bx    ip */
  };

/* V4T Thumb -> Thumb long branch stub. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
    ARM_INSN (0xe12fff1c),	     /* bx   ip */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd  R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_REL_INSN (0xea000000, -8),   /* b    (X-8) */
  };
2652
/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
  {
    ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
    ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd   R_ARM_REL32(X-4) */
  };

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
  {
    ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	     /* bx    ip */
    DATA_WORD (0, R_ARM_REL32, 0),   /* dcd   R_ARM_REL32(X) */
  };

/* V4T ARM -> Thumb long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
  {
    ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	     /* bx    ip */
    DATA_WORD (0, R_ARM_REL32, 0),   /* dcd   R_ARM_REL32(X) */
  };

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
    ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd  R_ARM_REL32(X-4) */
  };

/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
  {
    THUMB16_INSN (0xb401),	     /* push {r0} */
    THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
    THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
    THUMB16_INSN (0x4484),	     /* add  ip, r0 */
    THUMB16_INSN (0xbc01),	     /* pop  {r0} */
    THUMB16_INSN (0x4760),	     /* bx   ip */
    DATA_WORD (0, R_ARM_REL32, 4),   /* dcd  R_ARM_REL32(X) */
  };

/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	     /* bx   ip */
    DATA_WORD (0, R_ARM_REL32, 0),   /* dcd  R_ARM_REL32(X) */
  };

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
  {
    ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
    ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd   R_ARM_REL32(X-4) */
  };

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
    ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd  R_ARM_REL32(X-4) */
  };

/* NaCl ARM -> ARM long branch stub.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
  {
    ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
    ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
    ARM_INSN (0xe12fff1c),		/* bx	ip */
    ARM_INSN (0xe320f000),		/* nop */
    ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
    DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
  };

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
  {
    ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
    ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
    ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
    ARM_INSN (0xe12fff1c),		/* bx	ip */
    ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
    DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
  };
2763
/* Stub used for transition to secure state (aka SG veneer).  */
static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
  {
    THUMB32_INSN (0xe97fe97f),		/* sg.  */
    THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
  };


/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
  {
    THUMB16_BCOND_INSN (0xd001),	/* b<cond>.n true.  */
    THUMB32_B_INSN (0xf000b800, -4),	/* b.w insn_after_original_branch.  */
    THUMB32_B_INSN (0xf000b800, -4)	/* true: b.w original_branch_dest.  */
  };

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
  {
    THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
  {
    THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
  {
    ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
  };
2804
2805 /* For each section group there can be a specially created linker section
2806 to hold the stubs for that group. The name of the stub section is based
2807 upon the name of another section within that group with the suffix below
2808 applied.
2809
2810 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2811 create what appeared to be a linker stub section when it actually
2812 contained user code/data. For example, consider this fragment:
2813
2814 const char * stubborn_problems[] = { "np" };
2815
2816 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2817 section called:
2818
2819 .data.rel.local.stubborn_problems
2820
2821 This then causes problems in arm32_arm_build_stubs() as it triggers:
2822
2823 // Ignore non-stub sections.
2824 if (!strstr (stub_sec->name, STUB_SUFFIX))
2825 continue;
2826
2827 And so the section would be ignored instead of being processed. Hence
2828 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2829 C identifier. */
2830 #define STUB_SUFFIX ".__stub"
2831
/* One entry per long/short branch stub defined above.  This X-macro list
   is expanded twice below — once to build the enum of stub types and once
   to build the table of stub templates — keeping the two in sync.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(cmse_branch_thumb_only) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

/* First expansion: one enum constant arm_stub_<name> per stub.  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* Template sequence and its entry count for one stub type.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Second expansion: template table indexed by enum elf32_arm_stub_type;
   slot 0 corresponds to arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2882
/* An entry in the stub hash table: one linker-generated stub (long
   branch veneer, erratum workaround or SG veneer).  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2934
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  /* Address within the section at which this map entry takes effect.  */
  bfd_vma vma;
  /* Map entry class character — NOTE(review): presumably the ARM mapping
     symbol kind ('a', 't' or 'd'); confirm against the users of MAP.  */
  char type;
}
elf32_arm_section_map;
2944
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  /* Address the entry refers to.  */
  bfd_vma vma;
  union
  {
    /* Fields used when TYPE is one of the BRANCH_TO_*_VENEER kinds.  */
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    /* Fields used when TYPE is one of the *_VENEER kinds.  */
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;

/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  /* Address the entry refers to.  */
  bfd_vma vma;
  union
  {
    /* Fields used when TYPE is STM32L4XX_ERRATUM_BRANCH_TO_VENEER.  */
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    /* Fields used when TYPE is STM32L4XX_ERRATUM_VENEER.  */
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;
3006
/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  /* What to do: delete an entry or append a CANTUNWIND entry.  */
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  /* Index of the unwind table entry the edit applies to.  */
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
3026
/* ARM-specific per-section data, extending the generic ELF section data.
   Retrieved via elf32_arm_section_data().  */
typedef struct _arm_elf_section_data
{
  struct bfd_elf_section_data elf;
  /* Information about mapping symbols: entry count, allocated capacity
     and the map array itself.  */
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Get the ARM-specific section data for SEC.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
3061
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  /* Where the offending branch lives.  */
  bfd *input_bfd;
  asection *section;
  bfd_vma offset;
  /* Offset the branch targets.  */
  bfd_vma target_offset;
  /* The original branch instruction encoding.  */
  unsigned long orig_insn;
  /* Name of the stub created for this fix.  */
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  /* Branch source and destination addresses.  */
  bfd_vma from;
  bfd_vma destination;
  /* Hash entry for the branch target symbol, if global.  */
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* True if a stub other than an a8 veneer already covers this branch.  */
  bfd_boolean non_a8_stub;
};
3093
/* The size of the thread control block.  */
#define TCB_SIZE 8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};

/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};

/* Structure to handle FDPIC support for local functions: relocation
   counters and the assigned function descriptor offset (whose low bit
   marks the descriptor as written — see arm_elf_fill_funcdesc).  */
struct fdpic_local {
  unsigned int funcdesc_cnt;
  unsigned int gotofffuncdesc_cnt;
  int funcdesc_offset;
};
3143
/* ARM-specific per-object (tdata) information.  The per-local-symbol
   arrays below are allocated lazily by elf32_arm_allocate_local_sym_info.  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

  /* Maintains FDPIC counters and funcdesc info.  */
  struct fdpic_local *local_fdpic_cnts;
};

/* Accessors for the ARM-specific tdata and its per-local-symbol arrays.  */

#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define elf32_arm_local_fdpic_cnts(bfd) \
  (elf_arm_tdata (bfd)->local_fdpic_cnts)

/* True if BFD is an ELF object using the ARM backend's tdata.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
3186
3187 static bfd_boolean
3188 elf32_arm_mkobject (bfd *abfd)
3189 {
3190 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3191 ARM_ELF_DATA);
3192 }
3193
/* Cast a generic ELF hash entry to the ARM-specific variant.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Structure to handle FDPIC support for extern functions: relocation
   counters and the GOT offsets assigned to the function descriptor.  */
struct fdpic_global {
  unsigned int gotofffuncdesc_cnt;
  unsigned int gotfuncdesc_cnt;
  unsigned int funcdesc_cnt;
  int funcdesc_offset;
  int gotfuncdesc_offset;
};
3204
/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

/* GOT entry kinds.  The TLS values are bit flags and may be combined,
   as GOT_TLS_GD_ANY_P below relies on.  */
#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
/* True if TYPE requests general-dynamic handling, either classic GD or
   descriptor-based.  */
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  /* Mask of the GOT_* flags above for this symbol's GOT entry.  */
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;

  /* Counter for FDPIC relocations against this symbol.  */
  struct fdpic_global fdpic_cnts;
};
3241
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Yields NULL if the hash table is not the ARM backend's.  */
#define elf32_arm_hash_table(p) \
  ((is_elf_hash_table ((p)->hash)					\
    && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA)		\
   ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)

/* Look up an entry in the stub hash table.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size of the TLS-descriptor part of the GOTPLT jump table: one 4-byte
   slot per R_ARM_TLS_DESC index handed out so far.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3272
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target uses REL relocations.  */
  bfd_boolean use_rel;

  /* Nonzero if import library must be a secure gateway import library
     as per ARMv8-M Security Extensions.  */
  int cmse_implib;

  /* The import library whose symbols' address must remain stable in
     the import library generated.  */
  bfd *in_implib_bfd;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Input stub section holding secure gateway veneers.  */
  asection *cmse_stub_sec;

  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
     start to be allocated.  */
  bfd_vma new_cmse_stub_offset;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True if the target system uses FDPIC.  */
  int fdpic_p;

  /* Fixup section.  Used for FDPIC.  */
  asection *srofixup;
};
3429
3430 /* Add an FDPIC read-only fixup. */
3431 static void
3432 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3433 {
3434 bfd_vma fixup_offset;
3435
3436 fixup_offset = srofixup->reloc_count++ * 4;
3437 BFD_ASSERT (fixup_offset < srofixup->size);
3438 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3439 }
3440
/* Count trailing zero bits of MASK.  Falls back to a portable loop when
   the GCC builtin is unavailable; the fallback yields 8*sizeof(mask)
   for MASK == 0 (the builtin's result is undefined in that case).  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  int count = 0;

  while (count < (int) (8 * sizeof (mask)) && (mask & 0x1) == 0)
    {
      mask >>= 1;
      count++;
    }
  return count;
#endif
}
3458
/* Return the number of set bits in MASK.  Uses the GCC builtin when
   available, otherwise Kernighan's clear-lowest-set-bit loop.  */
static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  int count = 0;

  while (mask != 0)
    {
      mask &= mask - 1;	/* Clear the lowest set bit.  */
      count++;
    }
  return count;
#endif
}
3477
static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
				    asection *sreloc, Elf_Internal_Rela *rel);

/* Write the FDPIC function descriptor at OFFSET within the GOT, unless
   it has already been written: the low bit of *FUNCDESC_OFFSET acts as
   a "descriptor written" flag, so the work happens at most once.  For
   PIC output an R_ARM_FUNCDESC_VALUE dynamic relocation against DYNINDX
   is emitted and ADDR/SEG stored as the initial contents; otherwise
   DYNRELOC_VALUE and the GOT address are stored directly and two
   read-only fixups are recorded for both descriptor words.  */

static void
arm_elf_fill_funcdesc(bfd *output_bfd,
		      struct bfd_link_info *info,
		      int *funcdesc_offset,
		      int dynindx,
		      int offset,
		      bfd_vma addr,
		      bfd_vma dynreloc_value,
		      bfd_vma seg)
{
  if ((*funcdesc_offset & 1) == 0)
    {
      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
      asection *sgot = globals->root.sgot;

      if (bfd_link_pic(info))
	{
	  asection *srelgot = globals->root.srelgot;
	  Elf_Internal_Rela outrel;

	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
	  outrel.r_addend = 0;

	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
	}
      else
	{
	  /* Non-PIC: resolve the descriptor now; the rofixups let the
	     loader relocate both words at load time.  */
	  struct elf_link_hash_entry *hgot = globals->root.hgot;
	  bfd_vma got_value = hgot->root.u.def.value
	    + hgot->root.u.def.section->output_section->vma
	    + hgot->root.u.def.section->output_offset;

	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset);
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset + 4);
	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
	}
      /* Mark the descriptor as written.  */
      *funcdesc_offset |= 1;
    }
}
3528
/* Create an entry in an ARM ELF linker hash table.  Follows the usual
   BFD hash protocol: allocate here if the subclass has not, then let
   the superclass initialize its part before filling in ours.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* Initialize the ARM-specific fields; offsets use -1 as the
	 "not yet assigned" marker.  */
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;

      ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
      ret->fdpic_cnts.gotfuncdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_offset = -1;
      ret->fdpic_cnts.gotfuncdesc_offset = -1;
    }

  return (struct bfd_hash_entry *) ret;
}
3573
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  A single zeroed allocation is carved into the per-symbol
   arrays, so every field starts out zero/NULL and the arrays share one
   lifetime.  Returns FALSE on allocation failure.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* sh_info of the symtab header is the number of local symbols.  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (bfd_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (struct fdpic_local)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      /* It is important that these all be allocated in descending
	 order of required alignment, so that arrays allocated later
	 will be sufficiently aligned.  */
      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
      data += num_syms * sizeof (struct fdpic_local);

      elf32_arm_local_got_tls_type (abfd) = data;
#if GCC_VERSION >= 3000
      /* Verify the descending-alignment assumption above.  */
      BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd))
		  <= __alignof__ (*elf_local_got_refcounts (abfd)));
      BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd))
		  <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd)));
      BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd))
		  <= __alignof__ (*elf32_arm_local_iplt (abfd)));
      BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd))
		  <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd)));
#endif
    }
  return TRUE;
}
3625
3626 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3627 to input bfd ABFD. Create the information if it doesn't already exist.
3628 Return null if an allocation fails. */
3629
3630 static struct arm_local_iplt_info *
3631 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3632 {
3633 struct arm_local_iplt_info **ptr;
3634
3635 if (!elf32_arm_allocate_local_sym_info (abfd))
3636 return NULL;
3637
3638 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3639 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3640 if (*ptr == NULL)
3641 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3642 return *ptr;
3643 }
3644
3645 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3646 in ABFD's symbol table. If the symbol is global, H points to its
3647 hash table entry, otherwise H is null.
3648
3649 Return true if the symbol does have PLT information. When returning
3650 true, point *ROOT_PLT at the target-independent reference count/offset
3651 union and *ARM_PLT at the ARM-specific information. */
3652
3653 static bfd_boolean
3654 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3655 struct elf32_arm_link_hash_entry *h,
3656 unsigned long r_symndx, union gotplt_union **root_plt,
3657 struct arm_plt_info **arm_plt)
3658 {
3659 struct arm_local_iplt_info *local_iplt;
3660
3661 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3662 return FALSE;
3663
3664 if (h != NULL)
3665 {
3666 *root_plt = &h->root.plt;
3667 *arm_plt = &h->plt;
3668 return TRUE;
3669 }
3670
3671 if (elf32_arm_local_iplt (abfd) == NULL)
3672 return FALSE;
3673
3674 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3675 if (local_iplt == NULL)
3676 return FALSE;
3677
3678 *root_plt = &local_iplt->root;
3679 *arm_plt = &local_iplt->arm;
3680 return TRUE;
3681 }
3682
3683 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3684
3685 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3686 before it. */
3687
3688 static bfd_boolean
3689 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3690 struct arm_plt_info *arm_plt)
3691 {
3692 struct elf32_arm_link_hash_table *htab;
3693
3694 htab = elf32_arm_hash_table (info);
3695
3696 return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3697 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3698 }
3699
3700 /* Return a pointer to the head of the dynamic reloc list that should
3701 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3702 ABFD's symbol table. Return null if an error occurs. */
3703
3704 static struct elf_dyn_relocs **
3705 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3706 Elf_Internal_Sym *isym)
3707 {
3708 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3709 {
3710 struct arm_local_iplt_info *local_iplt;
3711
3712 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3713 if (local_iplt == NULL)
3714 return NULL;
3715 return &local_iplt->dyn_relocs;
3716 }
3717 else
3718 {
3719 /* Track dynamic relocs needed for local syms too.
3720 We really need local syms available to do this
3721 easily. Oh well. */
3722 asection *s;
3723 void *vpp;
3724
3725 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3726 if (s == NULL)
3727 abort ();
3728
3729 vpp = &elf_section_data (s)->local_dynrel;
3730 return (struct elf_dyn_relocs **) vpp;
3731 }
3732 }
3733
/* Initialize an entry in the stub hash table.  Follows the usual BFD
   hash protocol: allocate if the subclass has not, let the superclass
   initialize its part, then reset our fields to "empty" values.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields.  The stub is not yet sized or
	 placed, so offsets use -1 as the "unset" marker.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = (bfd_vma) -1;
      eh->source_value = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      eh->stub_template_size = -1;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}
3776
3777 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3778 shortcuts to them in our hash table. */
3779
3780 static bfd_boolean
3781 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3782 {
3783 struct elf32_arm_link_hash_table *htab;
3784
3785 htab = elf32_arm_hash_table (info);
3786 if (htab == NULL)
3787 return FALSE;
3788
3789 if (! _bfd_elf_create_got_section (dynobj, info))
3790 return FALSE;
3791
3792 /* Also create .rofixup. */
3793 if (htab->fdpic_p)
3794 {
3795 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3796 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3797 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3798 if (htab->srofixup == NULL
3799 || !bfd_set_section_alignment (htab->srofixup, 2))
3800 return FALSE;
3801 }
3802
3803 return TRUE;
3804 }
3805
/* Create the .iplt, .rel(a).iplt and .igot.plt sections, recording them
   in the hash table.  Each section is only created if not already
   present.  Returns FALSE if any section cannot be created/aligned.  */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  flags = bed->dynamic_sec_flags;

  /* .iplt: read-only code, aligned like the PLT.  */
  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  /* .rel(a).iplt: relocations for .iplt (name depends on REL/RELA).  */
  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  /* .igot.plt: GOT counterpart of .iplt.  */
  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}
3853
3854 /* Determine if we're dealing with a Thumb only architecture. */
3855
3856 static bfd_boolean
3857 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3858 {
3859 int arch;
3860 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3861 Tag_CPU_arch_profile);
3862
3863 if (profile)
3864 return profile == 'M';
3865
3866 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3867
3868 /* Force return logic to be reviewed for each new architecture. */
3869 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3870
3871 if (arch == TAG_CPU_ARCH_V6_M
3872 || arch == TAG_CPU_ARCH_V6S_M
3873 || arch == TAG_CPU_ARCH_V7E_M
3874 || arch == TAG_CPU_ARCH_V8M_BASE
3875 || arch == TAG_CPU_ARCH_V8M_MAIN
3876 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3877 return TRUE;
3878
3879 return FALSE;
3880 }
3881
3882 /* Determine if we're dealing with a Thumb-2 object. */
3883
3884 static bfd_boolean
3885 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3886 {
3887 int arch;
3888 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3889 Tag_THUMB_ISA_use);
3890
3891 if (thumb_isa)
3892 return thumb_isa == 2;
3893
3894 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3895
3896 /* Force return logic to be reviewed for each new architecture. */
3897 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3898
3899 return (arch == TAG_CPU_ARCH_V6T2
3900 || arch == TAG_CPU_ARCH_V7
3901 || arch == TAG_CPU_ARCH_V7E_M
3902 || arch == TAG_CPU_ARCH_V8
3903 || arch == TAG_CPU_ARCH_V8R
3904 || arch == TAG_CPU_ARCH_V8M_MAIN
3905 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3906 }
3907
3908 /* Determine whether Thumb-2 BL instruction is available. */
3909
3910 static bfd_boolean
3911 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3912 {
3913 int arch =
3914 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3915
3916 /* Force return logic to be reviewed for each new architecture. */
3917 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3918
3919 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3920 return (arch == TAG_CPU_ARCH_V6T2
3921 || arch >= TAG_CPU_ARCH_V7);
3922 }
3923
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  Also fixes the PLT header/entry sizes for the target
   variant (VxWorks, Thumb-only, FDPIC).  Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  if (htab->root.target_os == is_vxworks)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks uses its own PLT templates; shared links carry no PLT
	 header.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the attribute lookup at the input bfd.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* FDPIC overrides any PLT sizing chosen above: no PLT header, and
     with DF_BIND_NOW the last 5 words of each entry are dropped
     (presumably the lazy-binding tail of elf32_arm_fdpic_plt_entry,
     which is defined elsewhere -- confirm against that template).  */
  if (htab->fdpic_p) {
    htab->plt_header_size = 0;
    if (info->flags & DF_BIND_NOW)
      htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
    else
      htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
  }

  /* All of these must exist by now; anything else is a linker bug.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return TRUE;
}
3998
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called
   when IND is made to point at DIR; the ARM-specific counts gathered
   on IND are transferred to DIR before the generic copy runs.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  Counts are moved, not duplicated: each
	 source count is zeroed after being added to DIR's.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Take over IND's TLS access model only when DIR has no GOT
	 references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
4039
4040 /* Destroy an ARM elf linker hash table. */
4041
4042 static void
4043 elf32_arm_link_hash_table_free (bfd *obfd)
4044 {
4045 struct elf32_arm_link_hash_table *ret
4046 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4047
4048 bfd_hash_table_free (&ret->stub_hash_table);
4049 _bfd_elf_link_hash_table_free (obfd);
4050 }
4051
4052 /* Create an ARM elf linker hash table. */
4053
4054 static struct bfd_link_hash_table *
4055 elf32_arm_link_hash_table_create (bfd *abfd)
4056 {
4057 struct elf32_arm_link_hash_table *ret;
4058 size_t amt = sizeof (struct elf32_arm_link_hash_table);
4059
4060 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4061 if (ret == NULL)
4062 return NULL;
4063
4064 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4065 elf32_arm_link_hash_newfunc,
4066 sizeof (struct elf32_arm_link_hash_entry),
4067 ARM_ELF_DATA))
4068 {
4069 free (ret);
4070 return NULL;
4071 }
4072
4073 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4074 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4075 #ifdef FOUR_WORD_PLT
4076 ret->plt_header_size = 16;
4077 ret->plt_entry_size = 16;
4078 #else
4079 ret->plt_header_size = 20;
4080 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4081 #endif
4082 ret->use_rel = TRUE;
4083 ret->obfd = abfd;
4084 ret->fdpic_p = 0;
4085
4086 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4087 sizeof (struct elf32_arm_stub_hash_entry)))
4088 {
4089 _bfd_elf_link_hash_table_free (abfd);
4090 return NULL;
4091 }
4092 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4093
4094 return &ret->root.root;
4095 }
4096
4097 /* Determine what kind of NOPs are available. */
4098
4099 static bfd_boolean
4100 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4101 {
4102 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4103 Tag_CPU_arch);
4104
4105 /* Force return logic to be reviewed for each new architecture. */
4106 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
4107
4108 return (arch == TAG_CPU_ARCH_V6T2
4109 || arch == TAG_CPU_ARCH_V6K
4110 || arch == TAG_CPU_ARCH_V7
4111 || arch == TAG_CPU_ARCH_V8
4112 || arch == TAG_CPU_ARCH_V8R);
4113 }
4114
4115 static bfd_boolean
4116 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4117 {
4118 switch (stub_type)
4119 {
4120 case arm_stub_long_branch_thumb_only:
4121 case arm_stub_long_branch_thumb2_only:
4122 case arm_stub_long_branch_thumb2_only_pure:
4123 case arm_stub_long_branch_v4t_thumb_arm:
4124 case arm_stub_short_branch_v4t_thumb_arm:
4125 case arm_stub_long_branch_v4t_thumb_arm_pic:
4126 case arm_stub_long_branch_v4t_thumb_tls_pic:
4127 case arm_stub_long_branch_thumb_only_pic:
4128 case arm_stub_cmse_branch_thumb_only:
4129 return TRUE;
4130 case arm_stub_none:
4131 BFD_FAIL ();
4132 return FALSE;
4133 break;
4134 default:
4135 return FALSE;
4136 }
4137 }
4138
/* Determine the type of stub needed, if any, for a call.

   INPUT_SEC/REL identify the branch relocation being resolved in
   INPUT_BFD.  ST_TYPE is the ELF symbol type of the target, HASH its
   hash entry (NULL for local symbols), DESTINATION the target address
   and SYM_SEC its section; NAME is used only in diagnostics.  On
   entry *ACTUAL_BRANCH_TYPE holds the branch's Arm/Thumb state; when
   a stub is required it is updated to the state the stub branches to.
   Returns arm_stub_none when no veneer is needed.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bfd_boolean thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  /* ST_BRANCH_LONG branches never need a veneer.  */
  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  /* The branch now resolves to the PLT entry, not the symbol.  */
	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb.  We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub.  If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  /* Pure-code sections get the movw-based stub when the
		     architecture provides movw.  */
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->root.target_os == is_nacl
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->root.target_os == is_nacl
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4483
/* Build a name for an entry in the stub hash table.  The name encodes
   the (group) section id, the target (symbol name for globals;
   section id and symbol index for locals), the addend and the stub
   type, so distinct stubs get distinct keys.  Returns a bfd_malloc'd
   string, or NULL on allocation failure; the caller frees it.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      /* Global: "%08x_%s+%x_%d".  Length budget: 8 hex digits, '_',
	 symbol name, '+', up to 8 hex digits, '_', up to 2 digits of
	 stub type, NUL.  */
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }
  else
    {
      /* Local: "%08x_%x:%x+%x_%d".  For TLS call relocs, 0 is used in
	 place of the symbol index.  */
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }

  return stub_name;
}
4524
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  Returns NULL when the
   input section is not code or when no matching stub exists yet;
   exits the link entirely for an out-of-range CMSE stub.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf32_arm_link_hash_table *htab,
			  enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  /* Only code sections can contain branches needing stubs.  */
  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If the input section is the CMSE stubs one and it needs a long
     branch stub to reach it's final destination, give up with an
     error message: this is not supported.  See PR ld/24709.  */
  if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen(CMSE_STUB_NAME)))
    {
      bfd *output_bfd = htab->obfd;
      asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);

      _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
			    "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
			  CMSE_STUB_NAME,
			  (uint64_t)out_sec->output_section->vma
			    + out_sec->output_offset,
			  (uint64_t)sym_sec->output_section->vma
			    + sym_sec->output_offset
			    + h->root.root.u.def.value);
      /* Exit, rather than leave incompletely processed
	 relocations.  */
      xexit(1);
    }

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  BFD_ASSERT (input_section->id <= htab->top_id);
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: the stub most recently found for this global symbol
     may match again.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
	return NULL;

      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, FALSE, FALSE);
      /* Remember the result for the next query on the same symbol.  */
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
4597
4598 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4599 section. */
4600
4601 static bfd_boolean
4602 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4603 {
4604 if (stub_type >= max_stub_type)
4605 abort (); /* Should be unreachable. */
4606
4607 switch (stub_type)
4608 {
4609 case arm_stub_cmse_branch_thumb_only:
4610 return TRUE;
4611
4612 default:
4613 return FALSE;
4614 }
4615
4616 abort (); /* Should be unreachable. */
4617 }
4618
4619 /* Required alignment (as a power of 2) for the dedicated section holding
4620 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4621 with input sections. */
4622
4623 static int
4624 arm_dedicated_stub_output_section_required_alignment
4625 (enum elf32_arm_stub_type stub_type)
4626 {
4627 if (stub_type >= max_stub_type)
4628 abort (); /* Should be unreachable. */
4629
4630 switch (stub_type)
4631 {
4632 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4633 boundary. */
4634 case arm_stub_cmse_branch_thumb_only:
4635 return 5;
4636
4637 default:
4638 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4639 return 0;
4640 }
4641
4642 abort (); /* Should be unreachable. */
4643 }
4644
4645 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4646 NULL if veneers of this type are interspersed with input sections. */
4647
4648 static const char *
4649 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4650 {
4651 if (stub_type >= max_stub_type)
4652 abort (); /* Should be unreachable. */
4653
4654 switch (stub_type)
4655 {
4656 case arm_stub_cmse_branch_thumb_only:
4657 return CMSE_STUB_NAME;
4658
4659 default:
4660 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4661 return NULL;
4662 }
4663
4664 abort (); /* Should be unreachable. */
4665 }
4666
4667 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4668 returns the address of the hash table field in HTAB holding a pointer to the
4669 corresponding input section. Otherwise, returns NULL. */
4670
4671 static asection **
4672 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4673 enum elf32_arm_stub_type stub_type)
4674 {
4675 if (stub_type >= max_stub_type)
4676 abort (); /* Should be unreachable. */
4677
4678 switch (stub_type)
4679 {
4680 case arm_stub_cmse_branch_thumb_only:
4681 return &htab->cmse_stub_sec;
4682
4683 default:
4684 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4685 return NULL;
4686 }
4687
4688 abort (); /* Should be unreachable. */
4689 }
4690
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  Returns NULL when the dedicated output section is
   missing from the linker script or on allocation failure.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated veneers go into a named output section which the
	 linker script must already provide.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Ordinary stubs attach to the leader of the stub group that
	 SECTION belongs to.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->root.target_os == is_nacl ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* The stub section is named "<prefix><STUB_SUFFIX>".  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Remember the stub section directly for SECTION so that later
     lookups do not have to go through the group leader.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4770
4771 /* Add a new stub entry to the stub hash. Not all fields of the new
4772 stub entry are initialised. */
4773
4774 static struct elf32_arm_stub_hash_entry *
4775 elf32_arm_add_stub (const char *stub_name, asection *section,
4776 struct elf32_arm_link_hash_table *htab,
4777 enum elf32_arm_stub_type stub_type)
4778 {
4779 asection *link_sec;
4780 asection *stub_sec;
4781 struct elf32_arm_stub_hash_entry *stub_entry;
4782
4783 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4784 stub_type);
4785 if (stub_sec == NULL)
4786 return NULL;
4787
4788 /* Enter this entry into the linker stub hash table. */
4789 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4790 TRUE, FALSE);
4791 if (stub_entry == NULL)
4792 {
4793 if (section == NULL)
4794 section = stub_sec;
4795 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4796 section->owner, stub_name);
4797 return NULL;
4798 }
4799
4800 stub_entry->stub_sec = stub_sec;
4801 stub_entry->stub_offset = (bfd_vma) -1;
4802 stub_entry->id_sec = link_sec;
4803
4804 return stub_entry;
4805 }
4806
4807 /* Store an Arm insn into an output section not processed by
4808 elf32_arm_write_section. */
4809
4810 static void
4811 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4812 bfd * output_bfd, bfd_vma val, void * ptr)
4813 {
4814 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4815 bfd_putl32 (val, ptr);
4816 else
4817 bfd_putb32 (val, ptr);
4818 }
4819
4820 /* Store a 16-bit Thumb insn into an output section not processed by
4821 elf32_arm_write_section. */
4822
4823 static void
4824 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4825 bfd * output_bfd, bfd_vma val, void * ptr)
4826 {
4827 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4828 bfd_putl16 (val, ptr);
4829 else
4830 bfd_putb16 (val, ptr);
4831 }
4832
4833 /* Store a Thumb2 insn into an output section not processed by
4834 elf32_arm_write_section. */
4835
4836 static void
4837 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4838 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4839 {
4840 /* T2 instructions are 16-bit streamed. */
4841 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4842 {
4843 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4844 bfd_putl16 ((val & 0xffff), ptr + 2);
4845 }
4846 else
4847 {
4848 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4849 bfd_putb16 ((val & 0xffff), ptr + 2);
4850 }
4851 }
4852
4853 /* If it's possible to change R_TYPE to a more efficient access
4854 model, return the new reloc type. */
4855
4856 static unsigned
4857 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4858 struct elf_link_hash_entry *h)
4859 {
4860 int is_local = (h == NULL);
4861
4862 if (bfd_link_dll (info)
4863 || (h && h->root.type == bfd_link_hash_undefweak))
4864 return r_type;
4865
4866 /* We do not support relaxations for Old TLS models. */
4867 switch (r_type)
4868 {
4869 case R_ARM_TLS_GOTDESC:
4870 case R_ARM_TLS_CALL:
4871 case R_ARM_THM_TLS_CALL:
4872 case R_ARM_TLS_DESCSEQ:
4873 case R_ARM_THM_TLS_DESCSEQ:
4874 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4875 }
4876
4877 return r_type;
4878 }
4879
4880 static bfd_reloc_status_type elf32_arm_final_link_relocate
4881 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4882 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4883 const char *, unsigned char, enum arm_st_branch_type,
4884 struct elf_link_hash_entry *, bfd_boolean *, char **);
4885
4886 static unsigned int
4887 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4888 {
4889 switch (stub_type)
4890 {
4891 case arm_stub_a8_veneer_b_cond:
4892 case arm_stub_a8_veneer_b:
4893 case arm_stub_a8_veneer_bl:
4894 return 2;
4895
4896 case arm_stub_long_branch_any_any:
4897 case arm_stub_long_branch_v4t_arm_thumb:
4898 case arm_stub_long_branch_thumb_only:
4899 case arm_stub_long_branch_thumb2_only:
4900 case arm_stub_long_branch_thumb2_only_pure:
4901 case arm_stub_long_branch_v4t_thumb_thumb:
4902 case arm_stub_long_branch_v4t_thumb_arm:
4903 case arm_stub_short_branch_v4t_thumb_arm:
4904 case arm_stub_long_branch_any_arm_pic:
4905 case arm_stub_long_branch_any_thumb_pic:
4906 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4907 case arm_stub_long_branch_v4t_arm_thumb_pic:
4908 case arm_stub_long_branch_v4t_thumb_arm_pic:
4909 case arm_stub_long_branch_thumb_only_pic:
4910 case arm_stub_long_branch_any_tls_pic:
4911 case arm_stub_long_branch_v4t_thumb_tls_pic:
4912 case arm_stub_cmse_branch_thumb_only:
4913 case arm_stub_a8_veneer_blx:
4914 return 4;
4915
4916 case arm_stub_long_branch_arm_nacl:
4917 case arm_stub_long_branch_arm_nacl_pic:
4918 return 16;
4919
4920 default:
4921 abort (); /* Should be unreachable. */
4922 }
4923 }
4924
4925 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4926 veneering (TRUE) or have their own symbol (FALSE). */
4927
4928 static bfd_boolean
4929 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4930 {
4931 if (stub_type >= max_stub_type)
4932 abort (); /* Should be unreachable. */
4933
4934 switch (stub_type)
4935 {
4936 case arm_stub_cmse_branch_thumb_only:
4937 return TRUE;
4938
4939 default:
4940 return FALSE;
4941 }
4942
4943 abort (); /* Should be unreachable. */
4944 }
4945
4946 /* Returns the padding needed for the dedicated section used stubs of type
4947 STUB_TYPE. */
4948
4949 static int
4950 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4951 {
4952 if (stub_type >= max_stub_type)
4953 abort (); /* Should be unreachable. */
4954
4955 switch (stub_type)
4956 {
4957 case arm_stub_cmse_branch_thumb_only:
4958 return 32;
4959
4960 default:
4961 return 0;
4962 }
4963
4964 abort (); /* Should be unreachable. */
4965 }
4966
4967 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4968 returns the address of the hash table field in HTAB holding the offset at
4969 which new veneers should be layed out in the stub section. */
4970
4971 static bfd_vma*
4972 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4973 enum elf32_arm_stub_type stub_type)
4974 {
4975 switch (stub_type)
4976 {
4977 case arm_stub_cmse_branch_thumb_only:
4978 return &htab->new_cmse_stub_offset;
4979
4980 default:
4981 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4982 return NULL;
4983 }
4984 }
4985
4986 static bfd_boolean
4987 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4988 void * in_arg)
4989 {
4990 #define MAXRELOCS 3
4991 bfd_boolean removed_sg_veneer;
4992 struct elf32_arm_stub_hash_entry *stub_entry;
4993 struct elf32_arm_link_hash_table *globals;
4994 struct bfd_link_info *info;
4995 asection *stub_sec;
4996 bfd *stub_bfd;
4997 bfd_byte *loc;
4998 bfd_vma sym_value;
4999 int template_size;
5000 int size;
5001 const insn_sequence *template_sequence;
5002 int i;
5003 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
5004 int stub_reloc_offset[MAXRELOCS] = {0, 0};
5005 int nrelocs = 0;
5006 int just_allocated = 0;
5007
5008 /* Massage our args to the form they really have. */
5009 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5010 info = (struct bfd_link_info *) in_arg;
5011
5012 /* Fail if the target section could not be assigned to an output
5013 section. The user should fix his linker script. */
5014 if (stub_entry->target_section->output_section == NULL
5015 && info->non_contiguous_regions)
5016 info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
5017 "Retry without --enable-non-contiguous-regions.\n"),
5018 stub_entry->target_section);
5019
5020 globals = elf32_arm_hash_table (info);
5021 if (globals == NULL)
5022 return FALSE;
5023
5024 stub_sec = stub_entry->stub_sec;
5025
5026 if ((globals->fix_cortex_a8 < 0)
5027 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5028 /* We have to do less-strictly-aligned fixes last. */
5029 return TRUE;
5030
5031 /* Assign a slot at the end of section if none assigned yet. */
5032 if (stub_entry->stub_offset == (bfd_vma) -1)
5033 {
5034 stub_entry->stub_offset = stub_sec->size;
5035 just_allocated = 1;
5036 }
5037 loc = stub_sec->contents + stub_entry->stub_offset;
5038
5039 stub_bfd = stub_sec->owner;
5040
5041 /* This is the address of the stub destination. */
5042 sym_value = (stub_entry->target_value
5043 + stub_entry->target_section->output_offset
5044 + stub_entry->target_section->output_section->vma);
5045
5046 template_sequence = stub_entry->stub_template;
5047 template_size = stub_entry->stub_template_size;
5048
5049 size = 0;
5050 for (i = 0; i < template_size; i++)
5051 {
5052 switch (template_sequence[i].type)
5053 {
5054 case THUMB16_TYPE:
5055 {
5056 bfd_vma data = (bfd_vma) template_sequence[i].data;
5057 if (template_sequence[i].reloc_addend != 0)
5058 {
5059 /* We've borrowed the reloc_addend field to mean we should
5060 insert a condition code into this (Thumb-1 branch)
5061 instruction. See THUMB16_BCOND_INSN. */
5062 BFD_ASSERT ((data & 0xff00) == 0xd000);
5063 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5064 }
5065 bfd_put_16 (stub_bfd, data, loc + size);
5066 size += 2;
5067 }
5068 break;
5069
5070 case THUMB32_TYPE:
5071 bfd_put_16 (stub_bfd,
5072 (template_sequence[i].data >> 16) & 0xffff,
5073 loc + size);
5074 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5075 loc + size + 2);
5076 if (template_sequence[i].r_type != R_ARM_NONE)
5077 {
5078 stub_reloc_idx[nrelocs] = i;
5079 stub_reloc_offset[nrelocs++] = size;
5080 }
5081 size += 4;
5082 break;
5083
5084 case ARM_TYPE:
5085 bfd_put_32 (stub_bfd, template_sequence[i].data,
5086 loc + size);
5087 /* Handle cases where the target is encoded within the
5088 instruction. */
5089 if (template_sequence[i].r_type == R_ARM_JUMP24)
5090 {
5091 stub_reloc_idx[nrelocs] = i;
5092 stub_reloc_offset[nrelocs++] = size;
5093 }
5094 size += 4;
5095 break;
5096
5097 case DATA_TYPE:
5098 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5099 stub_reloc_idx[nrelocs] = i;
5100 stub_reloc_offset[nrelocs++] = size;
5101 size += 4;
5102 break;
5103
5104 default:
5105 BFD_FAIL ();
5106 return FALSE;
5107 }
5108 }
5109
5110 if (just_allocated)
5111 stub_sec->size += size;
5112
5113 /* Stub size has already been computed in arm_size_one_stub. Check
5114 consistency. */
5115 BFD_ASSERT (size == stub_entry->stub_size);
5116
5117 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5118 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5119 sym_value |= 1;
5120
5121 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5122 to relocate in each stub. */
5123 removed_sg_veneer =
5124 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5125 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5126
5127 for (i = 0; i < nrelocs; i++)
5128 {
5129 Elf_Internal_Rela rel;
5130 bfd_boolean unresolved_reloc;
5131 char *error_message;
5132 bfd_vma points_to =
5133 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5134
5135 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5136 rel.r_info = ELF32_R_INFO (0,
5137 template_sequence[stub_reloc_idx[i]].r_type);
5138 rel.r_addend = 0;
5139
5140 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5141 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5142 template should refer back to the instruction after the original
5143 branch. We use target_section as Cortex-A8 erratum workaround stubs
5144 are only generated when both source and target are in the same
5145 section. */
5146 points_to = stub_entry->target_section->output_section->vma
5147 + stub_entry->target_section->output_offset
5148 + stub_entry->source_value;
5149
5150 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5151 (template_sequence[stub_reloc_idx[i]].r_type),
5152 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5153 points_to, info, stub_entry->target_section, "", STT_FUNC,
5154 stub_entry->branch_type,
5155 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5156 &error_message);
5157 }
5158
5159 return TRUE;
5160 #undef MAXRELOCS
5161 }
5162
5163 /* Calculate the template, template size and instruction size for a stub.
5164 Return value is the instruction size. */
5165
5166 static unsigned int
5167 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5168 const insn_sequence **stub_template,
5169 int *stub_template_size)
5170 {
5171 const insn_sequence *template_sequence = NULL;
5172 int template_size = 0, i;
5173 unsigned int size;
5174
5175 template_sequence = stub_definitions[stub_type].template_sequence;
5176 if (stub_template)
5177 *stub_template = template_sequence;
5178
5179 template_size = stub_definitions[stub_type].template_size;
5180 if (stub_template_size)
5181 *stub_template_size = template_size;
5182
5183 size = 0;
5184 for (i = 0; i < template_size; i++)
5185 {
5186 switch (template_sequence[i].type)
5187 {
5188 case THUMB16_TYPE:
5189 size += 2;
5190 break;
5191
5192 case ARM_TYPE:
5193 case THUMB32_TYPE:
5194 case DATA_TYPE:
5195 size += 4;
5196 break;
5197
5198 default:
5199 BFD_FAIL ();
5200 return 0;
5201 }
5202 }
5203
5204 return size;
5205 }
5206
5207 /* As above, but don't actually build the stub. Just bump offset so
5208 we know stub section sizes. */
5209
5210 static bfd_boolean
5211 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5212 void *in_arg ATTRIBUTE_UNUSED)
5213 {
5214 struct elf32_arm_stub_hash_entry *stub_entry;
5215 const insn_sequence *template_sequence;
5216 int template_size, size;
5217
5218 /* Massage our args to the form they really have. */
5219 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5220
5221 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5222 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5223
5224 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5225 &template_size);
5226
5227 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5228 if (stub_entry->stub_template_size)
5229 {
5230 stub_entry->stub_size = size;
5231 stub_entry->stub_template = template_sequence;
5232 stub_entry->stub_template_size = template_size;
5233 }
5234
5235 /* Already accounted for. */
5236 if (stub_entry->stub_offset != (bfd_vma) -1)
5237 return TRUE;
5238
5239 size = (size + 7) & ~7;
5240 stub_entry->stub_sec->size += size;
5241
5242 return TRUE;
5243 }
5244
/* External entry points for sizing and building linker stubs.  */

/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  size_t amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* stub_group is indexed by input section id, so it must be able to
     hold entries for ids 0..top_id inclusive.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL marker instead; their lists are
     filled in by elf32_arm_next_input_section.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
5325
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* bfd_abs_section_ptr marks output sections we don't insert
	 stubs into (see elf32_arm_setup_section_lists).  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
5355
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  /* Walk every per-output-section list built by
     elf32_arm_next_input_section.  */
  do
    {
      asection *tail = *list;
      asection *head;

      /* Skip output sections marked as uninteresting.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group [head..curr] while the span from the
	     group start to the end of the candidate stays within
	     stub_group_size.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The per-output-section lists are no longer needed.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5456
5457 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5458 erratum fix. */
5459
5460 static int
5461 a8_reloc_compare (const void *a, const void *b)
5462 {
5463 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5464 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5465
5466 if (ra->from < rb->from)
5467 return -1;
5468 else if (ra->from > rb->from)
5469 return 1;
5470 else
5471 return 0;
5472 }
5473
5474 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5475 const char *, char **);
5476
5477 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5478 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5479 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5480 otherwise. */
5481
5482 static bfd_boolean
5483 cortex_a8_erratum_scan (bfd *input_bfd,
5484 struct bfd_link_info *info,
5485 struct a8_erratum_fix **a8_fixes_p,
5486 unsigned int *num_a8_fixes_p,
5487 unsigned int *a8_fix_table_size_p,
5488 struct a8_erratum_reloc *a8_relocs,
5489 unsigned int num_a8_relocs,
5490 unsigned prev_num_a8_fixes,
5491 bfd_boolean *stub_changed_p)
5492 {
5493 asection *section;
5494 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5495 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5496 unsigned int num_a8_fixes = *num_a8_fixes_p;
5497 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5498
5499 if (htab == NULL)
5500 return FALSE;
5501
5502 for (section = input_bfd->sections;
5503 section != NULL;
5504 section = section->next)
5505 {
5506 bfd_byte *contents = NULL;
5507 struct _arm_elf_section_data *sec_data;
5508 unsigned int span;
5509 bfd_vma base_vma;
5510
5511 if (elf_section_type (section) != SHT_PROGBITS
5512 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5513 || (section->flags & SEC_EXCLUDE) != 0
5514 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5515 || (section->output_section == bfd_abs_section_ptr))
5516 continue;
5517
5518 base_vma = section->output_section->vma + section->output_offset;
5519
5520 if (elf_section_data (section)->this_hdr.contents != NULL)
5521 contents = elf_section_data (section)->this_hdr.contents;
5522 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5523 return TRUE;
5524
5525 sec_data = elf32_arm_section_data (section);
5526
5527 for (span = 0; span < sec_data->mapcount; span++)
5528 {
5529 unsigned int span_start = sec_data->map[span].vma;
5530 unsigned int span_end = (span == sec_data->mapcount - 1)
5531 ? section->size : sec_data->map[span + 1].vma;
5532 unsigned int i;
5533 char span_type = sec_data->map[span].type;
5534 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5535
5536 if (span_type != 't')
5537 continue;
5538
5539 /* Span is entirely within a single 4KB region: skip scanning. */
5540 if (((base_vma + span_start) & ~0xfff)
5541 == ((base_vma + span_end) & ~0xfff))
5542 continue;
5543
5544 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5545
5546 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5547 * The branch target is in the same 4KB region as the
5548 first half of the branch.
5549 * The instruction before the branch is a 32-bit
5550 length non-branch instruction. */
5551 for (i = span_start; i < span_end;)
5552 {
5553 unsigned int insn = bfd_getl16 (&contents[i]);
5554 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5555 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5556
5557 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5558 insn_32bit = TRUE;
5559
5560 if (insn_32bit)
5561 {
5562 /* Load the rest of the insn (in manual-friendly order). */
5563 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5564
5565 /* Encoding T4: B<c>.W. */
5566 is_b = (insn & 0xf800d000) == 0xf0009000;
5567 /* Encoding T1: BL<c>.W. */
5568 is_bl = (insn & 0xf800d000) == 0xf000d000;
5569 /* Encoding T2: BLX<c>.W. */
5570 is_blx = (insn & 0xf800d000) == 0xf000c000;
5571 /* Encoding T3: B<c>.W (not permitted in IT block). */
5572 is_bcc = (insn & 0xf800d000) == 0xf0008000
5573 && (insn & 0x07f00000) != 0x03800000;
5574 }
5575
5576 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5577
5578 if (((base_vma + i) & 0xfff) == 0xffe
5579 && insn_32bit
5580 && is_32bit_branch
5581 && last_was_32bit
5582 && ! last_was_branch)
5583 {
5584 bfd_signed_vma offset = 0;
5585 bfd_boolean force_target_arm = FALSE;
5586 bfd_boolean force_target_thumb = FALSE;
5587 bfd_vma target;
5588 enum elf32_arm_stub_type stub_type = arm_stub_none;
5589 struct a8_erratum_reloc key, *found;
5590 bfd_boolean use_plt = FALSE;
5591
5592 key.from = base_vma + i;
5593 found = (struct a8_erratum_reloc *)
5594 bsearch (&key, a8_relocs, num_a8_relocs,
5595 sizeof (struct a8_erratum_reloc),
5596 &a8_reloc_compare);
5597
5598 if (found)
5599 {
5600 char *error_message = NULL;
5601 struct elf_link_hash_entry *entry;
5602
5603 /* We don't care about the error returned from this
5604 function, only if there is glue or not. */
5605 entry = find_thumb_glue (info, found->sym_name,
5606 &error_message);
5607
5608 if (entry)
5609 found->non_a8_stub = TRUE;
5610
5611 /* Keep a simpler condition, for the sake of clarity. */
5612 if (htab->root.splt != NULL && found->hash != NULL
5613 && found->hash->root.plt.offset != (bfd_vma) -1)
5614 use_plt = TRUE;
5615
5616 if (found->r_type == R_ARM_THM_CALL)
5617 {
5618 if (found->branch_type == ST_BRANCH_TO_ARM
5619 || use_plt)
5620 force_target_arm = TRUE;
5621 else
5622 force_target_thumb = TRUE;
5623 }
5624 }
5625
5626 /* Check if we have an offending branch instruction. */
5627
5628 if (found && found->non_a8_stub)
5629 /* We've already made a stub for this instruction, e.g.
5630 it's a long branch or a Thumb->ARM stub. Assume that
5631 stub will suffice to work around the A8 erratum (see
5632 setting of always_after_branch above). */
5633 ;
5634 else if (is_bcc)
5635 {
5636 offset = (insn & 0x7ff) << 1;
5637 offset |= (insn & 0x3f0000) >> 4;
5638 offset |= (insn & 0x2000) ? 0x40000 : 0;
5639 offset |= (insn & 0x800) ? 0x80000 : 0;
5640 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5641 if (offset & 0x100000)
5642 offset |= ~ ((bfd_signed_vma) 0xfffff);
5643 stub_type = arm_stub_a8_veneer_b_cond;
5644 }
5645 else if (is_b || is_bl || is_blx)
5646 {
5647 int s = (insn & 0x4000000) != 0;
5648 int j1 = (insn & 0x2000) != 0;
5649 int j2 = (insn & 0x800) != 0;
5650 int i1 = !(j1 ^ s);
5651 int i2 = !(j2 ^ s);
5652
5653 offset = (insn & 0x7ff) << 1;
5654 offset |= (insn & 0x3ff0000) >> 4;
5655 offset |= i2 << 22;
5656 offset |= i1 << 23;
5657 offset |= s << 24;
5658 if (offset & 0x1000000)
5659 offset |= ~ ((bfd_signed_vma) 0xffffff);
5660
5661 if (is_blx)
5662 offset &= ~ ((bfd_signed_vma) 3);
5663
5664 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5665 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5666 }
5667
5668 if (stub_type != arm_stub_none)
5669 {
5670 bfd_vma pc_for_insn = base_vma + i + 4;
5671
5672 /* The original instruction is a BL, but the target is
5673 an ARM instruction. If we were not making a stub,
5674 the BL would have been converted to a BLX. Use the
5675 BLX stub instead in that case. */
5676 if (htab->use_blx && force_target_arm
5677 && stub_type == arm_stub_a8_veneer_bl)
5678 {
5679 stub_type = arm_stub_a8_veneer_blx;
5680 is_blx = TRUE;
5681 is_bl = FALSE;
5682 }
5683 /* Conversely, if the original instruction was
5684 BLX but the target is Thumb mode, use the BL
5685 stub. */
5686 else if (force_target_thumb
5687 && stub_type == arm_stub_a8_veneer_blx)
5688 {
5689 stub_type = arm_stub_a8_veneer_bl;
5690 is_blx = FALSE;
5691 is_bl = TRUE;
5692 }
5693
5694 if (is_blx)
5695 pc_for_insn &= ~ ((bfd_vma) 3);
5696
5697 /* If we found a relocation, use the proper destination,
5698 not the offset in the (unrelocated) instruction.
5699 Note this is always done if we switched the stub type
5700 above. */
5701 if (found)
5702 offset =
5703 (bfd_signed_vma) (found->destination - pc_for_insn);
5704
5705 /* If the stub will use a Thumb-mode branch to a
5706 PLT target, redirect it to the preceding Thumb
5707 entry point. */
5708 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5709 offset -= PLT_THUMB_STUB_SIZE;
5710
5711 target = pc_for_insn + offset;
5712
5713 /* The BLX stub is ARM-mode code. Adjust the offset to
5714 take the different PC value (+8 instead of +4) into
5715 account. */
5716 if (stub_type == arm_stub_a8_veneer_blx)
5717 offset += 4;
5718
5719 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5720 {
5721 char *stub_name = NULL;
5722
5723 if (num_a8_fixes == a8_fix_table_size)
5724 {
5725 a8_fix_table_size *= 2;
5726 a8_fixes = (struct a8_erratum_fix *)
5727 bfd_realloc (a8_fixes,
5728 sizeof (struct a8_erratum_fix)
5729 * a8_fix_table_size);
5730 }
5731
5732 if (num_a8_fixes < prev_num_a8_fixes)
5733 {
5734 /* If we're doing a subsequent scan,
5735 check if we've found the same fix as
5736 before, and try and reuse the stub
5737 name. */
5738 stub_name = a8_fixes[num_a8_fixes].stub_name;
5739 if ((a8_fixes[num_a8_fixes].section != section)
5740 || (a8_fixes[num_a8_fixes].offset != i))
5741 {
5742 free (stub_name);
5743 stub_name = NULL;
5744 *stub_changed_p = TRUE;
5745 }
5746 }
5747
5748 if (!stub_name)
5749 {
5750 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5751 if (stub_name != NULL)
5752 sprintf (stub_name, "%x:%x", section->id, i);
5753 }
5754
5755 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5756 a8_fixes[num_a8_fixes].section = section;
5757 a8_fixes[num_a8_fixes].offset = i;
5758 a8_fixes[num_a8_fixes].target_offset =
5759 target - base_vma;
5760 a8_fixes[num_a8_fixes].orig_insn = insn;
5761 a8_fixes[num_a8_fixes].stub_name = stub_name;
5762 a8_fixes[num_a8_fixes].stub_type = stub_type;
5763 a8_fixes[num_a8_fixes].branch_type =
5764 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5765
5766 num_a8_fixes++;
5767 }
5768 }
5769 }
5770
5771 i += insn_32bit ? 4 : 2;
5772 last_was_32bit = insn_32bit;
5773 last_was_branch = is_32bit_branch;
5774 }
5775 }
5776
5777 if (elf_section_data (section)->this_hdr.contents == NULL)
5778 free (contents);
5779 }
5780
5781 *a8_fixes_p = a8_fixes;
5782 *num_a8_fixes_p = num_a8_fixes;
5783 *a8_fix_table_size_p = a8_fix_table_size;
5784
5785 return FALSE;
5786 }
5787
/* Create or update a stub entry depending on whether the stub can already be
   found in HTAB.  The stub is identified by:
   - its type STUB_TYPE
   - its source branch (note that several can share the same stub) whose
     section and relocation (if any) are given by SECTION and IRELA
     respectively
   - its target symbol whose input section, hash, name, value and branch type
     are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
     respectively

   If found, the value of the stub's target symbol is updated from SYM_VALUE
   and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
   TRUE and the stub entry is initialized.

   Returns the stub that was created or updated, or NULL if an error
   occurred.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  /* Sym-claiming stubs (CMSE veneers) are keyed directly on the symbol
     name; this name is owned by the caller and must not be freed here.  */
  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);
      BFD_ASSERT (section->id <= htab->top_id);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return NULL;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
				     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return stub_entry;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return NULL;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* NOTE(review): the buffer is sized from THUMB2ARM_GLUE_ENTRY_NAME
	 but may receive any of the three formats below — this assumes
	 THUMB2ARM_GLUE_ENTRY_NAME is the longest template; confirm if
	 the templates ever change.  */
      stub_entry->output_name = (char *)
	  bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
				     + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return NULL;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
		|| r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = TRUE;
  return stub_entry;
}
5899
5900 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5901 gateway veneer to transition from non secure to secure state and create them
5902 accordingly.
5903
5904 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5905 defines the conditions that govern Secure Gateway veneer creation for a
5906 given symbol <SYM> as follows:
5907 - it has function type
5908 - it has non local binding
5909 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5910 same type, binding and value as <SYM> (called normal symbol).
5911 An entry function can handle secure state transition itself in which case
5912 its special symbol would have a different value from the normal symbol.
5913
5914 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5915 entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
   created.
5918
5919 The return value gives whether a stub failed to be allocated. */
5920
static bfd_boolean
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
	   int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  /* In an ELF symbol table sh_info is the index of the first non-local
     symbol, so indices below EXT_START address LOCAL_SYMS and indices at or
     above it address SYM_HASHES (shifted by EXT_START).  */
  ext_start = symtab_hdr->sh_info;
  /* Secure gateway veneers only make sense for ARMv8-M Mainline/Baseline
     output; anything older gets diagnosed below.  */
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	    && out_attr[Tag_CPU_arch_profile].i == 'M');

  /* Reuse cached local symbols if available, otherwise read them in.  */
  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
				       symtab_hdr->sh_info, 0, NULL, NULL,
				       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return FALSE;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = FALSE;

      if (i < ext_start)
	{
	  cmse_sym = &local_syms[i];
	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
						      symtab_hdr->sh_link,
						      cmse_sym->st_name);
	  if (!sym_name || !startswith (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol with local binding.  */
	  cmse_invalid = TRUE;
	}
      else
	{
	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
	  sym_name = (char *) cmse_hash->root.root.root.string;
	  if (!startswith (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol has incorrect binding or type.  */
	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
	      || cmse_hash->root.type != STT_FUNC)
	    cmse_invalid = TRUE;
	}

      if (!is_v8m)
	{
	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
				"ARMv8-M architecture or later"),
			      input_bfd, sym_name);
	  is_v8m = TRUE; /* Avoid multiple warning.  */
	  ret = FALSE;
	}

      if (cmse_invalid)
	{
	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
				" a global or weak function symbol"),
			      input_bfd, sym_name);
	  ret = FALSE;
	  /* A local special symbol has no hash entry to keep processing
	     with, so only global/weak invalid symbols fall through.  */
	  if (i < ext_start)
	    continue;
	}

      /* Strip the __acle_se_ prefix to obtain the normal symbol's name.  */
      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  if (hash || j < ext_start)
	    {
	      _bfd_error_handler
		(_("%pB: invalid standard symbol `%s'; it must be "
		   "a global or weak function symbol"),
		 input_bfd, sym_name);
	    }
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = FALSE;
	  if (!hash)
	    continue;
	}

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      /* CMSE_HASH is necessarily set here: local special symbols always
	 took the `continue' in the cmse_invalid block above, so only the
	 global branch (which assigns CMSE_HASH) reaches this point.  */
      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = FALSE;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = FALSE;
	}

      /* Skip stub creation once any error has been recorded: the link will
	 fail anyway and later diagnostics are what matter.  */
      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = FALSE;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }

  /* Only free LOCAL_SYMS if we read them ourselves; cached contents belong
     to the symtab header.  */
  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
6096
6097 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6098 code entry function, ie can be called from non secure code without using a
6099 veneer. */
6100
6101 static bfd_boolean
6102 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6103 {
6104 bfd_byte contents[4];
6105 uint32_t first_insn;
6106 asection *section;
6107 file_ptr offset;
6108 bfd *abfd;
6109
6110 /* Defined symbol of function type. */
6111 if (hash->root.root.type != bfd_link_hash_defined
6112 && hash->root.root.type != bfd_link_hash_defweak)
6113 return FALSE;
6114 if (hash->root.type != STT_FUNC)
6115 return FALSE;
6116
6117 /* Read first instruction. */
6118 section = hash->root.root.u.def.section;
6119 abfd = section->owner;
6120 offset = hash->root.root.u.def.value - section->vma;
6121 if (!bfd_get_section_contents (abfd, section, contents, offset,
6122 sizeof (contents)))
6123 return FALSE;
6124
6125 first_insn = bfd_get_32 (abfd, contents);
6126
6127 /* Starts by SG instruction. */
6128 return first_insn == 0xe97fe97f;
6129 }
6130
/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
   secure gateway veneer (ie. the veneer was not in the input import library)
   and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */
6134
6135 static bfd_boolean
6136 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6137 {
6138 struct elf32_arm_stub_hash_entry *stub_entry;
6139 struct bfd_link_info *info;
6140
6141 /* Massage our args to the form they really have. */
6142 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6143 info = (struct bfd_link_info *) gen_info;
6144
6145 if (info->out_implib_bfd)
6146 return TRUE;
6147
6148 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6149 return TRUE;
6150
6151 if (stub_entry->stub_offset == (bfd_vma) -1)
6152 _bfd_error_handler (" %s", stub_entry->output_name);
6153
6154 return TRUE;
6155 }
6156
/* Set the offset of each secure gateway veneer so that its address remains
   identical to the one in the input import library referred to by
   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
   (present in the input import library but absent from the executable being
   linked), or if new veneers appeared and there is no output import library
   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
   number of secure gateway veneers found in the input import library).

   The function returns whether an error occurred.  If no error occurred,
   *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
   and this function, and HTAB->new_cmse_stub_offset is set past the
   highest-addressed veneer observed, for new veneers to be laid out after.  */
6169
6170 static bfd_boolean
6171 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6172 struct elf32_arm_link_hash_table *htab,
6173 int *cmse_stub_created)
6174 {
6175 long symsize;
6176 char *sym_name;
6177 flagword flags;
6178 long i, symcount;
6179 bfd *in_implib_bfd;
6180 asection *stub_out_sec;
6181 bfd_boolean ret = TRUE;
6182 Elf_Internal_Sym *intsym;
6183 const char *out_sec_name;
6184 bfd_size_type cmse_stub_size;
6185 asymbol **sympp = NULL, *sym;
6186 struct elf32_arm_link_hash_entry *hash;
6187 const insn_sequence *cmse_stub_template;
6188 struct elf32_arm_stub_hash_entry *stub_entry;
6189 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6190 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6191 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6192
6193 /* No input secure gateway import library. */
6194 if (!htab->in_implib_bfd)
6195 return TRUE;
6196
6197 in_implib_bfd = htab->in_implib_bfd;
6198 if (!htab->cmse_implib)
6199 {
6200 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6201 "Gateway import libraries"), in_implib_bfd);
6202 return FALSE;
6203 }
6204
6205 /* Get symbol table size. */
6206 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6207 if (symsize < 0)
6208 return FALSE;
6209
6210 /* Read in the input secure gateway import library's symbol table. */
6211 sympp = (asymbol **) bfd_malloc (symsize);
6212 if (sympp == NULL)
6213 return FALSE;
6214
6215 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6216 if (symcount < 0)
6217 {
6218 ret = FALSE;
6219 goto free_sym_buf;
6220 }
6221
6222 htab->new_cmse_stub_offset = 0;
6223 cmse_stub_size =
6224 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6225 &cmse_stub_template,
6226 &cmse_stub_template_size);
6227 out_sec_name =
6228 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6229 stub_out_sec =
6230 bfd_get_section_by_name (htab->obfd, out_sec_name);
6231 if (stub_out_sec != NULL)
6232 cmse_stub_sec_vma = stub_out_sec->vma;
6233
6234 /* Set addresses of veneers mentionned in input secure gateway import
6235 library's symbol table. */
6236 for (i = 0; i < symcount; i++)
6237 {
6238 sym = sympp[i];
6239 flags = sym->flags;
6240 sym_name = (char *) bfd_asymbol_name (sym);
6241 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6242
6243 if (sym->section != bfd_abs_section_ptr
6244 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6245 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6246 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6247 != ST_BRANCH_TO_THUMB))
6248 {
6249 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6250 "symbol should be absolute, global and "
6251 "refer to Thumb functions"),
6252 in_implib_bfd, sym_name);
6253 ret = FALSE;
6254 continue;
6255 }
6256
6257 veneer_value = bfd_asymbol_value (sym);
6258 stub_offset = veneer_value - cmse_stub_sec_vma;
6259 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6260 FALSE, FALSE);
6261 hash = (struct elf32_arm_link_hash_entry *)
6262 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6263
6264 /* Stub entry should have been created by cmse_scan or the symbol be of
6265 a secure function callable from non secure code. */
6266 if (!stub_entry && !hash)
6267 {
6268 bfd_boolean new_stub;
6269
6270 _bfd_error_handler
6271 (_("entry function `%s' disappeared from secure code"), sym_name);
6272 hash = (struct elf32_arm_link_hash_entry *)
6273 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6274 stub_entry
6275 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6276 NULL, NULL, bfd_abs_section_ptr, hash,
6277 sym_name, veneer_value,
6278 ST_BRANCH_TO_THUMB, &new_stub);
6279 if (stub_entry == NULL)
6280 ret = FALSE;
6281 else
6282 {
6283 BFD_ASSERT (new_stub);
6284 new_cmse_stubs_created++;
6285 (*cmse_stub_created)++;
6286 }
6287 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6288 stub_entry->stub_offset = stub_offset;
6289 }
6290 /* Symbol found is not callable from non secure code. */
6291 else if (!stub_entry)
6292 {
6293 if (!cmse_entry_fct_p (hash))
6294 {
6295 _bfd_error_handler (_("`%s' refers to a non entry function"),
6296 sym_name);
6297 ret = FALSE;
6298 }
6299 continue;
6300 }
6301 else
6302 {
6303 /* Only stubs for SG veneers should have been created. */
6304 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6305
6306 /* Check visibility hasn't changed. */
6307 if (!!(flags & BSF_GLOBAL)
6308 != (hash->root.root.type == bfd_link_hash_defined))
6309 _bfd_error_handler
6310 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6311 sym_name);
6312
6313 stub_entry->stub_offset = stub_offset;
6314 }
6315
6316 /* Size should match that of a SG veneer. */
6317 if (intsym->st_size != cmse_stub_size)
6318 {
6319 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6320 in_implib_bfd, sym_name);
6321 ret = FALSE;
6322 }
6323
6324 /* Previous veneer address is before current SG veneer section. */
6325 if (veneer_value < cmse_stub_sec_vma)
6326 {
6327 /* Avoid offset underflow. */
6328 if (stub_entry)
6329 stub_entry->stub_offset = 0;
6330 stub_offset = 0;
6331 ret = FALSE;
6332 }
6333
6334 /* Complain if stub offset not a multiple of stub size. */
6335 if (stub_offset % cmse_stub_size)
6336 {
6337 _bfd_error_handler
6338 (_("offset of veneer for entry function `%s' not a multiple of "
6339 "its size"), sym_name);
6340 ret = FALSE;
6341 }
6342
6343 if (!ret)
6344 continue;
6345
6346 new_cmse_stubs_created--;
6347 if (veneer_value < cmse_stub_array_start)
6348 cmse_stub_array_start = veneer_value;
6349 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6350 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6351 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6352 }
6353
6354 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6355 {
6356 BFD_ASSERT (new_cmse_stubs_created > 0);
6357 _bfd_error_handler
6358 (_("new entry function(s) introduced but no output import library "
6359 "specified:"));
6360 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6361 }
6362
6363 if (cmse_stub_array_start != cmse_stub_sec_vma)
6364 {
6365 _bfd_error_handler
6366 (_("start address of `%s' is different from previous link"),
6367 out_sec_name);
6368 ret = FALSE;
6369 }
6370
6371 free_sym_buf:
6372 free (sympp);
6373 return ret;
6374 }
6375
6376 /* Determine and set the size of the stub section for a final link.
6377
6378 The basic idea here is to examine all the relocations looking for
6379 PC-relative calls to a target that is unreachable with a "bl"
6380 instruction. */
6381
6382 bfd_boolean
6383 elf32_arm_size_stubs (bfd *output_bfd,
6384 bfd *stub_bfd,
6385 struct bfd_link_info *info,
6386 bfd_signed_vma group_size,
6387 asection * (*add_stub_section) (const char *, asection *,
6388 asection *,
6389 unsigned int),
6390 void (*layout_sections_again) (void))
6391 {
6392 bfd_boolean ret = TRUE;
6393 obj_attribute *out_attr;
6394 int cmse_stub_created = 0;
6395 bfd_size_type stub_group_size;
6396 bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
6397 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6398 struct a8_erratum_fix *a8_fixes = NULL;
6399 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6400 struct a8_erratum_reloc *a8_relocs = NULL;
6401 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6402
6403 if (htab == NULL)
6404 return FALSE;
6405
6406 if (htab->fix_cortex_a8)
6407 {
6408 a8_fixes = (struct a8_erratum_fix *)
6409 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6410 a8_relocs = (struct a8_erratum_reloc *)
6411 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6412 }
6413
6414 /* Propagate mach to stub bfd, because it may not have been
6415 finalized when we created stub_bfd. */
6416 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6417 bfd_get_mach (output_bfd));
6418
6419 /* Stash our params away. */
6420 htab->stub_bfd = stub_bfd;
6421 htab->add_stub_section = add_stub_section;
6422 htab->layout_sections_again = layout_sections_again;
6423 stubs_always_after_branch = group_size < 0;
6424
6425 out_attr = elf_known_obj_attributes_proc (output_bfd);
6426 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6427
6428 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6429 as the first half of a 32-bit branch straddling two 4K pages. This is a
6430 crude way of enforcing that. */
6431 if (htab->fix_cortex_a8)
6432 stubs_always_after_branch = 1;
6433
6434 if (group_size < 0)
6435 stub_group_size = -group_size;
6436 else
6437 stub_group_size = group_size;
6438
6439 if (stub_group_size == 1)
6440 {
6441 /* Default values. */
6442 /* Thumb branch range is +-4MB has to be used as the default
6443 maximum size (a given section can contain both ARM and Thumb
6444 code, so the worst case has to be taken into account).
6445
6446 This value is 24K less than that, which allows for 2025
6447 12-byte stubs. If we exceed that, then we will fail to link.
6448 The user will have to relink with an explicit group size
6449 option. */
6450 stub_group_size = 4170000;
6451 }
6452
6453 group_sections (htab, stub_group_size, stubs_always_after_branch);
6454
6455 /* If we're applying the cortex A8 fix, we need to determine the
6456 program header size now, because we cannot change it later --
6457 that could alter section placements. Notice the A8 erratum fix
6458 ends up requiring the section addresses to remain unchanged
6459 modulo the page size. That's something we cannot represent
6460 inside BFD, and we don't want to force the section alignment to
6461 be the page size. */
6462 if (htab->fix_cortex_a8)
6463 (*htab->layout_sections_again) ();
6464
6465 while (1)
6466 {
6467 bfd *input_bfd;
6468 unsigned int bfd_indx;
6469 asection *stub_sec;
6470 enum elf32_arm_stub_type stub_type;
6471 bfd_boolean stub_changed = FALSE;
6472 unsigned prev_num_a8_fixes = num_a8_fixes;
6473
6474 num_a8_fixes = 0;
6475 for (input_bfd = info->input_bfds, bfd_indx = 0;
6476 input_bfd != NULL;
6477 input_bfd = input_bfd->link.next, bfd_indx++)
6478 {
6479 Elf_Internal_Shdr *symtab_hdr;
6480 asection *section;
6481 Elf_Internal_Sym *local_syms = NULL;
6482
6483 if (!is_arm_elf (input_bfd))
6484 continue;
6485 if ((input_bfd->flags & DYNAMIC) != 0
6486 && (elf_sym_hashes (input_bfd) == NULL
6487 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6488 continue;
6489
6490 num_a8_relocs = 0;
6491
6492 /* We'll need the symbol table in a second. */
6493 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6494 if (symtab_hdr->sh_info == 0)
6495 continue;
6496
6497 /* Limit scan of symbols to object file whose profile is
6498 Microcontroller to not hinder performance in the general case. */
6499 if (m_profile && first_veneer_scan)
6500 {
6501 struct elf_link_hash_entry **sym_hashes;
6502
6503 sym_hashes = elf_sym_hashes (input_bfd);
6504 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6505 &cmse_stub_created))
6506 goto error_ret_free_local;
6507
6508 if (cmse_stub_created != 0)
6509 stub_changed = TRUE;
6510 }
6511
6512 /* Walk over each section attached to the input bfd. */
6513 for (section = input_bfd->sections;
6514 section != NULL;
6515 section = section->next)
6516 {
6517 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6518
6519 /* If there aren't any relocs, then there's nothing more
6520 to do. */
6521 if ((section->flags & SEC_RELOC) == 0
6522 || section->reloc_count == 0
6523 || (section->flags & SEC_CODE) == 0)
6524 continue;
6525
6526 /* If this section is a link-once section that will be
6527 discarded, then don't create any stubs. */
6528 if (section->output_section == NULL
6529 || section->output_section->owner != output_bfd)
6530 continue;
6531
6532 /* Get the relocs. */
6533 internal_relocs
6534 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6535 NULL, info->keep_memory);
6536 if (internal_relocs == NULL)
6537 goto error_ret_free_local;
6538
6539 /* Now examine each relocation. */
6540 irela = internal_relocs;
6541 irelaend = irela + section->reloc_count;
6542 for (; irela < irelaend; irela++)
6543 {
6544 unsigned int r_type, r_indx;
6545 asection *sym_sec;
6546 bfd_vma sym_value;
6547 bfd_vma destination;
6548 struct elf32_arm_link_hash_entry *hash;
6549 const char *sym_name;
6550 unsigned char st_type;
6551 enum arm_st_branch_type branch_type;
6552 bfd_boolean created_stub = FALSE;
6553
6554 r_type = ELF32_R_TYPE (irela->r_info);
6555 r_indx = ELF32_R_SYM (irela->r_info);
6556
6557 if (r_type >= (unsigned int) R_ARM_max)
6558 {
6559 bfd_set_error (bfd_error_bad_value);
6560 error_ret_free_internal:
6561 if (elf_section_data (section)->relocs == NULL)
6562 free (internal_relocs);
6563 /* Fall through. */
6564 error_ret_free_local:
6565 if (symtab_hdr->contents != (unsigned char *) local_syms)
6566 free (local_syms);
6567 return FALSE;
6568 }
6569
6570 hash = NULL;
6571 if (r_indx >= symtab_hdr->sh_info)
6572 hash = elf32_arm_hash_entry
6573 (elf_sym_hashes (input_bfd)
6574 [r_indx - symtab_hdr->sh_info]);
6575
6576 /* Only look for stubs on branch instructions, or
6577 non-relaxed TLSCALL */
6578 if ((r_type != (unsigned int) R_ARM_CALL)
6579 && (r_type != (unsigned int) R_ARM_THM_CALL)
6580 && (r_type != (unsigned int) R_ARM_JUMP24)
6581 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6582 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6583 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6584 && (r_type != (unsigned int) R_ARM_PLT32)
6585 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6586 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6587 && r_type == elf32_arm_tls_transition
6588 (info, r_type, &hash->root)
6589 && ((hash ? hash->tls_type
6590 : (elf32_arm_local_got_tls_type
6591 (input_bfd)[r_indx]))
6592 & GOT_TLS_GDESC) != 0))
6593 continue;
6594
6595 /* Now determine the call target, its name, value,
6596 section. */
6597 sym_sec = NULL;
6598 sym_value = 0;
6599 destination = 0;
6600 sym_name = NULL;
6601
6602 if (r_type == (unsigned int) R_ARM_TLS_CALL
6603 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6604 {
6605 /* A non-relaxed TLS call. The target is the
6606 plt-resident trampoline and nothing to do
6607 with the symbol. */
6608 BFD_ASSERT (htab->tls_trampoline > 0);
6609 sym_sec = htab->root.splt;
6610 sym_value = htab->tls_trampoline;
6611 hash = 0;
6612 st_type = STT_FUNC;
6613 branch_type = ST_BRANCH_TO_ARM;
6614 }
6615 else if (!hash)
6616 {
6617 /* It's a local symbol. */
6618 Elf_Internal_Sym *sym;
6619
6620 if (local_syms == NULL)
6621 {
6622 local_syms
6623 = (Elf_Internal_Sym *) symtab_hdr->contents;
6624 if (local_syms == NULL)
6625 local_syms
6626 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6627 symtab_hdr->sh_info, 0,
6628 NULL, NULL, NULL);
6629 if (local_syms == NULL)
6630 goto error_ret_free_internal;
6631 }
6632
6633 sym = local_syms + r_indx;
6634 if (sym->st_shndx == SHN_UNDEF)
6635 sym_sec = bfd_und_section_ptr;
6636 else if (sym->st_shndx == SHN_ABS)
6637 sym_sec = bfd_abs_section_ptr;
6638 else if (sym->st_shndx == SHN_COMMON)
6639 sym_sec = bfd_com_section_ptr;
6640 else
6641 sym_sec =
6642 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6643
6644 if (!sym_sec)
6645 /* This is an undefined symbol. It can never
6646 be resolved. */
6647 continue;
6648
6649 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6650 sym_value = sym->st_value;
6651 destination = (sym_value + irela->r_addend
6652 + sym_sec->output_offset
6653 + sym_sec->output_section->vma);
6654 st_type = ELF_ST_TYPE (sym->st_info);
6655 branch_type =
6656 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6657 sym_name
6658 = bfd_elf_string_from_elf_section (input_bfd,
6659 symtab_hdr->sh_link,
6660 sym->st_name);
6661 }
6662 else
6663 {
6664 /* It's an external symbol. */
6665 while (hash->root.root.type == bfd_link_hash_indirect
6666 || hash->root.root.type == bfd_link_hash_warning)
6667 hash = ((struct elf32_arm_link_hash_entry *)
6668 hash->root.root.u.i.link);
6669
6670 if (hash->root.root.type == bfd_link_hash_defined
6671 || hash->root.root.type == bfd_link_hash_defweak)
6672 {
6673 sym_sec = hash->root.root.u.def.section;
6674 sym_value = hash->root.root.u.def.value;
6675
6676 struct elf32_arm_link_hash_table *globals =
6677 elf32_arm_hash_table (info);
6678
6679 /* For a destination in a shared library,
6680 use the PLT stub as target address to
6681 decide whether a branch stub is
6682 needed. */
6683 if (globals != NULL
6684 && globals->root.splt != NULL
6685 && hash != NULL
6686 && hash->root.plt.offset != (bfd_vma) -1)
6687 {
6688 sym_sec = globals->root.splt;
6689 sym_value = hash->root.plt.offset;
6690 if (sym_sec->output_section != NULL)
6691 destination = (sym_value
6692 + sym_sec->output_offset
6693 + sym_sec->output_section->vma);
6694 }
6695 else if (sym_sec->output_section != NULL)
6696 destination = (sym_value + irela->r_addend
6697 + sym_sec->output_offset
6698 + sym_sec->output_section->vma);
6699 }
6700 else if ((hash->root.root.type == bfd_link_hash_undefined)
6701 || (hash->root.root.type == bfd_link_hash_undefweak))
6702 {
6703 /* For a shared library, use the PLT stub as
6704 target address to decide whether a long
6705 branch stub is needed.
6706 For absolute code, they cannot be handled. */
6707 struct elf32_arm_link_hash_table *globals =
6708 elf32_arm_hash_table (info);
6709
6710 if (globals != NULL
6711 && globals->root.splt != NULL
6712 && hash != NULL
6713 && hash->root.plt.offset != (bfd_vma) -1)
6714 {
6715 sym_sec = globals->root.splt;
6716 sym_value = hash->root.plt.offset;
6717 if (sym_sec->output_section != NULL)
6718 destination = (sym_value
6719 + sym_sec->output_offset
6720 + sym_sec->output_section->vma);
6721 }
6722 else
6723 continue;
6724 }
6725 else
6726 {
6727 bfd_set_error (bfd_error_bad_value);
6728 goto error_ret_free_internal;
6729 }
6730 st_type = hash->root.type;
6731 branch_type =
6732 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6733 sym_name = hash->root.root.root.string;
6734 }
6735
6736 do
6737 {
6738 bfd_boolean new_stub;
6739 struct elf32_arm_stub_hash_entry *stub_entry;
6740
6741 /* Determine what (if any) linker stub is needed. */
6742 stub_type = arm_type_of_stub (info, section, irela,
6743 st_type, &branch_type,
6744 hash, destination, sym_sec,
6745 input_bfd, sym_name);
6746 if (stub_type == arm_stub_none)
6747 break;
6748
6749 /* We've either created a stub for this reloc already,
6750 or we are about to. */
6751 stub_entry =
6752 elf32_arm_create_stub (htab, stub_type, section, irela,
6753 sym_sec, hash,
6754 (char *) sym_name, sym_value,
6755 branch_type, &new_stub);
6756
6757 created_stub = stub_entry != NULL;
6758 if (!created_stub)
6759 goto error_ret_free_internal;
6760 else if (!new_stub)
6761 break;
6762 else
6763 stub_changed = TRUE;
6764 }
6765 while (0);
6766
6767 /* Look for relocations which might trigger Cortex-A8
6768 erratum. */
6769 if (htab->fix_cortex_a8
6770 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6771 || r_type == (unsigned int) R_ARM_THM_JUMP19
6772 || r_type == (unsigned int) R_ARM_THM_CALL
6773 || r_type == (unsigned int) R_ARM_THM_XPC22))
6774 {
6775 bfd_vma from = section->output_section->vma
6776 + section->output_offset
6777 + irela->r_offset;
6778
6779 if ((from & 0xfff) == 0xffe)
6780 {
6781 /* Found a candidate. Note we haven't checked the
6782 destination is within 4K here: if we do so (and
6783 don't create an entry in a8_relocs) we can't tell
6784 that a branch should have been relocated when
6785 scanning later. */
6786 if (num_a8_relocs == a8_reloc_table_size)
6787 {
6788 a8_reloc_table_size *= 2;
6789 a8_relocs = (struct a8_erratum_reloc *)
6790 bfd_realloc (a8_relocs,
6791 sizeof (struct a8_erratum_reloc)
6792 * a8_reloc_table_size);
6793 }
6794
6795 a8_relocs[num_a8_relocs].from = from;
6796 a8_relocs[num_a8_relocs].destination = destination;
6797 a8_relocs[num_a8_relocs].r_type = r_type;
6798 a8_relocs[num_a8_relocs].branch_type = branch_type;
6799 a8_relocs[num_a8_relocs].sym_name = sym_name;
6800 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6801 a8_relocs[num_a8_relocs].hash = hash;
6802
6803 num_a8_relocs++;
6804 }
6805 }
6806 }
6807
6808 /* We're done with the internal relocs, free them. */
6809 if (elf_section_data (section)->relocs == NULL)
6810 free (internal_relocs);
6811 }
6812
6813 if (htab->fix_cortex_a8)
6814 {
6815 /* Sort relocs which might apply to Cortex-A8 erratum. */
6816 qsort (a8_relocs, num_a8_relocs,
6817 sizeof (struct a8_erratum_reloc),
6818 &a8_reloc_compare);
6819
6820 /* Scan for branches which might trigger Cortex-A8 erratum. */
6821 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6822 &num_a8_fixes, &a8_fix_table_size,
6823 a8_relocs, num_a8_relocs,
6824 prev_num_a8_fixes, &stub_changed)
6825 != 0)
6826 goto error_ret_free_local;
6827 }
6828
6829 if (local_syms != NULL
6830 && symtab_hdr->contents != (unsigned char *) local_syms)
6831 {
6832 if (!info->keep_memory)
6833 free (local_syms);
6834 else
6835 symtab_hdr->contents = (unsigned char *) local_syms;
6836 }
6837 }
6838
6839 if (first_veneer_scan
6840 && !set_cmse_veneer_addr_from_implib (info, htab,
6841 &cmse_stub_created))
6842 ret = FALSE;
6843
6844 if (prev_num_a8_fixes != num_a8_fixes)
6845 stub_changed = TRUE;
6846
6847 if (!stub_changed)
6848 break;
6849
6850 /* OK, we've added some stubs. Find out the new size of the
6851 stub sections. */
6852 for (stub_sec = htab->stub_bfd->sections;
6853 stub_sec != NULL;
6854 stub_sec = stub_sec->next)
6855 {
6856 /* Ignore non-stub sections. */
6857 if (!strstr (stub_sec->name, STUB_SUFFIX))
6858 continue;
6859
6860 stub_sec->size = 0;
6861 }
6862
6863 /* Add new SG veneers after those already in the input import
6864 library. */
6865 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6866 stub_type++)
6867 {
6868 bfd_vma *start_offset_p;
6869 asection **stub_sec_p;
6870
6871 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6872 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6873 if (start_offset_p == NULL)
6874 continue;
6875
6876 BFD_ASSERT (stub_sec_p != NULL);
6877 if (*stub_sec_p != NULL)
6878 (*stub_sec_p)->size = *start_offset_p;
6879 }
6880
6881 /* Compute stub section size, considering padding. */
6882 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6883 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6884 stub_type++)
6885 {
6886 int size, padding;
6887 asection **stub_sec_p;
6888
6889 padding = arm_dedicated_stub_section_padding (stub_type);
6890 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6891 /* Skip if no stub input section or no stub section padding
6892 required. */
6893 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6894 continue;
6895 /* Stub section padding required but no dedicated section. */
6896 BFD_ASSERT (stub_sec_p);
6897
6898 size = (*stub_sec_p)->size;
6899 size = (size + padding - 1) & ~(padding - 1);
6900 (*stub_sec_p)->size = size;
6901 }
6902
6903 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6904 if (htab->fix_cortex_a8)
6905 for (i = 0; i < num_a8_fixes; i++)
6906 {
6907 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6908 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6909
6910 if (stub_sec == NULL)
6911 return FALSE;
6912
6913 stub_sec->size
6914 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6915 NULL);
6916 }
6917
6918
6919 /* Ask the linker to do its stuff. */
6920 (*htab->layout_sections_again) ();
6921 first_veneer_scan = FALSE;
6922 }
6923
6924 /* Add stubs for Cortex-A8 erratum fixes now. */
6925 if (htab->fix_cortex_a8)
6926 {
6927 for (i = 0; i < num_a8_fixes; i++)
6928 {
6929 struct elf32_arm_stub_hash_entry *stub_entry;
6930 char *stub_name = a8_fixes[i].stub_name;
6931 asection *section = a8_fixes[i].section;
6932 unsigned int section_id = a8_fixes[i].section->id;
6933 asection *link_sec = htab->stub_group[section_id].link_sec;
6934 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6935 const insn_sequence *template_sequence;
6936 int template_size, size = 0;
6937
6938 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6939 TRUE, FALSE);
6940 if (stub_entry == NULL)
6941 {
6942 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6943 section->owner, stub_name);
6944 return FALSE;
6945 }
6946
6947 stub_entry->stub_sec = stub_sec;
6948 stub_entry->stub_offset = (bfd_vma) -1;
6949 stub_entry->id_sec = link_sec;
6950 stub_entry->stub_type = a8_fixes[i].stub_type;
6951 stub_entry->source_value = a8_fixes[i].offset;
6952 stub_entry->target_section = a8_fixes[i].section;
6953 stub_entry->target_value = a8_fixes[i].target_offset;
6954 stub_entry->orig_insn = a8_fixes[i].orig_insn;
6955 stub_entry->branch_type = a8_fixes[i].branch_type;
6956
6957 size = find_stub_size_and_template (a8_fixes[i].stub_type,
6958 &template_sequence,
6959 &template_size);
6960
6961 stub_entry->stub_size = size;
6962 stub_entry->stub_template = template_sequence;
6963 stub_entry->stub_template_size = template_size;
6964 }
6965
6966 /* Stash the Cortex-A8 erratum fix array for use later in
6967 elf32_arm_write_section(). */
6968 htab->a8_erratum_fixes = a8_fixes;
6969 htab->num_a8_erratum_fixes = num_a8_fixes;
6970 }
6971 else
6972 {
6973 htab->a8_erratum_fixes = NULL;
6974 htab->num_a8_erratum_fixes = 0;
6975 }
6976 return ret;
6977 }
6978
6979 /* Build all the stubs associated with the current output file. The
6980 stubs are kept in a hash table attached to the main linker hash
6981 table. We also set up the .plt entries for statically linked PIC
6982 functions here. This function is called via arm_elf_finish in the
6983 linker. */
6984
6985 bfd_boolean
6986 elf32_arm_build_stubs (struct bfd_link_info *info)
6987 {
6988 asection *stub_sec;
6989 struct bfd_hash_table *table;
6990 enum elf32_arm_stub_type stub_type;
6991 struct elf32_arm_link_hash_table *htab;
6992
6993 htab = elf32_arm_hash_table (info);
6994 if (htab == NULL)
6995 return FALSE;
6996
6997 for (stub_sec = htab->stub_bfd->sections;
6998 stub_sec != NULL;
6999 stub_sec = stub_sec->next)
7000 {
7001 bfd_size_type size;
7002
7003 /* Ignore non-stub sections. */
7004 if (!strstr (stub_sec->name, STUB_SUFFIX))
7005 continue;
7006
7007 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7008 must at least be done for stub section requiring padding and for SG
7009 veneers to ensure that a non secure code branching to a removed SG
7010 veneer causes an error. */
7011 size = stub_sec->size;
7012 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7013 if (stub_sec->contents == NULL && size != 0)
7014 return FALSE;
7015
7016 stub_sec->size = 0;
7017 }
7018
7019 /* Add new SG veneers after those already in the input import library. */
7020 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7021 {
7022 bfd_vma *start_offset_p;
7023 asection **stub_sec_p;
7024
7025 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7026 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7027 if (start_offset_p == NULL)
7028 continue;
7029
7030 BFD_ASSERT (stub_sec_p != NULL);
7031 if (*stub_sec_p != NULL)
7032 (*stub_sec_p)->size = *start_offset_p;
7033 }
7034
7035 /* Build the stubs as directed by the stub hash table. */
7036 table = &htab->stub_hash_table;
7037 bfd_hash_traverse (table, arm_build_one_stub, info);
7038 if (htab->fix_cortex_a8)
7039 {
7040 /* Place the cortex a8 stubs last. */
7041 htab->fix_cortex_a8 = -1;
7042 bfd_hash_traverse (table, arm_build_one_stub, info);
7043 }
7044
7045 return TRUE;
7046 }
7047
7048 /* Locate the Thumb encoded calling stub for NAME. */
7049
7050 static struct elf_link_hash_entry *
7051 find_thumb_glue (struct bfd_link_info *link_info,
7052 const char *name,
7053 char **error_message)
7054 {
7055 char *tmp_name;
7056 struct elf_link_hash_entry *hash;
7057 struct elf32_arm_link_hash_table *hash_table;
7058
7059 /* We need a pointer to the armelf specific hash table. */
7060 hash_table = elf32_arm_hash_table (link_info);
7061 if (hash_table == NULL)
7062 return NULL;
7063
7064 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7065 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7066
7067 BFD_ASSERT (tmp_name);
7068
7069 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7070
7071 hash = elf_link_hash_lookup
7072 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7073
7074 if (hash == NULL
7075 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7076 "Thumb", tmp_name, name) == -1)
7077 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7078
7079 free (tmp_name);
7080
7081 return hash;
7082 }
7083
7084 /* Locate the ARM encoded calling stub for NAME. */
7085
7086 static struct elf_link_hash_entry *
7087 find_arm_glue (struct bfd_link_info *link_info,
7088 const char *name,
7089 char **error_message)
7090 {
7091 char *tmp_name;
7092 struct elf_link_hash_entry *myh;
7093 struct elf32_arm_link_hash_table *hash_table;
7094
7095 /* We need a pointer to the elfarm specific hash table. */
7096 hash_table = elf32_arm_hash_table (link_info);
7097 if (hash_table == NULL)
7098 return NULL;
7099
7100 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7101 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7102 BFD_ASSERT (tmp_name);
7103
7104 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7105
7106 myh = elf_link_hash_lookup
7107 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7108
7109 if (myh == NULL
7110 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7111 "ARM", tmp_name, name) == -1)
7112 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7113
7114 free (tmp_name);
7115
7116 return myh;
7117 }
7118
7119 /* ARM->Thumb glue (static images):
7120
7121 .arm
7122 __func_from_arm:
7123 ldr r12, __func_addr
7124 bx r12
7125 __func_addr:
7126 .word func @ behave as if you saw a ARM_32 reloc.
7127
7128 (v5t static images)
7129 .arm
7130 __func_from_arm:
7131 ldr pc, __func_addr
7132 __func_addr:
7133 .word func @ behave as if you saw a ARM_32 reloc.
7134
7135 (relocatable images)
7136 .arm
7137 __func_from_arm:
7138 ldr r12, __func_offset
7139 add r12, r12, pc
7140 bx r12
7141 __func_offset:
7142 .word func - . */
7143
/* Opcodes for the glue sequences shown in the comment above.  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, [pc, #0] */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12 */
static const insn32 a2t3_func_addr_insn = 0x00000001;	/* .word func placeholder */

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, [pc, #-4] */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* .word func placeholder */

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, [pc, #4] */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12 */
7157
7158 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
7159
7160 .thumb .thumb
7161 .align 2 .align 2
7162 __func_from_thumb: __func_from_thumb:
7163 bx pc push {r6, lr}
7164 nop ldr r6, __func_addr
7165 .arm mov lr, pc
7166 b func bx r6
7167 .arm
7168 ;; back_to_thumb
7169 ldmia r13! {r6, lr}
7170 bx lr
7171 __func_addr:
7172 .word func */
7173
#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;	/* bx pc */
static const insn16 t2a2_noop_insn = 0x46c0;	/* nop (mov r8, r8) */
static const insn32 t2a3_b_insn = 0xea000000;	/* b <target>; branch offset patched later */

/* Sizes of the erratum veneers emitted by the record_*_erratum_veneer
   functions below.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer.  Base encodings with a zero register field; the
   register is presumably substituted when the veneer is written out —
   TODO(review): confirm in elf32_arm_write_section.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst rN, #1 */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN */
7187
7188 #ifndef ELFARM_NABI_C_INCLUDED
7189 static void
7190 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7191 {
7192 asection * s;
7193 bfd_byte * contents;
7194
7195 if (size == 0)
7196 {
7197 /* Do not include empty glue sections in the output. */
7198 if (abfd != NULL)
7199 {
7200 s = bfd_get_linker_section (abfd, name);
7201 if (s != NULL)
7202 s->flags |= SEC_EXCLUDE;
7203 }
7204 return;
7205 }
7206
7207 BFD_ASSERT (abfd != NULL);
7208
7209 s = bfd_get_linker_section (abfd, name);
7210 BFD_ASSERT (s != NULL);
7211
7212 contents = (bfd_byte *) bfd_zalloc (abfd, size);
7213
7214 BFD_ASSERT (s->size == size);
7215 s->contents = contents;
7216 }
7217
7218 bfd_boolean
7219 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7220 {
7221 struct elf32_arm_link_hash_table * globals;
7222
7223 globals = elf32_arm_hash_table (info);
7224 BFD_ASSERT (globals != NULL);
7225
7226 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7227 globals->arm_glue_size,
7228 ARM2THUMB_GLUE_SECTION_NAME);
7229
7230 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7231 globals->thumb_glue_size,
7232 THUMB2ARM_GLUE_SECTION_NAME);
7233
7234 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7235 globals->vfp11_erratum_glue_size,
7236 VFP11_ERRATUM_VENEER_SECTION_NAME);
7237
7238 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7239 globals->stm32l4xx_erratum_glue_size,
7240 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7241
7242 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7243 globals->bx_glue_size,
7244 ARM_BX_GLUE_SECTION_NAME);
7245
7246 return TRUE;
7247 }
7248
7249 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7250 returns the symbol identifying the stub. */
7251
7252 static struct elf_link_hash_entry *
7253 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7254 struct elf_link_hash_entry * h)
7255 {
7256 const char * name = h->root.root.string;
7257 asection * s;
7258 char * tmp_name;
7259 struct elf_link_hash_entry * myh;
7260 struct bfd_link_hash_entry * bh;
7261 struct elf32_arm_link_hash_table * globals;
7262 bfd_vma val;
7263 bfd_size_type size;
7264
7265 globals = elf32_arm_hash_table (link_info);
7266 BFD_ASSERT (globals != NULL);
7267 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7268
7269 s = bfd_get_linker_section
7270 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7271
7272 BFD_ASSERT (s != NULL);
7273
7274 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7275 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7276 BFD_ASSERT (tmp_name);
7277
7278 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7279
7280 myh = elf_link_hash_lookup
7281 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7282
7283 if (myh != NULL)
7284 {
7285 /* We've already seen this guy. */
7286 free (tmp_name);
7287 return myh;
7288 }
7289
7290 /* The only trick here is using hash_table->arm_glue_size as the value.
7291 Even though the section isn't allocated yet, this is where we will be
7292 putting it. The +1 on the value marks that the stub has not been
7293 output yet - not that it is a Thumb function. */
7294 bh = NULL;
7295 val = globals->arm_glue_size + 1;
7296 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7297 tmp_name, BSF_GLOBAL, s, val,
7298 NULL, TRUE, FALSE, &bh);
7299
7300 myh = (struct elf_link_hash_entry *) bh;
7301 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7302 myh->forced_local = 1;
7303
7304 free (tmp_name);
7305
7306 if (bfd_link_pic (link_info)
7307 || globals->root.is_relocatable_executable
7308 || globals->pic_veneer)
7309 size = ARM2THUMB_PIC_GLUE_SIZE;
7310 else if (globals->use_blx)
7311 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7312 else
7313 size = ARM2THUMB_STATIC_GLUE_SIZE;
7314
7315 s->size += size;
7316 globals->arm_glue_size += size;
7317
7318 return myh;
7319 }
7320
7321 /* Allocate space for ARMv4 BX veneers. */
7322
7323 static void
7324 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7325 {
7326 asection * s;
7327 struct elf32_arm_link_hash_table *globals;
7328 char *tmp_name;
7329 struct elf_link_hash_entry *myh;
7330 struct bfd_link_hash_entry *bh;
7331 bfd_vma val;
7332
7333 /* BX PC does not need a veneer. */
7334 if (reg == 15)
7335 return;
7336
7337 globals = elf32_arm_hash_table (link_info);
7338 BFD_ASSERT (globals != NULL);
7339 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7340
7341 /* Check if this veneer has already been allocated. */
7342 if (globals->bx_glue_offset[reg])
7343 return;
7344
7345 s = bfd_get_linker_section
7346 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7347
7348 BFD_ASSERT (s != NULL);
7349
7350 /* Add symbol for veneer. */
7351 tmp_name = (char *)
7352 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7353 BFD_ASSERT (tmp_name);
7354
7355 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7356
7357 myh = elf_link_hash_lookup
7358 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
7359
7360 BFD_ASSERT (myh == NULL);
7361
7362 bh = NULL;
7363 val = globals->bx_glue_size;
7364 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7365 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7366 NULL, TRUE, FALSE, &bh);
7367
7368 myh = (struct elf_link_hash_entry *) bh;
7369 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7370 myh->forced_local = 1;
7371
7372 s->size += ARM_BX_VENEER_SIZE;
7373 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7374 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7375 }
7376
7377
7378 /* Add an entry to the code/data map for section SEC. */
7379
7380 static void
7381 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7382 {
7383 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7384 unsigned int newidx;
7385
7386 if (sec_data->map == NULL)
7387 {
7388 sec_data->map = (elf32_arm_section_map *)
7389 bfd_malloc (sizeof (elf32_arm_section_map));
7390 sec_data->mapcount = 0;
7391 sec_data->mapsize = 1;
7392 }
7393
7394 newidx = sec_data->mapcount++;
7395
7396 if (sec_data->mapcount > sec_data->mapsize)
7397 {
7398 sec_data->mapsize *= 2;
7399 sec_data->map = (elf32_arm_section_map *)
7400 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7401 * sizeof (elf32_arm_section_map));
7402 }
7403
7404 if (sec_data->map)
7405 {
7406 sec_data->map[newidx].vma = vma;
7407 sec_data->map[newidx].type = type;
7408 }
7409 }
7410
7411
7412 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7413 veneers are handled for now. */
7414
7415 static bfd_vma
7416 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7417 elf32_vfp11_erratum_list *branch,
7418 bfd *branch_bfd,
7419 asection *branch_sec,
7420 unsigned int offset)
7421 {
7422 asection *s;
7423 struct elf32_arm_link_hash_table *hash_table;
7424 char *tmp_name;
7425 struct elf_link_hash_entry *myh;
7426 struct bfd_link_hash_entry *bh;
7427 bfd_vma val;
7428 struct _arm_elf_section_data *sec_data;
7429 elf32_vfp11_erratum_list *newerr;
7430
7431 hash_table = elf32_arm_hash_table (link_info);
7432 BFD_ASSERT (hash_table != NULL);
7433 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7434
7435 s = bfd_get_linker_section
7436 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7437
7438 sec_data = elf32_arm_section_data (s);
7439
7440 BFD_ASSERT (s != NULL);
7441
7442 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7443 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7444 BFD_ASSERT (tmp_name);
7445
7446 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7447 hash_table->num_vfp11_fixes);
7448
7449 myh = elf_link_hash_lookup
7450 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7451
7452 BFD_ASSERT (myh == NULL);
7453
7454 bh = NULL;
7455 val = hash_table->vfp11_erratum_glue_size;
7456 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7457 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7458 NULL, TRUE, FALSE, &bh);
7459
7460 myh = (struct elf_link_hash_entry *) bh;
7461 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7462 myh->forced_local = 1;
7463
7464 /* Link veneer back to calling location. */
7465 sec_data->erratumcount += 1;
7466 newerr = (elf32_vfp11_erratum_list *)
7467 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7468
7469 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7470 newerr->vma = -1;
7471 newerr->u.v.branch = branch;
7472 newerr->u.v.id = hash_table->num_vfp11_fixes;
7473 branch->u.b.veneer = newerr;
7474
7475 newerr->next = sec_data->erratumlist;
7476 sec_data->erratumlist = newerr;
7477
7478 /* A symbol for the return from the veneer. */
7479 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7480 hash_table->num_vfp11_fixes);
7481
7482 myh = elf_link_hash_lookup
7483 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7484
7485 if (myh != NULL)
7486 abort ();
7487
7488 bh = NULL;
7489 val = offset + 4;
7490 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7491 branch_sec, val, NULL, TRUE, FALSE, &bh);
7492
7493 myh = (struct elf_link_hash_entry *) bh;
7494 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7495 myh->forced_local = 1;
7496
7497 free (tmp_name);
7498
7499 /* Generate a mapping symbol for the veneer section, and explicitly add an
7500 entry for that symbol to the code/data map for the section. */
7501 if (hash_table->vfp11_erratum_glue_size == 0)
7502 {
7503 bh = NULL;
7504 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7505 ever requires this erratum fix. */
7506 _bfd_generic_link_add_one_symbol (link_info,
7507 hash_table->bfd_of_glue_owner, "$a",
7508 BSF_LOCAL, s, 0, NULL,
7509 TRUE, FALSE, &bh);
7510
7511 myh = (struct elf_link_hash_entry *) bh;
7512 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7513 myh->forced_local = 1;
7514
7515 /* The elf32_arm_init_maps function only cares about symbols from input
7516 BFDs. We must make a note of this generated mapping symbol
7517 ourselves so that code byteswapping works properly in
7518 elf32_arm_write_section. */
7519 elf32_arm_section_map_add (s, 'a', 0);
7520 }
7521
7522 s->size += VFP11_ERRATUM_VENEER_SIZE;
7523 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7524 hash_table->num_vfp11_fixes++;
7525
7526 /* The offset of the veneer. */
7527 return val;
7528 }
7529
7530 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7531 veneers need to be handled because used only in Cortex-M. */
7532
static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* +10 leaves room for the fix number formatted into the entry-name
     template by the sprintf below.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  /* The veneer symbol must not already exist.  */
  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer symbol at the current end of the glue section.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  /* Push onto the head of the glue section's erratum list.  */
  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer, placed just past the
     offending location (OFFSET + 4) in the branch section.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  NOTE(review): VAL was last assigned
     OFFSET + 4 (the return-symbol value in BRANCH_SEC), not the veneer's
     glue-section offset — confirm callers expect this value.  */
  return val;
}
7647
/* Section flags used for every linker-created glue section: allocated,
   loaded, read-only code held in memory.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
7651
7652 /* Create a fake section for use by the ARM backend of the linker. */
7653
7654 static bfd_boolean
7655 arm_make_glue_section (bfd * abfd, const char * name)
7656 {
7657 asection * sec;
7658
7659 sec = bfd_get_linker_section (abfd, name);
7660 if (sec != NULL)
7661 /* Already made. */
7662 return TRUE;
7663
7664 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7665
7666 if (sec == NULL
7667 || !bfd_set_section_alignment (sec, 2))
7668 return FALSE;
7669
7670 /* Set the gc mark to prevent the section from being removed by garbage
7671 collection, despite the fact that no relocs refer to this section. */
7672 sec->gc_mark = 1;
7673
7674 return TRUE;
7675 }
7676
7677 /* Set size of .plt entries. This function is called from the
7678 linker scripts in ld/emultempl/{armelf}.em. */
7679
void
bfd_elf32_arm_use_long_plt (void)
{
  /* Only sets a file-scope flag; readers of elf32_arm_use_long_plt_entry
     elsewhere in this file pick the long .plt entry layout.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
7685
7686 /* Add the glue sections to ABFD. This function is called from the
7687 linker scripts in ld/emultempl/{armelf}.em. */
7688
7689 bfd_boolean
7690 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7691 struct bfd_link_info *info)
7692 {
7693 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7694 bfd_boolean dostm32l4xx = globals
7695 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7696 bfd_boolean addglue;
7697
7698 /* If we are only performing a partial
7699 link do not bother adding the glue. */
7700 if (bfd_link_relocatable (info))
7701 return TRUE;
7702
7703 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7704 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7705 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7706 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7707
7708 if (!dostm32l4xx)
7709 return addglue;
7710
7711 return addglue
7712 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7713 }
7714
7715 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7716 ensures they are not marked for deletion by
7717 strip_excluded_output_sections () when veneers are going to be created
7718 later. Not doing so would trigger assert on empty section size in
7719 lang_size_sections_1 (). */
7720
7721 void
7722 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7723 {
7724 enum elf32_arm_stub_type stub_type;
7725
7726 /* If we are only performing a partial
7727 link do not bother adding the glue. */
7728 if (bfd_link_relocatable (info))
7729 return;
7730
7731 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7732 {
7733 asection *out_sec;
7734 const char *out_sec_name;
7735
7736 if (!arm_dedicated_stub_output_section_required (stub_type))
7737 continue;
7738
7739 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7740 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7741 if (out_sec != NULL)
7742 out_sec->flags |= SEC_KEEP;
7743 }
7744 }
7745
7746 /* Select a BFD to be used to hold the sections used by the glue code.
7747 This function is called from the linker scripts in ld/emultempl/
7748 {armelf/pe}.em. */
7749
7750 bfd_boolean
7751 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7752 {
7753 struct elf32_arm_link_hash_table *globals;
7754
7755 /* If we are only performing a partial link
7756 do not bother getting a bfd to hold the glue. */
7757 if (bfd_link_relocatable (info))
7758 return TRUE;
7759
7760 /* Make sure we don't attach the glue sections to a dynamic object. */
7761 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7762
7763 globals = elf32_arm_hash_table (info);
7764 BFD_ASSERT (globals != NULL);
7765
7766 if (globals->bfd_of_glue_owner != NULL)
7767 return TRUE;
7768
7769 /* Save the bfd for later use. */
7770 globals->bfd_of_glue_owner = abfd;
7771
7772 return TRUE;
7773 }
7774
7775 static void
7776 check_use_blx (struct elf32_arm_link_hash_table *globals)
7777 {
7778 int cpu_arch;
7779
7780 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7781 Tag_CPU_arch);
7782
7783 if (globals->fix_arm1176)
7784 {
7785 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7786 globals->use_blx = 1;
7787 }
7788 else
7789 {
7790 if (cpu_arch > TAG_CPU_ARCH_V4T)
7791 globals->use_blx = 1;
7792 }
7793 }
7794
/* Scan the relocations of every section of ABFD, before section sizes
   are fixed, and record any interworking glue that will be needed:
   ARM->Thumb call glue for R_ARM_PC24 calls that target Thumb symbols,
   and ARMv4 BX veneers for R_ARM_V4BX relocs when fix_v4bx >= 2.
   Returns FALSE on error (failed reloc or contents read).  */
bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.
	     Fetched lazily, once per section, only when a relevant
	     reloc is found.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX target register is in the low nibble of the
		 patched instruction.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Free the contents and relocs only if we own them (i.e. they are
	 not the cached copies stored in the section's elf data).  */
      if (elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

 error_return:
  /* Same ownership rule as above: never free cached copies.  */
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
7951 #endif
7952
7953
7954 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7955
7956 void
7957 bfd_elf32_arm_init_maps (bfd *abfd)
7958 {
7959 Elf_Internal_Sym *isymbuf;
7960 Elf_Internal_Shdr *hdr;
7961 unsigned int i, localsyms;
7962
7963 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7964 if (! is_arm_elf (abfd))
7965 return;
7966
7967 if ((abfd->flags & DYNAMIC) != 0)
7968 return;
7969
7970 hdr = & elf_symtab_hdr (abfd);
7971 localsyms = hdr->sh_info;
7972
7973 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
7974 should contain the number of local symbols, which should come before any
7975 global symbols. Mapping symbols are always local. */
7976 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
7977 NULL);
7978
7979 /* No internal symbols read? Skip this BFD. */
7980 if (isymbuf == NULL)
7981 return;
7982
7983 for (i = 0; i < localsyms; i++)
7984 {
7985 Elf_Internal_Sym *isym = &isymbuf[i];
7986 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
7987 const char *name;
7988
7989 if (sec != NULL
7990 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
7991 {
7992 name = bfd_elf_string_from_elf_section (abfd,
7993 hdr->sh_link, isym->st_name);
7994
7995 if (bfd_is_arm_special_symbol_name (name,
7996 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
7997 elf32_arm_section_map_add (sec, name[1], isym->st_value);
7998 }
7999 }
8000 }
8001
8002
8003 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8004 say what they wanted. */
8005
8006 void
8007 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8008 {
8009 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8010 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8011
8012 if (globals == NULL)
8013 return;
8014
8015 if (globals->fix_cortex_a8 == -1)
8016 {
8017 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8018 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8019 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8020 || out_attr[Tag_CPU_arch_profile].i == 0))
8021 globals->fix_cortex_a8 = 1;
8022 else
8023 globals->fix_cortex_a8 = 0;
8024 }
8025 }
8026
8027
8028 void
8029 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8030 {
8031 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8032 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8033
8034 if (globals == NULL)
8035 return;
8036 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8037 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8038 {
8039 switch (globals->vfp11_fix)
8040 {
8041 case BFD_ARM_VFP11_FIX_DEFAULT:
8042 case BFD_ARM_VFP11_FIX_NONE:
8043 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8044 break;
8045
8046 default:
8047 /* Give a warning, but do as the user requests anyway. */
8048 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8049 "workaround is not necessary for target architecture"), obfd);
8050 }
8051 }
8052 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8053 /* For earlier architectures, we might need the workaround, but do not
8054 enable it by default. If users is running with broken hardware, they
8055 must enable the erratum fix explicitly. */
8056 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8057 }
8058
8059 void
8060 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8061 {
8062 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8063 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8064
8065 if (globals == NULL)
8066 return;
8067
8068 /* We assume only Cortex-M4 may require the fix. */
8069 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8070 || out_attr[Tag_CPU_arch_profile].i != 'M')
8071 {
8072 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8073 /* Give a warning, but do as the user requests anyway. */
8074 _bfd_error_handler
8075 (_("%pB: warning: selected STM32L4XX erratum "
8076 "workaround is not necessary for target architecture"), obfd);
8077 }
8078 }
8079
/* Classification of the VFP11 pipeline an instruction executes in; used
   by the VFP11 erratum scanner below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline.  */
  VFP11_LS,	/* Load/store pipeline.  */
  VFP11_DS,	/* Divide/sqrt pipeline.  */
  VFP11_BAD	/* Not a (tracked) VFP11 instruction.  */
};
8087
8088 /* Return a VFP register number. This is encoded as RX:X for single-precision
8089 registers, or X:RX for double-precision registers, where RX is the group of
8090 four bits in the instruction encoding and X is the single extension bit.
8091 RX and X fields are specified using their lowest (starting) bit. The return
8092 value is:
8093
8094 0...31: single-precision registers s0...s31
8095 32...63: double-precision registers d0...d31.
8096
8097 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8098 encounter VFP3 instructions, so we allow the full range for DP registers. */
8099
8100 static unsigned int
8101 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8102 unsigned int x)
8103 {
8104 if (is_double)
8105 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8106 else
8107 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8108 }
8109
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    {
      /* Single-precision register: one bit.  */
      *wmask |= 1u << reg;
    }
  else if (reg < 48)
    {
      /* Double-precision d0-d15: mark both overlapping SP registers.  */
      unsigned int dp = reg - 32;

      *wmask |= 3u << (dp * 2);
    }
  /* d16-d31: no VFP11 overlap, nothing to record.  */
}
8121
8122 /* Return TRUE if WMASK overwrites anything in REGS. */
8123
8124 static bfd_boolean
8125 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8126 {
8127 int i;
8128
8129 for (i = 0; i < numregs; i++)
8130 {
8131 unsigned int reg = regs[i];
8132
8133 if (reg < 32 && (wmask & (1 << reg)) != 0)
8134 return TRUE;
8135
8136 reg -= 32;
8137
8138 if (reg >= 16)
8139 continue;
8140
8141 if ((wmask & (3 << (reg * 2))) != 0)
8142 return TRUE;
8143 }
8144
8145 return FALSE;
8146 }
8147
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask).

   On return, *DESTMASK holds the set of registers INSN may write (as a
   bfd_arm_vfp11_write_mask bitmap), REGS[0..*NUMREGS-1] hold the input
   registers of data-processing instructions, and the return value gives
   the pipeline that executes INSN (VFP11_BAD if untracked).  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 == 0xb select double-precision operands.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into a single selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  /* Multiply-accumulate also reads the destination.  */
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Common tail for two-input data-processing instructions.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* The extended opcode lives in bits 19:16 and bit 7.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means a transfer TO the VFP registers (a write).  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* SP form writes a consecutive register pair.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode bits: P (bit 24), U (bit 23), W (bit 21).  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Multiple load: every register in the list is written.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer. Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
8339
8340
/* Defined later in this file; needed here to qsort section mapping symbols.  */
static int elf32_arm_compare_mapping (const void * a, const void * b);
8342
8343
8344 /* Look for potentially-troublesome code sequences which might trigger the
8345 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8346 (available from ARM) for details of the erratum. A short version is
8347 described in ld.texinfo. */
8348
8349 bfd_boolean
8350 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8351 {
8352 asection *sec;
8353 bfd_byte *contents = NULL;
8354 int state = 0;
8355 int regs[3], numregs = 0;
8356 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8357 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8358
8359 if (globals == NULL)
8360 return FALSE;
8361
8362 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8363 The states transition as follows:
8364
8365 0 -> 1 (vector) or 0 -> 2 (scalar)
8366 A VFP FMAC-pipeline instruction has been seen. Fill
8367 regs[0]..regs[numregs-1] with its input operands. Remember this
8368 instruction in 'first_fmac'.
8369
8370 1 -> 2
8371 Any instruction, except for a VFP instruction which overwrites
8372 regs[*].
8373
8374 1 -> 3 [ -> 0 ] or
8375 2 -> 3 [ -> 0 ]
8376 A VFP instruction has been seen which overwrites any of regs[*].
8377 We must make a veneer! Reset state to 0 before examining next
8378 instruction.
8379
8380 2 -> 0
8381 If we fail to match anything in state 2, reset to state 0 and reset
8382 the instruction pointer to the instruction after 'first_fmac'.
8383
8384 If the VFP11 vector mode is in use, there must be at least two unrelated
8385 instructions between anti-dependent VFP11 instructions to properly avoid
8386 triggering the erratum, hence the use of the extra state 1. */
8387
8388 /* If we are only performing a partial link do not bother
8389 to construct any glue. */
8390 if (bfd_link_relocatable (link_info))
8391 return TRUE;
8392
8393 /* Skip if this bfd does not correspond to an ELF image. */
8394 if (! is_arm_elf (abfd))
8395 return TRUE;
8396
8397 /* We should have chosen a fix type by the time we get here. */
8398 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8399
8400 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8401 return TRUE;
8402
8403 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8404 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8405 return TRUE;
8406
8407 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8408 {
8409 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8410 struct _arm_elf_section_data *sec_data;
8411
8412 /* If we don't have executable progbits, we're not interested in this
8413 section. Also skip if section is to be excluded. */
8414 if (elf_section_type (sec) != SHT_PROGBITS
8415 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8416 || (sec->flags & SEC_EXCLUDE) != 0
8417 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8418 || sec->output_section == bfd_abs_section_ptr
8419 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8420 continue;
8421
8422 sec_data = elf32_arm_section_data (sec);
8423
8424 if (sec_data->mapcount == 0)
8425 continue;
8426
8427 if (elf_section_data (sec)->this_hdr.contents != NULL)
8428 contents = elf_section_data (sec)->this_hdr.contents;
8429 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8430 goto error_return;
8431
8432 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8433 elf32_arm_compare_mapping);
8434
8435 for (span = 0; span < sec_data->mapcount; span++)
8436 {
8437 unsigned int span_start = sec_data->map[span].vma;
8438 unsigned int span_end = (span == sec_data->mapcount - 1)
8439 ? sec->size : sec_data->map[span + 1].vma;
8440 char span_type = sec_data->map[span].type;
8441
8442 /* FIXME: Only ARM mode is supported at present. We may need to
8443 support Thumb-2 mode also at some point. */
8444 if (span_type != 'a')
8445 continue;
8446
8447 for (i = span_start; i < span_end;)
8448 {
8449 unsigned int next_i = i + 4;
8450 unsigned int insn = bfd_big_endian (abfd)
8451 ? (((unsigned) contents[i] << 24)
8452 | (contents[i + 1] << 16)
8453 | (contents[i + 2] << 8)
8454 | contents[i + 3])
8455 : (((unsigned) contents[i + 3] << 24)
8456 | (contents[i + 2] << 16)
8457 | (contents[i + 1] << 8)
8458 | contents[i]);
8459 unsigned int writemask = 0;
8460 enum bfd_arm_vfp11_pipe vpipe;
8461
8462 switch (state)
8463 {
8464 case 0:
8465 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8466 &numregs);
8467 /* I'm assuming the VFP11 erratum can trigger with denorm
8468 operands on either the FMAC or the DS pipeline. This might
8469 lead to slightly overenthusiastic veneer insertion. */
8470 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8471 {
8472 state = use_vector ? 1 : 2;
8473 first_fmac = i;
8474 veneer_of_insn = insn;
8475 }
8476 break;
8477
8478 case 1:
8479 {
8480 int other_regs[3], other_numregs;
8481 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8482 other_regs,
8483 &other_numregs);
8484 if (vpipe != VFP11_BAD
8485 && bfd_arm_vfp11_antidependency (writemask, regs,
8486 numregs))
8487 state = 3;
8488 else
8489 state = 2;
8490 }
8491 break;
8492
8493 case 2:
8494 {
8495 int other_regs[3], other_numregs;
8496 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8497 other_regs,
8498 &other_numregs);
8499 if (vpipe != VFP11_BAD
8500 && bfd_arm_vfp11_antidependency (writemask, regs,
8501 numregs))
8502 state = 3;
8503 else
8504 {
8505 state = 0;
8506 next_i = first_fmac + 4;
8507 }
8508 }
8509 break;
8510
8511 case 3:
8512 abort (); /* Should be unreachable. */
8513 }
8514
8515 if (state == 3)
8516 {
8517 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8518 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8519
8520 elf32_arm_section_data (sec)->erratumcount += 1;
8521
8522 newerr->u.b.vfp_insn = veneer_of_insn;
8523
8524 switch (span_type)
8525 {
8526 case 'a':
8527 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8528 break;
8529
8530 default:
8531 abort ();
8532 }
8533
8534 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8535 first_fmac);
8536
8537 newerr->vma = -1;
8538
8539 newerr->next = sec_data->erratumlist;
8540 sec_data->erratumlist = newerr;
8541
8542 state = 0;
8543 }
8544
8545 i = next_i;
8546 }
8547 }
8548
8549 if (elf_section_data (sec)->this_hdr.contents != contents)
8550 free (contents);
8551 contents = NULL;
8552 }
8553
8554 return TRUE;
8555
8556 error_return:
8557 if (elf_section_data (sec)->this_hdr.contents != contents)
8558 free (contents);
8559
8560 return FALSE;
8561 }
8562
8563 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8564 after sections have been laid out, using specially-named symbols. */
8565
8566 void
8567 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8568 struct bfd_link_info *link_info)
8569 {
8570 asection *sec;
8571 struct elf32_arm_link_hash_table *globals;
8572 char *tmp_name;
8573
8574 if (bfd_link_relocatable (link_info))
8575 return;
8576
8577 /* Skip if this bfd does not correspond to an ELF image. */
8578 if (! is_arm_elf (abfd))
8579 return;
8580
8581 globals = elf32_arm_hash_table (link_info);
8582 if (globals == NULL)
8583 return;
8584
8585 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8586 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8587 BFD_ASSERT (tmp_name);
8588
8589 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8590 {
8591 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8592 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8593
8594 for (; errnode != NULL; errnode = errnode->next)
8595 {
8596 struct elf_link_hash_entry *myh;
8597 bfd_vma vma;
8598
8599 switch (errnode->type)
8600 {
8601 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8602 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8603 /* Find veneer symbol. */
8604 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8605 errnode->u.b.veneer->u.v.id);
8606
8607 myh = elf_link_hash_lookup
8608 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8609
8610 if (myh == NULL)
8611 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8612 abfd, "VFP11", tmp_name);
8613
8614 vma = myh->root.u.def.section->output_section->vma
8615 + myh->root.u.def.section->output_offset
8616 + myh->root.u.def.value;
8617
8618 errnode->u.b.veneer->vma = vma;
8619 break;
8620
8621 case VFP11_ERRATUM_ARM_VENEER:
8622 case VFP11_ERRATUM_THUMB_VENEER:
8623 /* Find return location. */
8624 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8625 errnode->u.v.id);
8626
8627 myh = elf_link_hash_lookup
8628 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8629
8630 if (myh == NULL)
8631 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8632 abfd, "VFP11", tmp_name);
8633
8634 vma = myh->root.u.def.section->output_section->vma
8635 + myh->root.u.def.section->output_offset
8636 + myh->root.u.def.value;
8637
8638 errnode->u.v.branch->vma = vma;
8639 break;
8640
8641 default:
8642 abort ();
8643 }
8644 }
8645 }
8646
8647 free (tmp_name);
8648 }
8649
8650 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8651 return locations after sections have been laid out, using
8652 specially-named symbols. */
8653
8654 void
8655 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8656 struct bfd_link_info *link_info)
8657 {
8658 asection *sec;
8659 struct elf32_arm_link_hash_table *globals;
8660 char *tmp_name;
8661
8662 if (bfd_link_relocatable (link_info))
8663 return;
8664
8665 /* Skip if this bfd does not correspond to an ELF image. */
8666 if (! is_arm_elf (abfd))
8667 return;
8668
8669 globals = elf32_arm_hash_table (link_info);
8670 if (globals == NULL)
8671 return;
8672
8673 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8674 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8675 BFD_ASSERT (tmp_name);
8676
8677 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8678 {
8679 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8680 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8681
8682 for (; errnode != NULL; errnode = errnode->next)
8683 {
8684 struct elf_link_hash_entry *myh;
8685 bfd_vma vma;
8686
8687 switch (errnode->type)
8688 {
8689 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8690 /* Find veneer symbol. */
8691 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8692 errnode->u.b.veneer->u.v.id);
8693
8694 myh = elf_link_hash_lookup
8695 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8696
8697 if (myh == NULL)
8698 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8699 abfd, "STM32L4XX", tmp_name);
8700
8701 vma = myh->root.u.def.section->output_section->vma
8702 + myh->root.u.def.section->output_offset
8703 + myh->root.u.def.value;
8704
8705 errnode->u.b.veneer->vma = vma;
8706 break;
8707
8708 case STM32L4XX_ERRATUM_VENEER:
8709 /* Find return location. */
8710 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8711 errnode->u.v.id);
8712
8713 myh = elf_link_hash_lookup
8714 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8715
8716 if (myh == NULL)
8717 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8718 abfd, "STM32L4XX", tmp_name);
8719
8720 vma = myh->root.u.def.section->output_section->vma
8721 + myh->root.u.def.section->output_offset
8722 + myh->root.u.def.value;
8723
8724 errnode->u.v.branch->vma = vma;
8725 break;
8726
8727 default:
8728 abort ();
8729 }
8730 }
8731 }
8732
8733 free (tmp_name);
8734 }
8735
8736 static inline bfd_boolean
8737 is_thumb2_ldmia (const insn32 insn)
8738 {
8739 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8740 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8741 return (insn & 0xffd02000) == 0xe8900000;
8742 }
8743
8744 static inline bfd_boolean
8745 is_thumb2_ldmdb (const insn32 insn)
8746 {
8747 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8748 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8749 return (insn & 0xffd02000) == 0xe9100000;
8750 }
8751
static inline bfd_boolean
is_thumb2_vldm (const insn32 insn)
{
  /* A6.5 Extension register load or store instruction
     A7.7.229
     We look for SP 32-bit and DP 64-bit registers.
     Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 64-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 32-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
     if P==0 && U==1 && W==1 && Rn=1101 VPOP
     if PUW=010 || PUW=011 || PUW=101 VLDM.

     Note: ((insn << 7) >> 28) & 0xd extracts bits P (24), U (23) and
     W (21) into bit positions 3, 2 and 0 respectively (the & 0xd drops
     the D bit at position 1), so the comparisons below test the PUW
     addressing-mode combinations listed above.  */
  return
    (((insn & 0xfe100f00) == 0xec100b00) ||
     ((insn & 0xfe100f00) == 0xec100a00))
    && /* (IA without !).  */
    (((((insn << 7) >> 28) & 0xd) == 0x4)
     /* (IA with !), includes VPOP (when reg number is SP).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x5)
     /* (DB with !).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x9));
}
8776
8777 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8778 VLDM opcode and:
8779 - computes the number and the mode of memory accesses
8780 - decides if the replacement should be done:
8781 . replaces only if > 8-word accesses
8782 . or (testing purposes only) replaces all accesses. */
8783
8784 static bfd_boolean
8785 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8786 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8787 {
8788 int nb_words = 0;
8789
8790 /* The field encoding the register list is the same for both LDMIA
8791 and LDMDB encodings. */
8792 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8793 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8794 else if (is_thumb2_vldm (insn))
8795 nb_words = (insn & 0xff);
8796
8797 /* DEFAULT mode accounts for the real bug condition situation,
8798 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8799 return
8800 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8801 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8802 }
8803
/* Look for potentially-troublesome code sequences which might trigger
   the STM STM32L4XX erratum.  Walks all executable PROGBITS sections of
   ABFD, decoding Thumb-2 spans via their mapping symbols, and records a
   veneer for every wide LDM/VLDM that needs replacing.  Returns FALSE on
   error (no hash table, or failure to read section contents).  */

bfd_boolean
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return FALSE;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no code spans to scan.  */
      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      /* Sort mapping symbols by address so spans are contiguous.  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bfd_boolean insn_32bit = FALSE;
	      bfd_boolean is_ldm = FALSE;
	      bfd_boolean is_vldm = FALSE;
	      bfd_boolean is_not_last_in_it_block = FALSE;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		 last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		 IT block, it will be controlled naturally by IT
		 with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		 tells that we are not on the last item of an IT
		 block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm)
		      && stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#x): error: multiple load detected"
			       " in non-last IT block instruction:"
			       " STM32L4XX veneer cannot be generated; "
			       "use gcc option -mrestrict-it to generate"
			       " only one instruction per IT block"),
			     abfd, sec, i);
			}
		      else
			{
			  /* Queue an erratum veneer for this (V)LDM.  */
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
		    && ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      /* Advance by the size of the insn just examined.  */
	      i += insn_32bit ? 4 : 2;
	    }
	}

      if (elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

 error_return:
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
8997
8998 /* Set target relocation values needed during linking. */
8999
9000 void
9001 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9002 struct bfd_link_info *link_info,
9003 struct elf32_arm_params *params)
9004 {
9005 struct elf32_arm_link_hash_table *globals;
9006
9007 globals = elf32_arm_hash_table (link_info);
9008 if (globals == NULL)
9009 return;
9010
9011 globals->target1_is_rel = params->target1_is_rel;
9012 if (globals->fdpic_p)
9013 globals->target2_reloc = R_ARM_GOT32;
9014 else if (strcmp (params->target2_type, "rel") == 0)
9015 globals->target2_reloc = R_ARM_REL32;
9016 else if (strcmp (params->target2_type, "abs") == 0)
9017 globals->target2_reloc = R_ARM_ABS32;
9018 else if (strcmp (params->target2_type, "got-rel") == 0)
9019 globals->target2_reloc = R_ARM_GOT_PREL;
9020 else
9021 {
9022 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9023 params->target2_type);
9024 }
9025 globals->fix_v4bx = params->fix_v4bx;
9026 globals->use_blx |= params->use_blx;
9027 globals->vfp11_fix = params->vfp11_denorm_fix;
9028 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9029 if (globals->fdpic_p)
9030 globals->pic_veneer = 1;
9031 else
9032 globals->pic_veneer = params->pic_veneer;
9033 globals->fix_cortex_a8 = params->fix_cortex_a8;
9034 globals->fix_arm1176 = params->fix_arm1176;
9035 globals->cmse_implib = params->cmse_implib;
9036 globals->in_implib_bfd = params->in_implib_bfd;
9037
9038 BFD_ASSERT (is_arm_elf (output_bfd));
9039 elf_arm_tdata (output_bfd)->no_enum_size_warning
9040 = params->no_enum_size_warning;
9041 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9042 = params->no_wchar_size_warning;
9043 }
9044
/* Replace the target offset of a Thumb bl or b.w instruction.
   ABFD supplies the byte order, OFFSET is the (even) byte offset to
   encode, and INSN points at the first halfword of the 32-bit
   instruction in memory.  Only the offset fields are rewritten; the
   opcode bits of both halfwords are preserved.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  /* Branch targets are halfword aligned, so bit 0 must be clear.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* First halfword: keep the opcode bits above bit 10 and insert the
     sign bit S (bit 10) and imm10 (bits 0-9) from offset<21:12>.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Second halfword: insert J1 (bit 13), J2 (bit 11) and imm11
     (bits 0-10).  J1/J2 are offset bits 23/22 inverted and XORed
     with S, matching the Thumb-2 encoding T4 branch format.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
9069
/* Thumb code calling an ARM function.  Emit (on first use) the
   Thumb->ARM interworking stub for NAME into the glue section and
   retarget the original Thumb BL at OFFSET within INPUT_SECTION so
   that it branches to the stub.  Returns FALSE on error (including
   a call into an object not compiled for interworking).  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol created earlier for this target.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 of the recorded offset flags a stub whose contents have not
     been written yet; emit it now, exactly once.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return FALSE;
	}

      /* Clear the flag bit and store the real offset back.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub layout: two Thumb halfwords (bx pc; nop) followed by one
	 ARM branch at offset 4.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
     /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  /* HIT_DATA carries the section VMA bias; remove it to get the raw
     buffer position before rewriting the BL's offset fields.  */
  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
9170
/* Populate an Arm to Thumb stub.  Returns the stub symbol.
   Writes (on first use) one of three stub flavours into glue section S:
   a PIC sequence for shared/relocatable links, a BLX-based sequence
   when BLX is available, or a plain v4t ldr/bx sequence.  VAL is the
   absolute address of the Thumb target.  Returns NULL if the glue
   symbol for NAME cannot be found.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Bit 0 set means the stub has not been emitted yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Warn, but unlike the Thumb->ARM case, still emit the stub.  */
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      /* Clear the flag bit and store the real offset back.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  /* With BLX, a single ldr plus the literal target suffices.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* v4t fallback: ldr r12, [literal]; bx r12; literal.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
9265
/* Arm code calling a Thumb function.  Create (or reuse) the ARM->Thumb
   stub for NAME and rewrite the 24-bit branch field of the ARM
   instruction at HIT_DATA so that it targets the stub.  Returns FALSE
   if the stub could not be created.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  /* Keep only the condition/opcode byte of the original branch.  */
  tmp = bfd_get_32 (input_bfd, hit_data);
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  /* Re-insert the word-scaled displacement in the low 24 bits.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  /* HIT_DATA carries the section VMA bias; remove it before storing.  */
  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
9322
/* Populate Arm stub for an exported Thumb function.  Called via
   elf_link_hash_traverse with INF pointing at the bfd_link_info.
   Always returns TRUE so the traversal continues; entries without
   export glue are skipped.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Absolute address of the Thumb entry point the stub must reach.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
9366
9367 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9368
9369 static bfd_vma
9370 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9371 {
9372 bfd_byte *p;
9373 bfd_vma glue_addr;
9374 asection *s;
9375 struct elf32_arm_link_hash_table *globals;
9376
9377 globals = elf32_arm_hash_table (info);
9378 BFD_ASSERT (globals != NULL);
9379 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9380
9381 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9382 ARM_BX_GLUE_SECTION_NAME);
9383 BFD_ASSERT (s != NULL);
9384 BFD_ASSERT (s->contents != NULL);
9385 BFD_ASSERT (s->output_section != NULL);
9386
9387 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9388
9389 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9390
9391 if ((globals->bx_glue_offset[reg] & 1) == 0)
9392 {
9393 p = s->contents + glue_addr;
9394 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9395 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9396 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9397 globals->bx_glue_offset[reg] |= 1;
9398 }
9399
9400 return glue_addr + s->output_section->vma + s->output_offset;
9401 }
9402
9403 /* Generate Arm stubs for exported Thumb symbols. */
9404 static void
9405 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9406 struct bfd_link_info *link_info)
9407 {
9408 struct elf32_arm_link_hash_table * globals;
9409
9410 if (link_info == NULL)
9411 /* Ignore this if we are not called by the ELF backend linker. */
9412 return;
9413
9414 globals = elf32_arm_hash_table (link_info);
9415 if (globals == NULL)
9416 return;
9417
9418 /* If blx is available then exported Thumb symbols are OK and there is
9419 nothing to do. */
9420 if (globals->use_blx)
9421 return;
9422
9423 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9424 link_info);
9425 }
9426
9427 /* Reserve space for COUNT dynamic relocations in relocation selection
9428 SRELOC. */
9429
9430 static void
9431 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9432 bfd_size_type count)
9433 {
9434 struct elf32_arm_link_hash_table *htab;
9435
9436 htab = elf32_arm_hash_table (info);
9437 BFD_ASSERT (htab->root.dynamic_sections_created);
9438 if (sreloc == NULL)
9439 abort ();
9440 sreloc->size += RELOC_SIZE (htab) * count;
9441 }
9442
9443 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9444 dynamic, the relocations should go in SRELOC, otherwise they should
9445 go in the special .rel.iplt section. */
9446
9447 static void
9448 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9449 bfd_size_type count)
9450 {
9451 struct elf32_arm_link_hash_table *htab;
9452
9453 htab = elf32_arm_hash_table (info);
9454 if (!htab->root.dynamic_sections_created)
9455 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9456 else
9457 {
9458 BFD_ASSERT (sreloc != NULL);
9459 sreloc->size += RELOC_SIZE (htab) * count;
9460 }
9461 }
9462
9463 /* Add relocation REL to the end of relocation section SRELOC. */
9464
9465 static void
9466 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9467 asection *sreloc, Elf_Internal_Rela *rel)
9468 {
9469 bfd_byte *loc;
9470 struct elf32_arm_link_hash_table *htab;
9471
9472 htab = elf32_arm_hash_table (info);
9473 if (!htab->root.dynamic_sections_created
9474 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9475 sreloc = htab->root.irelplt;
9476 if (sreloc == NULL)
9477 abort ();
9478 loc = sreloc->contents;
9479 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9480 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9481 abort ();
9482 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9483 }
9484
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  Reserves space in the PLT section, the matching GOT
   section, and the relocation section; on return ROOT_PLT->offset
   holds the entry's offset within its PLT section and
   ARM_PLT->got_offset its GOT slot offset.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->root.target_os == is_nacl && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in .rel.got */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  /* Record where this entry starts before bumping the section size.  */
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  /* We also need to make an entry in the .got.plt section, which
     will be placed in the .got section by the linker script.  */
  if (is_iplt_entry)
    arm_plt->got_offset = sgotplt->size;
  else
    /* NOTE(review): the stored offset discounts 8 bytes per TLS
       descriptor counted in sgotplt->size — confirm against the
       TLS-descriptor allocation code elsewhere in this file.  */
    arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
  if (htab->fdpic_p)
    /* Function descriptor takes 64 bits in GOT.  */
    sgotplt->size += 8;
  else
    sgotplt->size += 4;
}
9561
9562 static bfd_vma
9563 arm_movw_immediate (bfd_vma value)
9564 {
9565 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9566 }
9567
9568 static bfd_vma
9569 arm_movt_immediate (bfd_vma value)
9570 {
9571 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9572 }
9573
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   The entry's instructions are emitted in one of several target
   flavours (VxWorks shared/exec, NaCl, FDPIC, Thumb-2-only, or plain
   ARM short/long), then the GOT slot and its dynamic relocation are
   filled in.

   Returns FALSE if there was a problem.  */

static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  bfd_vma got_offset, got_address, plt_address;
  bfd_vma got_displacement, initial_got_entry;
  bfd_byte * ptr;

  BFD_ASSERT (sgot != NULL);

  /* Get the offset into the .(i)got.plt table of the entry that
     corresponds to this function.  Mask off the low bit, which is
     used as a flag elsewhere.  */
  got_offset = (arm_plt->got_offset & -2);

  /* Get the index in the procedure linkage table which
     corresponds to this symbol.  This is the index of this symbol
     in all the symbols for which we are making plt entries.
     After the reserved .got.plt entries, all symbols appear in
     the same order as in .plt.  */
  if (htab->fdpic_p)
    /* Function descriptor takes 8 bytes.  */
    plt_index = (got_offset - got_header_size) / 8;
  else
    plt_index = (got_offset - got_header_size) / 4;

  /* Calculate the address of the GOT entry.  */
  got_address = (sgot->output_section->vma
		 + sgot->output_offset
		 + got_offset);

  /* ...and the address of the PLT entry.  */
  plt_address = (splt->output_section->vma
		 + splt->output_offset
		 + root_plt->offset);

  ptr = splt->contents + root_plt->offset;
  if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
    {
      /* VxWorks shared library: patch GOT offset and relocation index
	 into the template at words 2 and 5.  */
      unsigned int i;
      bfd_vma val;

      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	{
	  val = elf32_arm_vxworks_shared_plt_entry[i];
	  if (i == 2)
	    val |= got_address - sgot->output_section->vma;
	  if (i == 5)
	    val |= plt_index * RELOC_SIZE (htab);
	  if (i == 2 || i == 5)
	    bfd_put_32 (output_bfd, val, ptr);
	  else
	    put_arm_insn (htab, output_bfd, val, ptr);
	}
    }
  else if (htab->root.target_os == is_vxworks)
    {
      /* VxWorks executable: absolute GOT address at word 2, a branch
	 displacement at word 4, relocation index at word 5.  */
      unsigned int i;
      bfd_vma val;

      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	{
	  val = elf32_arm_vxworks_exec_plt_entry[i];
	  if (i == 2)
	    val |= got_address;
	  if (i == 4)
	    val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	  if (i == 5)
	    val |= plt_index * RELOC_SIZE (htab);
	  if (i == 2 || i == 5)
	    bfd_put_32 (output_bfd, val, ptr);
	  else
	    put_arm_insn (htab, output_bfd, val, ptr);
	}

      loc = (htab->srelplt2->contents
	     + (plt_index * 2 + 1) * RELOC_SIZE (htab));

      /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	 referencing the GOT for this PLT entry.  */
      rel.r_offset = plt_address + 8;
      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
      rel.r_addend = got_offset;
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
      loc += RELOC_SIZE (htab);

      /* Create the R_ARM_ABS32 relocation referencing the
	 beginning of the PLT for this GOT entry.  */
      rel.r_offset = got_address;
      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
      rel.r_addend = 0;
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }
  else if (htab->root.target_os == is_nacl)
    {
      /* Calculate the displacement between the PLT slot and the
	 common tail that's part of the special initial PLT slot.  */
      int32_t tail_displacement
	= ((splt->output_section->vma + splt->output_offset
	    + ARM_NACL_PLT_TAIL_OFFSET)
	   - (plt_address + htab->plt_entry_size + 4));
      BFD_ASSERT ((tail_displacement & 3) == 0);
      tail_displacement >>= 2;

      /* The displacement must fit in a 24-bit branch field.  */
      BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		  || (-tail_displacement & 0xff000000) == 0);

      /* Calculate the displacement between the PLT slot and the entry
	 in the GOT.  The offset accounts for the value produced by
	 adding to pc in the penultimate instruction of the PLT stub.  */
      got_displacement = (got_address
			  - (plt_address + htab->plt_entry_size));

      /* NaCl does not support interworking at all.  */
      BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[0]
		    | arm_movw_immediate (got_displacement),
		    ptr + 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[1]
		    | arm_movt_immediate (got_displacement),
		    ptr + 4);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[2],
		    ptr + 8);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[3]
		    | (tail_displacement & 0x00ffffff),
		    ptr + 12);
    }
  else if (htab->fdpic_p)
    {
      const bfd_vma *plt_entry = using_thumb_only(htab)
	? elf32_arm_fdpic_thumb_plt_entry
	: elf32_arm_fdpic_plt_entry;

      /* Fill-up Thumb stub if needed.  */
      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	{
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[0], ptr - 4);
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[1], ptr - 2);
	}
      /* As we are using 32 bit instructions even for the Thumb
	 version, we have to use 'put_arm_insn' instead of
	 'put_thumb_insn'.  */
      put_arm_insn(htab, output_bfd, plt_entry[0], ptr + 0);
      put_arm_insn(htab, output_bfd, plt_entry[1], ptr + 4);
      put_arm_insn(htab, output_bfd, plt_entry[2], ptr + 8);
      put_arm_insn(htab, output_bfd, plt_entry[3], ptr + 12);
      bfd_put_32 (output_bfd, got_offset, ptr + 16);

      if (!(info->flags & DF_BIND_NOW))
	{
	  /* funcdesc_value_reloc_offset.  */
	  bfd_put_32 (output_bfd,
		      htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
		      ptr + 20);
	  put_arm_insn(htab, output_bfd, plt_entry[6], ptr + 24);
	  put_arm_insn(htab, output_bfd, plt_entry[7], ptr + 28);
	  put_arm_insn(htab, output_bfd, plt_entry[8], ptr + 32);
	  put_arm_insn(htab, output_bfd, plt_entry[9], ptr + 36);
	}
    }
  else if (using_thumb_only (htab))
    {
      /* PR ld/16017: Generate thumb only PLT entries.  */
      if (!using_thumb2 (htab))
	{
	  /* FIXME: We ought to be able to generate thumb-1 PLT
	     instructions...  */
	  _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
			      output_bfd);
	  return FALSE;
	}

      /* Calculate the displacement between the PLT slot and the entry in
	 the GOT.  The 12-byte offset accounts for the value produced by
	 adding to pc in the 3rd instruction of the PLT stub.  */
      got_displacement = got_address - (plt_address + 12);

      /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	 instead of 'put_thumb_insn'.  */
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[0]
		    | ((got_displacement & 0x000000ff) << 16)
		    | ((got_displacement & 0x00000700) << 20)
		    | ((got_displacement & 0x00000800) >> 1)
		    | ((got_displacement & 0x0000f000) >> 12),
		    ptr + 0);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[1]
		    | ((got_displacement & 0x00ff0000) )
		    | ((got_displacement & 0x07000000) << 4)
		    | ((got_displacement & 0x08000000) >> 17)
		    | ((got_displacement & 0xf0000000) >> 28),
		    ptr + 4);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[2],
		    ptr + 8);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[3],
		    ptr + 12);
    }
  else
    {
      /* Calculate the displacement between the PLT slot and the
	 entry in the GOT.  The eight-byte offset accounts for the
	 value produced by adding to pc in the first instruction
	 of the PLT stub.  */
      got_displacement = got_address - (plt_address + 8);

      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	{
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[0], ptr - 4);
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[1], ptr - 2);
	}

      if (!elf32_arm_use_long_plt_entry)
	{
	  /* Short form: the displacement must fit in 28 bits, split
	     into three byte-sized rotated immediates.  */
	  BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[0]
			| ((got_displacement & 0x0ff00000) >> 20),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[1]
			| ((got_displacement & 0x000ff000) >> 12),
			ptr+ 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[2]
			| (got_displacement & 0x00000fff),
			ptr + 8);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	}
      else
	{
	  /* Long form: four instructions cover the full 32-bit
	     displacement.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[0]
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[1]
			| ((got_displacement & 0x0ff00000) >> 20),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[2]
			| ((got_displacement & 0x000ff000) >> 12),
			ptr+ 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[3]
			| (got_displacement & 0x00000fff),
			ptr + 12);
	}
    }

  /* Fill in the entry in the .rel(a).(i)plt section.  */
  rel.r_offset = got_address;
  rel.r_addend = 0;
  if (dynindx == -1)
    {
      /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	 The dynamic linker or static executable then calls SYM_VALUE
	 to determine the correct run-time value of the .igot.plt entry.  */
      rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
      initial_got_entry = sym_value;
    }
  else
    {
      /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
	 used by PLT entry.  */
      if (htab->fdpic_p)
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  initial_got_entry = 0;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);

	  /* PR ld/16017
	     When thumb only we need to set the LSB for any address that
	     will be used with an interworking branch instruction.  */
	  if (using_thumb_only (htab))
	    initial_got_entry |= 1;
	}
    }

  /* Fill in the entry in the global offset table.  */
  bfd_put_32 (output_bfd, initial_got_entry,
	      sgot->contents + got_offset);

  if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
    {
      /* Setup initial funcdesc value.  */
      /* FIXME: we don't support lazy binding because there is a
	 race condition between both words getting written and
	 some other thread attempting to read them. The ARM
	 architecture does not have an atomic 64 bit load/store
	 instruction that could be used to prevent it; it is
	 recommended that threaded FDPIC applications run with the
	 LD_BIND_NOW environment variable set.  */
      bfd_put_32(output_bfd, plt_address + 0x18,
		 sgot->contents + got_offset);
      bfd_put_32(output_bfd, -1 /*TODO*/,
		 sgot->contents + got_offset + 4);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      if (htab->fdpic_p)
	{
	  /* For FDPIC we put PLT relocationss into .rel.got when not
	     lazy binding otherwise we put them in .rel.plt. For now,
	     we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
	  else
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
	}
      else
	{
	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
    }

  return TRUE;
}
9959
9960 /* Some relocations map to different relocations depending on the
9961 target. Return the real relocation. */
9962
9963 static int
9964 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9965 int r_type)
9966 {
9967 switch (r_type)
9968 {
9969 case R_ARM_TARGET1:
9970 if (globals->target1_is_rel)
9971 return R_ARM_REL32;
9972 else
9973 return R_ARM_ABS32;
9974
9975 case R_ARM_TARGET2:
9976 return globals->target2_reloc;
9977
9978 default:
9979 return r_type;
9980 }
9981 }
9982
9983 /* Return the base VMA address which should be subtracted from real addresses
9984 when resolving @dtpoff relocation.
9985 This is PT_TLS segment p_vaddr. */
9986
9987 static bfd_vma
9988 dtpoff_base (struct bfd_link_info *info)
9989 {
9990 /* If tls_sec is NULL, we should have signalled an error already. */
9991 if (elf_hash_table (info)->tls_sec == NULL)
9992 return 0;
9993 return elf_hash_table (info)->tls_sec->vma;
9994 }
9995
9996 /* Return the relocation value for @tpoff relocation
9997 if STT_TLS virtual address is ADDRESS. */
9998
9999 static bfd_vma
10000 tpoff (struct bfd_link_info *info, bfd_vma address)
10001 {
10002 struct elf_link_hash_table *htab = elf_hash_table (info);
10003 bfd_vma base;
10004
10005 /* If tls_sec is NULL, we should have signalled an error already. */
10006 if (htab->tls_sec == NULL)
10007 return 0;
10008 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10009 return address - htab->tls_sec->vma + base;
10010 }
10011
10012 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10013 VALUE is the relocation value. */
10014
10015 static bfd_reloc_status_type
10016 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10017 {
10018 if (value > 0xfff)
10019 return bfd_reloc_overflow;
10020
10021 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10022 bfd_put_32 (abfd, value, data);
10023 return bfd_reloc_ok;
10024 }
10025
10026 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10027 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10028 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10029
10030 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10031 is to then call final_link_relocate. Return other values in the
10032 case of error.
10033
   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
10035 the pre-relaxed code. It would be nice if the relocs were updated
10036 to match the optimization. */
10037
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Patch the descriptor word in place.  For a local symbol the
	 slot is simply cleared; otherwise the stored value is biased
	 by -5 or -8 depending on whether its low bit marks a Thumb or
	 an ARM reference.  NOTE(review): the bias values presumably
	 compensate for the PC-relative read in the relaxed sequence
	 -- confirm against the TLS descriptor ABI.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* The caller must still apply final_link_relocate.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Rewrite each recognised instruction of the
	 descriptor sequence individually.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  /* For a non-local symbol the instruction is left as-is.  */
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  /* Anything else in the sequence is unexpected: diagnose it
	     with the full 32-bit encoding where applicable.  */
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same per-instruction patching as the Thumb case
	 above, using the A32 encodings.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	  /* For a non-local symbol the instruction is left as-is.  */
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  Replace the 32-bit slot with either the
	 IE load sequence or an architecture-appropriate NOP.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Write the two Thumb halfwords in order.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
10176
10177 /* For a given value of n, calculate the value of G_n as required to
10178 deal with group relocations. We return it in the form of an
10179 encoded constant-and-rotation, together with the final residual. If n is
10180 specified as less than zero, then final_residual is filled with the
10181 input value and no further action is performed. */
10182
10183 static bfd_vma
10184 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10185 {
10186 int current_n;
10187 bfd_vma g_n;
10188 bfd_vma encoded_g_n = 0;
10189 bfd_vma residual = value; /* Also known as Y_n. */
10190
10191 for (current_n = 0; current_n <= n; current_n++)
10192 {
10193 int shift;
10194
10195 /* Calculate which part of the value to mask. */
10196 if (residual == 0)
10197 shift = 0;
10198 else
10199 {
10200 int msb;
10201
10202 /* Determine the most significant bit in the residual and
10203 align the resulting value to a 2-bit boundary. */
10204 for (msb = 30; msb >= 0; msb -= 2)
10205 if (residual & (3u << msb))
10206 break;
10207
10208 /* The desired shift is now (msb - 6), or zero, whichever
10209 is the greater. */
10210 shift = msb - 6;
10211 if (shift < 0)
10212 shift = 0;
10213 }
10214
10215 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10216 g_n = residual & (0xff << shift);
10217 encoded_g_n = (g_n >> shift)
10218 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10219
10220 /* Calculate the residual for the next time around. */
10221 residual &= ~g_n;
10222 }
10223
10224 *final_residual = residual;
10225
10226 return encoded_g_n;
10227 }
10228
10229 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10230 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10231
10232 static int
10233 identify_add_or_sub (bfd_vma insn)
10234 {
10235 int opcode = insn & 0x1e00000;
10236
10237 if (opcode == 1 << 23) /* ADD */
10238 return 1;
10239
10240 if (opcode == 1 << 22) /* SUB */
10241 return -1;
10242
10243 return 0;
10244 }
10245
10246 /* Perform a relocation as part of a final link. */
10247
10248 static bfd_reloc_status_type
10249 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10250 bfd * input_bfd,
10251 bfd * output_bfd,
10252 asection * input_section,
10253 bfd_byte * contents,
10254 Elf_Internal_Rela * rel,
10255 bfd_vma value,
10256 struct bfd_link_info * info,
10257 asection * sym_sec,
10258 const char * sym_name,
10259 unsigned char st_type,
10260 enum arm_st_branch_type branch_type,
10261 struct elf_link_hash_entry * h,
10262 bfd_boolean * unresolved_reloc_p,
10263 char ** error_message)
10264 {
10265 unsigned long r_type = howto->type;
10266 unsigned long r_symndx;
10267 bfd_byte * hit_data = contents + rel->r_offset;
10268 bfd_vma * local_got_offsets;
10269 bfd_vma * local_tlsdesc_gotents;
10270 asection * sgot;
10271 asection * splt;
10272 asection * sreloc = NULL;
10273 asection * srelgot;
10274 bfd_vma addend;
10275 bfd_signed_vma signed_addend;
10276 unsigned char dynreloc_st_type;
10277 bfd_vma dynreloc_value;
10278 struct elf32_arm_link_hash_table * globals;
10279 struct elf32_arm_link_hash_entry *eh;
10280 union gotplt_union *root_plt;
10281 struct arm_plt_info *arm_plt;
10282 bfd_vma plt_offset;
10283 bfd_vma gotplt_offset;
10284 bfd_boolean has_iplt_entry;
10285 bfd_boolean resolved_to_zero;
10286
10287 globals = elf32_arm_hash_table (info);
10288 if (globals == NULL)
10289 return bfd_reloc_notsupported;
10290
10291 BFD_ASSERT (is_arm_elf (input_bfd));
10292 BFD_ASSERT (howto != NULL);
10293
10294 /* Some relocation types map to different relocations depending on the
10295 target. We pick the right one here. */
10296 r_type = arm_real_reloc_type (globals, r_type);
10297
10298 /* It is possible to have linker relaxations on some TLS access
10299 models. Update our information here. */
10300 r_type = elf32_arm_tls_transition (info, r_type, h);
10301
10302 if (r_type != howto->type)
10303 howto = elf32_arm_howto_from_type (r_type);
10304
10305 eh = (struct elf32_arm_link_hash_entry *) h;
10306 sgot = globals->root.sgot;
10307 local_got_offsets = elf_local_got_offsets (input_bfd);
10308 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10309
10310 if (globals->root.dynamic_sections_created)
10311 srelgot = globals->root.srelgot;
10312 else
10313 srelgot = NULL;
10314
10315 r_symndx = ELF32_R_SYM (rel->r_info);
10316
10317 if (globals->use_rel)
10318 {
10319 bfd_vma sign;
10320
10321 switch (howto->size)
10322 {
10323 case 0: addend = bfd_get_8 (input_bfd, hit_data); break;
10324 case 1: addend = bfd_get_16 (input_bfd, hit_data); break;
10325 case 2: addend = bfd_get_32 (input_bfd, hit_data); break;
10326 default: addend = 0; break;
10327 }
10328 /* Note: the addend and signed_addend calculated here are
10329 incorrect for any split field. */
10330 addend &= howto->src_mask;
10331 sign = howto->src_mask & ~(howto->src_mask >> 1);
10332 signed_addend = (addend ^ sign) - sign;
10333 signed_addend = (bfd_vma) signed_addend << howto->rightshift;
10334 addend <<= howto->rightshift;
10335 }
10336 else
10337 addend = signed_addend = rel->r_addend;
10338
10339 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10340 are resolving a function call relocation. */
10341 if (using_thumb_only (globals)
10342 && (r_type == R_ARM_THM_CALL
10343 || r_type == R_ARM_THM_JUMP24)
10344 && branch_type == ST_BRANCH_TO_ARM)
10345 branch_type = ST_BRANCH_TO_THUMB;
10346
10347 /* Record the symbol information that should be used in dynamic
10348 relocations. */
10349 dynreloc_st_type = st_type;
10350 dynreloc_value = value;
10351 if (branch_type == ST_BRANCH_TO_THUMB)
10352 dynreloc_value |= 1;
10353
10354 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10355 VALUE appropriately for relocations that we resolve at link time. */
10356 has_iplt_entry = FALSE;
10357 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10358 &arm_plt)
10359 && root_plt->offset != (bfd_vma) -1)
10360 {
10361 plt_offset = root_plt->offset;
10362 gotplt_offset = arm_plt->got_offset;
10363
10364 if (h == NULL || eh->is_iplt)
10365 {
10366 has_iplt_entry = TRUE;
10367 splt = globals->root.iplt;
10368
10369 /* Populate .iplt entries here, because not all of them will
10370 be seen by finish_dynamic_symbol. The lower bit is set if
10371 we have already populated the entry. */
10372 if (plt_offset & 1)
10373 plt_offset--;
10374 else
10375 {
10376 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10377 -1, dynreloc_value))
10378 root_plt->offset |= 1;
10379 else
10380 return bfd_reloc_notsupported;
10381 }
10382
10383 /* Static relocations always resolve to the .iplt entry. */
10384 st_type = STT_FUNC;
10385 value = (splt->output_section->vma
10386 + splt->output_offset
10387 + plt_offset);
10388 branch_type = ST_BRANCH_TO_ARM;
10389
10390 /* If there are non-call relocations that resolve to the .iplt
10391 entry, then all dynamic ones must too. */
10392 if (arm_plt->noncall_refcount != 0)
10393 {
10394 dynreloc_st_type = st_type;
10395 dynreloc_value = value;
10396 }
10397 }
10398 else
10399 /* We populate the .plt entry in finish_dynamic_symbol. */
10400 splt = globals->root.splt;
10401 }
10402 else
10403 {
10404 splt = NULL;
10405 plt_offset = (bfd_vma) -1;
10406 gotplt_offset = (bfd_vma) -1;
10407 }
10408
10409 resolved_to_zero = (h != NULL
10410 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10411
10412 switch (r_type)
10413 {
10414 case R_ARM_NONE:
10415 /* We don't need to find a value for this symbol. It's just a
10416 marker. */
10417 *unresolved_reloc_p = FALSE;
10418 return bfd_reloc_ok;
10419
10420 case R_ARM_ABS12:
10421 if (globals->root.target_os != is_vxworks)
10422 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10423 /* Fall through. */
10424
10425 case R_ARM_PC24:
10426 case R_ARM_ABS32:
10427 case R_ARM_ABS32_NOI:
10428 case R_ARM_REL32:
10429 case R_ARM_REL32_NOI:
10430 case R_ARM_CALL:
10431 case R_ARM_JUMP24:
10432 case R_ARM_XPC25:
10433 case R_ARM_PREL31:
10434 case R_ARM_PLT32:
10435 /* Handle relocations which should use the PLT entry. ABS32/REL32
10436 will use the symbol's value, which may point to a PLT entry, but we
10437 don't need to handle that here. If we created a PLT entry, all
10438 branches in this object should go to it, except if the PLT is too
10439 far away, in which case a long branch stub should be inserted. */
10440 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10441 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10442 && r_type != R_ARM_CALL
10443 && r_type != R_ARM_JUMP24
10444 && r_type != R_ARM_PLT32)
10445 && plt_offset != (bfd_vma) -1)
10446 {
10447 /* If we've created a .plt section, and assigned a PLT entry
10448 to this function, it must either be a STT_GNU_IFUNC reference
10449 or not be known to bind locally. In other cases, we should
10450 have cleared the PLT entry by now. */
10451 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10452
10453 value = (splt->output_section->vma
10454 + splt->output_offset
10455 + plt_offset);
10456 *unresolved_reloc_p = FALSE;
10457 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10458 contents, rel->r_offset, value,
10459 rel->r_addend);
10460 }
10461
10462 /* When generating a shared object or relocatable executable, these
10463 relocations are copied into the output file to be resolved at
10464 run time. */
10465 if ((bfd_link_pic (info)
10466 || globals->root.is_relocatable_executable
10467 || globals->fdpic_p)
10468 && (input_section->flags & SEC_ALLOC)
10469 && !(globals->root.target_os == is_vxworks
10470 && strcmp (input_section->output_section->name,
10471 ".tls_vars") == 0)
10472 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10473 || !SYMBOL_CALLS_LOCAL (info, h))
10474 && !(input_bfd == globals->stub_bfd
10475 && strstr (input_section->name, STUB_SUFFIX))
10476 && (h == NULL
10477 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10478 && !resolved_to_zero)
10479 || h->root.type != bfd_link_hash_undefweak)
10480 && r_type != R_ARM_PC24
10481 && r_type != R_ARM_CALL
10482 && r_type != R_ARM_JUMP24
10483 && r_type != R_ARM_PREL31
10484 && r_type != R_ARM_PLT32)
10485 {
10486 Elf_Internal_Rela outrel;
10487 bfd_boolean skip, relocate;
10488 int isrofixup = 0;
10489
10490 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10491 && !h->def_regular)
10492 {
10493 char *v = _("shared object");
10494
10495 if (bfd_link_executable (info))
10496 v = _("PIE executable");
10497
10498 _bfd_error_handler
10499 (_("%pB: relocation %s against external or undefined symbol `%s'"
10500 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10501 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10502 return bfd_reloc_notsupported;
10503 }
10504
10505 *unresolved_reloc_p = FALSE;
10506
10507 if (sreloc == NULL && globals->root.dynamic_sections_created)
10508 {
10509 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10510 ! globals->use_rel);
10511
10512 if (sreloc == NULL)
10513 return bfd_reloc_notsupported;
10514 }
10515
10516 skip = FALSE;
10517 relocate = FALSE;
10518
10519 outrel.r_addend = addend;
10520 outrel.r_offset =
10521 _bfd_elf_section_offset (output_bfd, info, input_section,
10522 rel->r_offset);
10523 if (outrel.r_offset == (bfd_vma) -1)
10524 skip = TRUE;
10525 else if (outrel.r_offset == (bfd_vma) -2)
10526 skip = TRUE, relocate = TRUE;
10527 outrel.r_offset += (input_section->output_section->vma
10528 + input_section->output_offset);
10529
10530 if (skip)
10531 memset (&outrel, 0, sizeof outrel);
10532 else if (h != NULL
10533 && h->dynindx != -1
10534 && (!bfd_link_pic (info)
10535 || !(bfd_link_pie (info)
10536 || SYMBOLIC_BIND (info, h))
10537 || !h->def_regular))
10538 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10539 else
10540 {
10541 int symbol;
10542
10543 /* This symbol is local, or marked to become local. */
10544 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10545 || (globals->fdpic_p && !bfd_link_pic(info)));
10546 /* On SVR4-ish systems, the dynamic loader cannot
10547 relocate the text and data segments independently,
10548 so the symbol does not matter. */
10549 symbol = 0;
10550 if (dynreloc_st_type == STT_GNU_IFUNC)
10551 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10552 to the .iplt entry. Instead, every non-call reference
10553 must use an R_ARM_IRELATIVE relocation to obtain the
10554 correct run-time address. */
10555 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10556 else if (globals->fdpic_p && !bfd_link_pic(info))
10557 isrofixup = 1;
10558 else
10559 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10560 if (globals->use_rel)
10561 relocate = TRUE;
10562 else
10563 outrel.r_addend += dynreloc_value;
10564 }
10565
10566 if (isrofixup)
10567 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
10568 else
10569 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10570
10571 /* If this reloc is against an external symbol, we do not want to
10572 fiddle with the addend. Otherwise, we need to include the symbol
10573 value so that it becomes an addend for the dynamic reloc. */
10574 if (! relocate)
10575 return bfd_reloc_ok;
10576
10577 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10578 contents, rel->r_offset,
10579 dynreloc_value, (bfd_vma) 0);
10580 }
10581 else switch (r_type)
10582 {
10583 case R_ARM_ABS12:
10584 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10585
10586 case R_ARM_XPC25: /* Arm BLX instruction. */
10587 case R_ARM_CALL:
10588 case R_ARM_JUMP24:
10589 case R_ARM_PC24: /* Arm B/BL instruction. */
10590 case R_ARM_PLT32:
10591 {
10592 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10593
10594 if (r_type == R_ARM_XPC25)
10595 {
10596 /* Check for Arm calling Arm function. */
10597 /* FIXME: Should we translate the instruction into a BL
10598 instruction instead ? */
10599 if (branch_type != ST_BRANCH_TO_THUMB)
10600 _bfd_error_handler
10601 (_("\%pB: warning: %s BLX instruction targets"
10602 " %s function '%s'"),
10603 input_bfd, "ARM",
10604 "ARM", h ? h->root.root.string : "(local)");
10605 }
10606 else if (r_type == R_ARM_PC24)
10607 {
10608 /* Check for Arm calling Thumb function. */
10609 if (branch_type == ST_BRANCH_TO_THUMB)
10610 {
10611 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10612 output_bfd, input_section,
10613 hit_data, sym_sec, rel->r_offset,
10614 signed_addend, value,
10615 error_message))
10616 return bfd_reloc_ok;
10617 else
10618 return bfd_reloc_dangerous;
10619 }
10620 }
10621
10622 /* Check if a stub has to be inserted because the
10623 destination is too far or we are changing mode. */
10624 if ( r_type == R_ARM_CALL
10625 || r_type == R_ARM_JUMP24
10626 || r_type == R_ARM_PLT32)
10627 {
10628 enum elf32_arm_stub_type stub_type = arm_stub_none;
10629 struct elf32_arm_link_hash_entry *hash;
10630
10631 hash = (struct elf32_arm_link_hash_entry *) h;
10632 stub_type = arm_type_of_stub (info, input_section, rel,
10633 st_type, &branch_type,
10634 hash, value, sym_sec,
10635 input_bfd, sym_name);
10636
10637 if (stub_type != arm_stub_none)
10638 {
10639 /* The target is out of reach, so redirect the
10640 branch to the local stub for this function. */
10641 stub_entry = elf32_arm_get_stub_entry (input_section,
10642 sym_sec, h,
10643 rel, globals,
10644 stub_type);
10645 {
10646 if (stub_entry != NULL)
10647 value = (stub_entry->stub_offset
10648 + stub_entry->stub_sec->output_offset
10649 + stub_entry->stub_sec->output_section->vma);
10650
10651 if (plt_offset != (bfd_vma) -1)
10652 *unresolved_reloc_p = FALSE;
10653 }
10654 }
10655 else
10656 {
10657 /* If the call goes through a PLT entry, make sure to
10658 check distance to the right destination address. */
10659 if (plt_offset != (bfd_vma) -1)
10660 {
10661 value = (splt->output_section->vma
10662 + splt->output_offset
10663 + plt_offset);
10664 *unresolved_reloc_p = FALSE;
10665 /* The PLT entry is in ARM mode, regardless of the
10666 target function. */
10667 branch_type = ST_BRANCH_TO_ARM;
10668 }
10669 }
10670 }
10671
10672 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10673 where:
10674 S is the address of the symbol in the relocation.
10675 P is address of the instruction being relocated.
10676 A is the addend (extracted from the instruction) in bytes.
10677
10678 S is held in 'value'.
10679 P is the base address of the section containing the
10680 instruction plus the offset of the reloc into that
10681 section, ie:
10682 (input_section->output_section->vma +
10683 input_section->output_offset +
10684 rel->r_offset).
10685 A is the addend, converted into bytes, ie:
10686 (signed_addend * 4)
10687
10688 Note: None of these operations have knowledge of the pipeline
10689 size of the processor, thus it is up to the assembler to
10690 encode this information into the addend. */
10691 value -= (input_section->output_section->vma
10692 + input_section->output_offset);
10693 value -= rel->r_offset;
10694 value += signed_addend;
10695
10696 signed_addend = value;
10697 signed_addend >>= howto->rightshift;
10698
10699 /* A branch to an undefined weak symbol is turned into a jump to
10700 the next instruction unless a PLT entry will be created.
10701 Do the same for local undefined symbols (but not for STN_UNDEF).
10702 The jump to the next instruction is optimized as a NOP depending
10703 on the architecture. */
10704 if (h ? (h->root.type == bfd_link_hash_undefweak
10705 && plt_offset == (bfd_vma) -1)
10706 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10707 {
10708 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10709
10710 if (arch_has_arm_nop (globals))
10711 value |= 0x0320f000;
10712 else
10713 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10714 }
10715 else
10716 {
10717 /* Perform a signed range check. */
10718 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10719 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10720 return bfd_reloc_overflow;
10721
10722 addend = (value & 2);
10723
10724 value = (signed_addend & howto->dst_mask)
10725 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10726
10727 if (r_type == R_ARM_CALL)
10728 {
10729 /* Set the H bit in the BLX instruction. */
10730 if (branch_type == ST_BRANCH_TO_THUMB)
10731 {
10732 if (addend)
10733 value |= (1 << 24);
10734 else
10735 value &= ~(bfd_vma)(1 << 24);
10736 }
10737
10738 /* Select the correct instruction (BL or BLX). */
10739 /* Only if we are not handling a BL to a stub. In this
10740 case, mode switching is performed by the stub. */
10741 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10742 value |= (1 << 28);
10743 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10744 {
10745 value &= ~(bfd_vma)(1 << 28);
10746 value |= (1 << 24);
10747 }
10748 }
10749 }
10750 }
10751 break;
10752
10753 case R_ARM_ABS32:
10754 value += addend;
10755 if (branch_type == ST_BRANCH_TO_THUMB)
10756 value |= 1;
10757 break;
10758
10759 case R_ARM_ABS32_NOI:
10760 value += addend;
10761 break;
10762
10763 case R_ARM_REL32:
10764 value += addend;
10765 if (branch_type == ST_BRANCH_TO_THUMB)
10766 value |= 1;
10767 value -= (input_section->output_section->vma
10768 + input_section->output_offset + rel->r_offset);
10769 break;
10770
10771 case R_ARM_REL32_NOI:
10772 value += addend;
10773 value -= (input_section->output_section->vma
10774 + input_section->output_offset + rel->r_offset);
10775 break;
10776
10777 case R_ARM_PREL31:
10778 value -= (input_section->output_section->vma
10779 + input_section->output_offset + rel->r_offset);
10780 value += signed_addend;
10781 if (! h || h->root.type != bfd_link_hash_undefweak)
10782 {
10783 /* Check for overflow. */
10784 if ((value ^ (value >> 1)) & (1 << 30))
10785 return bfd_reloc_overflow;
10786 }
10787 value &= 0x7fffffff;
10788 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10789 if (branch_type == ST_BRANCH_TO_THUMB)
10790 value |= 1;
10791 break;
10792 }
10793
10794 bfd_put_32 (input_bfd, value, hit_data);
10795 return bfd_reloc_ok;
10796
10797 case R_ARM_ABS8:
10798 value += addend;
10799
10800 /* There is no way to tell whether the user intended to use a signed or
10801 unsigned addend. When checking for overflow we accept either,
10802 as specified by the AAELF. */
10803 if ((long) value > 0xff || (long) value < -0x80)
10804 return bfd_reloc_overflow;
10805
10806 bfd_put_8 (input_bfd, value, hit_data);
10807 return bfd_reloc_ok;
10808
10809 case R_ARM_ABS16:
10810 value += addend;
10811
10812 /* See comment for R_ARM_ABS8. */
10813 if ((long) value > 0xffff || (long) value < -0x8000)
10814 return bfd_reloc_overflow;
10815
10816 bfd_put_16 (input_bfd, value, hit_data);
10817 return bfd_reloc_ok;
10818
10819 case R_ARM_THM_ABS5:
10820 /* Support ldr and str instructions for the thumb. */
10821 if (globals->use_rel)
10822 {
10823 /* Need to refetch addend. */
10824 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10825 /* ??? Need to determine shift amount from operand size. */
10826 addend >>= howto->rightshift;
10827 }
10828 value += addend;
10829
10830 /* ??? Isn't value unsigned? */
10831 if ((long) value > 0x1f || (long) value < -0x10)
10832 return bfd_reloc_overflow;
10833
10834 /* ??? Value needs to be properly shifted into place first. */
10835 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10836 bfd_put_16 (input_bfd, value, hit_data);
10837 return bfd_reloc_ok;
10838
10839 case R_ARM_THM_ALU_PREL_11_0:
10840 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10841 {
10842 bfd_vma insn;
10843 bfd_signed_vma relocation;
10844
10845 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10846 | bfd_get_16 (input_bfd, hit_data + 2);
10847
10848 if (globals->use_rel)
10849 {
10850 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10851 | ((insn & (1 << 26)) >> 15);
10852 if (insn & 0xf00000)
10853 signed_addend = -signed_addend;
10854 }
10855
10856 relocation = value + signed_addend;
10857 relocation -= Pa (input_section->output_section->vma
10858 + input_section->output_offset
10859 + rel->r_offset);
10860
10861 /* PR 21523: Use an absolute value. The user of this reloc will
10862 have already selected an ADD or SUB insn appropriately. */
10863 value = llabs (relocation);
10864
10865 if (value >= 0x1000)
10866 return bfd_reloc_overflow;
10867
10868 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10869 if (branch_type == ST_BRANCH_TO_THUMB)
10870 value |= 1;
10871
10872 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10873 | ((value & 0x700) << 4)
10874 | ((value & 0x800) << 15);
10875 if (relocation < 0)
10876 insn |= 0xa00000;
10877
10878 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10879 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10880
10881 return bfd_reloc_ok;
10882 }
10883
10884 case R_ARM_THM_PC8:
10885 /* PR 10073: This reloc is not generated by the GNU toolchain,
10886 but it is supported for compatibility with third party libraries
10887 generated by other compilers, specifically the ARM/IAR. */
10888 {
10889 bfd_vma insn;
10890 bfd_signed_vma relocation;
10891
10892 insn = bfd_get_16 (input_bfd, hit_data);
10893
10894 if (globals->use_rel)
10895 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
10896
10897 relocation = value + addend;
10898 relocation -= Pa (input_section->output_section->vma
10899 + input_section->output_offset
10900 + rel->r_offset);
10901
10902 value = relocation;
10903
10904 /* We do not check for overflow of this reloc. Although strictly
10905 speaking this is incorrect, it appears to be necessary in order
10906 to work with IAR generated relocs. Since GCC and GAS do not
10907 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10908 a problem for them. */
10909 value &= 0x3fc;
10910
10911 insn = (insn & 0xff00) | (value >> 2);
10912
10913 bfd_put_16 (input_bfd, insn, hit_data);
10914
10915 return bfd_reloc_ok;
10916 }
10917
10918 case R_ARM_THM_PC12:
10919 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10920 {
10921 bfd_vma insn;
10922 bfd_signed_vma relocation;
10923
10924 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10925 | bfd_get_16 (input_bfd, hit_data + 2);
10926
10927 if (globals->use_rel)
10928 {
10929 signed_addend = insn & 0xfff;
10930 if (!(insn & (1 << 23)))
10931 signed_addend = -signed_addend;
10932 }
10933
10934 relocation = value + signed_addend;
10935 relocation -= Pa (input_section->output_section->vma
10936 + input_section->output_offset
10937 + rel->r_offset);
10938
10939 value = relocation;
10940
10941 if (value >= 0x1000)
10942 return bfd_reloc_overflow;
10943
10944 insn = (insn & 0xff7ff000) | value;
10945 if (relocation >= 0)
10946 insn |= (1 << 23);
10947
10948 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10949 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10950
10951 return bfd_reloc_ok;
10952 }
10953
    case R_ARM_THM_XPC22:
    case R_ARM_THM_CALL:
    case R_ARM_THM_JUMP24:
      /* Thumb BL (branch long instruction).  Also handles BLX and the
	 Thumb-2 B.W, which share the split-immediate encoding.  */
      {
	bfd_vma relocation;
	bfd_vma reloc_sign;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	bfd_signed_vma reloc_signed_max;
	bfd_signed_vma reloc_signed_min;
	bfd_vma check;
	bfd_signed_vma signed_check;
	int bitsize;
	const int thumb2 = using_thumb2 (globals);
	const int thumb2_bl = using_thumb2_bl (globals);

	/* A branch to an undefined weak symbol is turned into a jump to
	   the next instruction unless a PLT entry will be created.
	   The jump to the next instruction is optimized as a NOP.W for
	   Thumb-2 enabled architectures.  */
	if (h && h->root.type == bfd_link_hash_undefweak
	    && plt_offset == (bfd_vma) -1)
	  {
	    if (thumb2)
	      {
		/* NOP.W (0xf3af 0x8000).  */
		bfd_put_16 (input_bfd, 0xf3af, hit_data);
		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
	      }
	    else
	      {
		/* Thumb-1: branch over the second halfword, then NOP.  */
		bfd_put_16 (input_bfd, 0xe000, hit_data);
		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
	      }
	    return bfd_reloc_ok;
	  }

	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
	   with Thumb-1) involving the J1 and J2 bits.  */
	if (globals->use_rel)
	  {
	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
	    bfd_vma upper = upper_insn & 0x3ff;
	    bfd_vma lower = lower_insn & 0x7ff;
	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
	    /* I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S), per the ARM ARM.  */
	    bfd_vma i1 = j1 ^ s ? 0 : 1;
	    bfd_vma i2 = j2 ^ s ? 0 : 1;

	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
	    /* Sign extend.  */
	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);

	    signed_addend = addend;
	  }

	if (r_type == R_ARM_THM_XPC22)
	  {
	    /* Check for Thumb to Thumb call.  */
	    /* FIXME: Should we translate the instruction into a BL
	       instruction instead ?  */
	    if (branch_type == ST_BRANCH_TO_THUMB)
	      _bfd_error_handler
		(_("%pB: warning: %s BLX instruction targets"
		   " %s function '%s'"),
		 input_bfd, "Thumb",
		 "Thumb", h ? h->root.root.string : "(local)");
	  }
	else
	  {
	    /* If it is not a call to Thumb, assume call to Arm.
	       If it is a call relative to a section name, then it is not a
	       function call at all, but rather a long jump.  Calls through
	       the PLT do not require stubs.  */
	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
	      {
		if (globals->use_blx && r_type == R_ARM_THM_CALL)
		  {
		    /* Convert BL to BLX.  */
		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
		else if (( r_type != R_ARM_THM_CALL)
			 && (r_type != R_ARM_THM_JUMP24))
		  {
		    /* No BLX available: route the interworking branch
		       through a Thumb-to-ARM glue stub.  */
		    if (elf32_thumb_to_arm_stub
			(info, sym_name, input_bfd, output_bfd, input_section,
			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
			 error_message))
		      return bfd_reloc_ok;
		    else
		      return bfd_reloc_dangerous;
		  }
	      }
	    else if (branch_type == ST_BRANCH_TO_THUMB
		     && globals->use_blx
		     && r_type == R_ARM_THM_CALL)
	      {
		/* Make sure this is a BL.  */
		lower_insn |= 0x1800;
	      }
	  }

	enum elf32_arm_stub_type stub_type = arm_stub_none;
	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
	  {
	    /* Check if a stub has to be inserted because the destination
	       is too far.  */
	    struct elf32_arm_stub_hash_entry *stub_entry;
	    struct elf32_arm_link_hash_entry *hash;

	    hash = (struct elf32_arm_link_hash_entry *) h;

	    stub_type = arm_type_of_stub (info, input_section, rel,
					  st_type, &branch_type,
					  hash, value, sym_sec,
					  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		/* The target is out of reach or we are changing modes, so
		   redirect the branch to the local stub for this
		   function.  */
		stub_entry = elf32_arm_get_stub_entry (input_section,
						       sym_sec, h,
						       rel, globals,
						       stub_type);
		if (stub_entry != NULL)
		  {
		    value = (stub_entry->stub_offset
			     + stub_entry->stub_sec->output_offset
			     + stub_entry->stub_sec->output_section->vma);

		    if (plt_offset != (bfd_vma) -1)
		      *unresolved_reloc_p = FALSE;
		  }

		/* If this call becomes a call to Arm, force BLX.  */
		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
		  {
		    if ((stub_entry
			 && !arm_stub_is_thumb (stub_entry->stub_type))
			|| branch_type != ST_BRANCH_TO_THUMB)
		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
	      }
	  }

	/* Handle calls via the PLT.  */
	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);

	    if (globals->use_blx
		&& r_type == R_ARM_THM_CALL
		&& ! using_thumb_only (globals))
	      {
		/* If the Thumb BLX instruction is available, convert
		   the BL to a BLX instruction to call the ARM-mode
		   PLT entry.  */
		lower_insn = (lower_insn & ~0x1000) | 0x0800;
		branch_type = ST_BRANCH_TO_ARM;
	      }
	    else
	      {
		if (! using_thumb_only (globals))
		  /* Target the Thumb stub before the ARM PLT entry.  */
		  value -= PLT_THUMB_STUB_SIZE;
		branch_type = ST_BRANCH_TO_THUMB;
	      }
	    *unresolved_reloc_p = FALSE;
	  }

	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	check = relocation >> howto->rightshift;

	/* If this is a signed value, the rightshift just dropped
	   leading 1 bits (assuming twos complement).  */
	if ((bfd_signed_vma) relocation >= 0)
	  signed_check = check;
	else
	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);

	/* Calculate the permissible maximum and minimum values for
	   this relocation according to whether we're relocating for
	   Thumb-2 or not.  */
	bitsize = howto->bitsize;
	if (!thumb2_bl)
	  bitsize -= 2;
	reloc_signed_max = (1 << (bitsize - 1)) - 1;
	reloc_signed_min = ~reloc_signed_max;

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	if ((lower_insn & 0x5000) == 0x4000)
	  /* For a BLX instruction, make sure that the relocation is rounded up
	     to a word boundary.  This follows the semantics of the instruction
	     which specifies that bit 1 of the target address will come from bit
	     1 of the base address.  */
	  relocation = (relocation + 2) & ~ 3;

	/* Put RELOCATION back into the insn.  Assumes two's complement.
	   We use the Thumb-2 encoding, which is safe even if dealing with
	   a Thumb-1 instruction by virtue of our overflow check above.  */
	reloc_sign = (signed_check < 0) ? 1 : 0;
	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
		     | ((relocation >> 12) & 0x3ff)
		     | (reloc_sign << 10);
	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
		     | ((relocation >> 1) & 0x7ff);

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
      break;
11183
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction (B<cond>.W), with a
	 21-bit signed offset split across S, J1, J2 and two immediate
	 fields.  */
      {
	bfd_vma relocation;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	/* +/-1MiB range, in multiples of 2.  */
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    bfd_vma S = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1 = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2 = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = FALSE;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	/* Out-of-range or mode-changing branches go via a stub.  */
	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      {
		value = (stub_entry->stub_offset
			 + stub_entry->stub_sec->output_offset
			 + stub_entry->stub_sec->output_section->vma);
	      }
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma S = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >> 1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
11275
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction.  JUMP6 is the CBZ/CBNZ form,
	 which has a non-contiguous immediate field and cannot branch
	 backwards.  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  {
	    reloc_signed_min = 0;
	    if (globals->use_rel)
	      /* Reassemble the addend from the split i:imm5 field.  */
	      signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
	  }

	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	if (r_type == R_ARM_THM_JUMP6)
	  /* Scatter the offset back into the i:imm5 field.  */
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	/* Merge with the non-offset bits of the original instruction.  */
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }
11317
11318 case R_ARM_ALU_PCREL7_0:
11319 case R_ARM_ALU_PCREL15_8:
11320 case R_ARM_ALU_PCREL23_15:
11321 {
11322 bfd_vma insn;
11323 bfd_vma relocation;
11324
11325 insn = bfd_get_32 (input_bfd, hit_data);
11326 if (globals->use_rel)
11327 {
11328 /* Extract the addend. */
11329 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11330 signed_addend = addend;
11331 }
11332 relocation = value + signed_addend;
11333
11334 relocation -= (input_section->output_section->vma
11335 + input_section->output_offset
11336 + rel->r_offset);
11337 insn = (insn & ~0xfff)
11338 | ((howto->bitpos << 7) & 0xf00)
11339 | ((relocation >> howto->bitpos) & 0xff);
11340 bfd_put_32 (input_bfd, value, hit_data);
11341 }
11342 return bfd_reloc_ok;
11343
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* These are only used by the garbage-collection machinery; no
	 bits are patched in the output.  */
      return bfd_reloc_ok;
11347
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11371
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  Resolves locally,
	 so the relocation is never left for the dynamic linker.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11384
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  Allocates/initializes the GOT slot on
	 first use and emits any dynamic relocation or FDPIC rofixup
	 the slot needs.  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  /* Global symbol: the GOT offset lives in the hash entry.  */
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (((h->dynindx != -1) || globals->fdpic_p)
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info)
			   && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    {
		      outrel.r_info = 0;
		      if (globals->fdpic_p)
			isrofixup = 1;
		    }
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (globals->use_rel || outrel.r_info == 0))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      /* NOTE(review): this path reaches srofixup via
		 elf32_arm_hash_table(info) while the local-symbol path
		 below uses globals->srofixup — presumably the same
		 object; confirm and unify.  */
	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     elf32_arm_hash_table(info)->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);

	      else if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      /* Mark the slot as initialized.  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: the GOT offset comes from the per-bfd table.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL
		      && local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (dynreloc_st_type == STT_GNU_IFUNC)
		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	      else if (bfd_link_pic (info))
		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	      else
		{
		  outrel.r_info = 0;
		  if (globals->fdpic_p)
		    isrofixup = 1;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (globals->use_rel || outrel.r_info == 0)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     globals->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);

	      else if (outrel.r_info != 0)
		{
		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      /* R_ARM_GOT_PREL wants the absolute GOT-entry address; GOT32
	 wants it GOT-relative.  */
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11545
    case R_ARM_TLS_LDO32:
      /* Offset of the symbol within the TLS block (local-dynamic).  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11552
    case R_ARM_TLS_LDM32:
    case R_ARM_TLS_LDM32_FDPIC:
      /* Local-dynamic TLS: all such references share a single
	 module-ID GOT entry, cached in globals->tls_ldm_got.  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	/* Low bit flags "already initialized".  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_dll (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Static/executable link: module is always 1.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	if (r_type == R_ARM_TLS_LDM32_FDPIC)
	  {
	    /* FDPIC: resolve to the GOT-relative offset of the entry.  */
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    /* Otherwise the value is PC-relative to the GOT entry.  */
	    value = sgot->output_section->vma + sgot->output_offset + off
	      - (input_section->output_section->vma
		 + input_section->output_offset + rel->r_offset);

	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
11612
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_GD32_FDPIC:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_IE32_FDPIC:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic / initial-exec / descriptor-based TLS.
	 Initializes the GOT (and .got.plt for descriptors) entries on
	 first use, emits the dynamic relocations they require, and
	 materializes the value for the instruction being patched —
	 including rewriting the call for the relaxed TLS_CALL forms.  */
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    /* Global symbol: GOT offsets and TLS kind come from the
	       hash entry.  */
	    bfd_boolean dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = FALSE;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    /* Local symbol: look up the per-bfd tables.  */
	    BFD_ASSERT (local_got_offsets != NULL);
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happen from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE(rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	/* Low bit of the offset flags "entry already initialized".  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bfd_boolean need_relocs = FALSE;
	    Elf_Internal_Rela outrel;
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_dll (info) || indx != 0)
		&& (h == NULL
		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			&& !resolved_to_zero)
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = TRUE;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_dll (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		if (need_relocs)
		  {
		    /* Emit DTPMOD32 for the module ID ...  */
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			/* ... and DTPOFF32 for the in-module offset.  */
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  /* Symbol binds locally: the TP offset is known now.  */
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	/* Relaxed GD references skip past the (unused) 8-byte GD pair;
	   descriptor references resolve to the .got.plt slot.  */
	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
	      {
		/* ARM-mode caller: emit a BL/BLX to the trampoline.  */
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations needs special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    data = bfd_get_signed_32 (input_bfd, hit_data);
	    /* Bit 0 of the stored word flags a Thumb-mode consumer.  */
	    thumb = data & 1;
	    data &= ~1ul;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		/* 32-bit Thumb-2 encoding: fetch the second halfword.  */
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl/blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "Thumb", insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "ARM", insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    /* Resolve to the .got.plt descriptor slot, PC-relative.  */
	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  /* Plain GD32/IE32: PC-relative address of the GOT entry.  */
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
				 r_type == R_ARM_TLS_IE32_FDPIC))
	  {
	    /* For FDPIC relocations, resolve to the offset of the GOT
	       entry from the start of GOT.  */
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
11983
    case R_ARM_TLS_LE32:
      /* Local-exec TLS: only valid when the TP offset is fixed at
	 link time, i.e. never in a shared object.  */
      if (bfd_link_dll (info))
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
	       "in shared object"),
	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
	  return bfd_reloc_notsupported;
	}
      else
	value = tpoff (info, value);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
12000
12001 case R_ARM_V4BX:
12002 if (globals->fix_v4bx)
12003 {
12004 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12005
12006 /* Ensure that we have a BX instruction. */
12007 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12008
12009 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12010 {
12011 /* Branch to veneer. */
12012 bfd_vma glue_addr;
12013 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12014 glue_addr -= input_section->output_section->vma
12015 + input_section->output_offset
12016 + rel->r_offset + 8;
12017 insn = (insn & 0xf0000000) | 0x0a000000
12018 | ((glue_addr >> 2) & 0x00ffffff);
12019 }
12020 else
12021 {
12022 /* Preserve Rm (lowest four bits) and the condition code
12023 (highest four bits). Other bits encode MOV PC,Rm. */
12024 insn = (insn & 0xf000000f) | 0x01a0f000;
12025 }
12026
12027 bfd_put_32 (input_bfd, insn, hit_data);
12028 }
12029 return bfd_reloc_ok;
12030
12031 case R_ARM_MOVW_ABS_NC:
12032 case R_ARM_MOVT_ABS:
12033 case R_ARM_MOVW_PREL_NC:
12034 case R_ARM_MOVT_PREL:
12035 /* Until we properly support segment-base-relative addressing then
12036 we assume the segment base to be zero, as for the group relocations.
12037 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12038 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12039 case R_ARM_MOVW_BREL_NC:
12040 case R_ARM_MOVW_BREL:
12041 case R_ARM_MOVT_BREL:
12042 {
12043 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12044
12045 if (globals->use_rel)
12046 {
12047 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12048 signed_addend = (addend ^ 0x8000) - 0x8000;
12049 }
12050
12051 value += signed_addend;
12052
12053 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12054 value -= (input_section->output_section->vma
12055 + input_section->output_offset + rel->r_offset);
12056
12057 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12058 return bfd_reloc_overflow;
12059
12060 if (branch_type == ST_BRANCH_TO_THUMB)
12061 value |= 1;
12062
12063 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12064 || r_type == R_ARM_MOVT_BREL)
12065 value >>= 16;
12066
12067 insn &= 0xfff0f000;
12068 insn |= value & 0xfff;
12069 insn |= (value & 0xf000) << 4;
12070 bfd_put_32 (input_bfd, insn, hit_data);
12071 }
12072 return bfd_reloc_ok;
12073
12074 case R_ARM_THM_MOVW_ABS_NC:
12075 case R_ARM_THM_MOVT_ABS:
12076 case R_ARM_THM_MOVW_PREL_NC:
12077 case R_ARM_THM_MOVT_PREL:
12078 /* Until we properly support segment-base-relative addressing then
12079 we assume the segment base to be zero, as for the above relocations.
12080 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12081 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12082 as R_ARM_THM_MOVT_ABS. */
12083 case R_ARM_THM_MOVW_BREL_NC:
12084 case R_ARM_THM_MOVW_BREL:
12085 case R_ARM_THM_MOVT_BREL:
12086 {
12087 bfd_vma insn;
12088
12089 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12090 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12091
12092 if (globals->use_rel)
12093 {
12094 addend = ((insn >> 4) & 0xf000)
12095 | ((insn >> 15) & 0x0800)
12096 | ((insn >> 4) & 0x0700)
12097 | (insn & 0x00ff);
12098 signed_addend = (addend ^ 0x8000) - 0x8000;
12099 }
12100
12101 value += signed_addend;
12102
12103 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12104 value -= (input_section->output_section->vma
12105 + input_section->output_offset + rel->r_offset);
12106
12107 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12108 return bfd_reloc_overflow;
12109
12110 if (branch_type == ST_BRANCH_TO_THUMB)
12111 value |= 1;
12112
12113 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12114 || r_type == R_ARM_THM_MOVT_BREL)
12115 value >>= 16;
12116
12117 insn &= 0xfbf08f00;
12118 insn |= (value & 0xf000) << 4;
12119 insn |= (value & 0x0800) << 15;
12120 insn |= (value & 0x0700) << 4;
12121 insn |= (value & 0x00ff);
12122
12123 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12124 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12125 }
12126 return bfd_reloc_ok;
12127
12128 case R_ARM_ALU_PC_G0_NC:
12129 case R_ARM_ALU_PC_G1_NC:
12130 case R_ARM_ALU_PC_G0:
12131 case R_ARM_ALU_PC_G1:
12132 case R_ARM_ALU_PC_G2:
12133 case R_ARM_ALU_SB_G0_NC:
12134 case R_ARM_ALU_SB_G1_NC:
12135 case R_ARM_ALU_SB_G0:
12136 case R_ARM_ALU_SB_G1:
12137 case R_ARM_ALU_SB_G2:
12138 {
12139 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12140 bfd_vma pc = input_section->output_section->vma
12141 + input_section->output_offset + rel->r_offset;
12142 /* sb is the origin of the *segment* containing the symbol. */
12143 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12144 bfd_vma residual;
12145 bfd_vma g_n;
12146 bfd_signed_vma signed_value;
12147 int group = 0;
12148
12149 /* Determine which group of bits to select. */
12150 switch (r_type)
12151 {
12152 case R_ARM_ALU_PC_G0_NC:
12153 case R_ARM_ALU_PC_G0:
12154 case R_ARM_ALU_SB_G0_NC:
12155 case R_ARM_ALU_SB_G0:
12156 group = 0;
12157 break;
12158
12159 case R_ARM_ALU_PC_G1_NC:
12160 case R_ARM_ALU_PC_G1:
12161 case R_ARM_ALU_SB_G1_NC:
12162 case R_ARM_ALU_SB_G1:
12163 group = 1;
12164 break;
12165
12166 case R_ARM_ALU_PC_G2:
12167 case R_ARM_ALU_SB_G2:
12168 group = 2;
12169 break;
12170
12171 default:
12172 abort ();
12173 }
12174
12175 /* If REL, extract the addend from the insn. If RELA, it will
12176 have already been fetched for us. */
12177 if (globals->use_rel)
12178 {
12179 int negative;
12180 bfd_vma constant = insn & 0xff;
12181 bfd_vma rotation = (insn & 0xf00) >> 8;
12182
12183 if (rotation == 0)
12184 signed_addend = constant;
12185 else
12186 {
12187 /* Compensate for the fact that in the instruction, the
12188 rotation is stored in multiples of 2 bits. */
12189 rotation *= 2;
12190
12191 /* Rotate "constant" right by "rotation" bits. */
12192 signed_addend = (constant >> rotation) |
12193 (constant << (8 * sizeof (bfd_vma) - rotation));
12194 }
12195
12196 /* Determine if the instruction is an ADD or a SUB.
12197 (For REL, this determines the sign of the addend.) */
12198 negative = identify_add_or_sub (insn);
12199 if (negative == 0)
12200 {
12201 _bfd_error_handler
12202 /* xgettext:c-format */
12203 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12204 "are allowed for ALU group relocations"),
12205 input_bfd, input_section, (uint64_t) rel->r_offset);
12206 return bfd_reloc_overflow;
12207 }
12208
12209 signed_addend *= negative;
12210 }
12211
12212 /* Compute the value (X) to go in the place. */
12213 if (r_type == R_ARM_ALU_PC_G0_NC
12214 || r_type == R_ARM_ALU_PC_G1_NC
12215 || r_type == R_ARM_ALU_PC_G0
12216 || r_type == R_ARM_ALU_PC_G1
12217 || r_type == R_ARM_ALU_PC_G2)
12218 /* PC relative. */
12219 signed_value = value - pc + signed_addend;
12220 else
12221 /* Section base relative. */
12222 signed_value = value - sb + signed_addend;
12223
12224 /* If the target symbol is a Thumb function, then set the
12225 Thumb bit in the address. */
12226 if (branch_type == ST_BRANCH_TO_THUMB)
12227 signed_value |= 1;
12228
12229 /* Calculate the value of the relevant G_n, in encoded
12230 constant-with-rotation format. */
12231 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12232 group, &residual);
12233
12234 /* Check for overflow if required. */
12235 if ((r_type == R_ARM_ALU_PC_G0
12236 || r_type == R_ARM_ALU_PC_G1
12237 || r_type == R_ARM_ALU_PC_G2
12238 || r_type == R_ARM_ALU_SB_G0
12239 || r_type == R_ARM_ALU_SB_G1
12240 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12241 {
12242 _bfd_error_handler
12243 /* xgettext:c-format */
12244 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12245 "splitting %#" PRIx64 " for group relocation %s"),
12246 input_bfd, input_section, (uint64_t) rel->r_offset,
12247 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12248 howto->name);
12249 return bfd_reloc_overflow;
12250 }
12251
12252 /* Mask out the value and the ADD/SUB part of the opcode; take care
12253 not to destroy the S bit. */
12254 insn &= 0xff1ff000;
12255
12256 /* Set the opcode according to whether the value to go in the
12257 place is negative. */
12258 if (signed_value < 0)
12259 insn |= 1 << 22;
12260 else
12261 insn |= 1 << 23;
12262
12263 /* Encode the offset. */
12264 insn |= g_n;
12265
12266 bfd_put_32 (input_bfd, insn, hit_data);
12267 }
12268 return bfd_reloc_ok;
12269
12270 case R_ARM_LDR_PC_G0:
12271 case R_ARM_LDR_PC_G1:
12272 case R_ARM_LDR_PC_G2:
12273 case R_ARM_LDR_SB_G0:
12274 case R_ARM_LDR_SB_G1:
12275 case R_ARM_LDR_SB_G2:
12276 {
12277 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12278 bfd_vma pc = input_section->output_section->vma
12279 + input_section->output_offset + rel->r_offset;
12280 /* sb is the origin of the *segment* containing the symbol. */
12281 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12282 bfd_vma residual;
12283 bfd_signed_vma signed_value;
12284 int group = 0;
12285
12286 /* Determine which groups of bits to calculate. */
12287 switch (r_type)
12288 {
12289 case R_ARM_LDR_PC_G0:
12290 case R_ARM_LDR_SB_G0:
12291 group = 0;
12292 break;
12293
12294 case R_ARM_LDR_PC_G1:
12295 case R_ARM_LDR_SB_G1:
12296 group = 1;
12297 break;
12298
12299 case R_ARM_LDR_PC_G2:
12300 case R_ARM_LDR_SB_G2:
12301 group = 2;
12302 break;
12303
12304 default:
12305 abort ();
12306 }
12307
12308 /* If REL, extract the addend from the insn. If RELA, it will
12309 have already been fetched for us. */
12310 if (globals->use_rel)
12311 {
12312 int negative = (insn & (1 << 23)) ? 1 : -1;
12313 signed_addend = negative * (insn & 0xfff);
12314 }
12315
12316 /* Compute the value (X) to go in the place. */
12317 if (r_type == R_ARM_LDR_PC_G0
12318 || r_type == R_ARM_LDR_PC_G1
12319 || r_type == R_ARM_LDR_PC_G2)
12320 /* PC relative. */
12321 signed_value = value - pc + signed_addend;
12322 else
12323 /* Section base relative. */
12324 signed_value = value - sb + signed_addend;
12325
12326 /* Calculate the value of the relevant G_{n-1} to obtain
12327 the residual at that stage. */
12328 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12329 group - 1, &residual);
12330
12331 /* Check for overflow. */
12332 if (residual >= 0x1000)
12333 {
12334 _bfd_error_handler
12335 /* xgettext:c-format */
12336 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12337 "splitting %#" PRIx64 " for group relocation %s"),
12338 input_bfd, input_section, (uint64_t) rel->r_offset,
12339 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12340 howto->name);
12341 return bfd_reloc_overflow;
12342 }
12343
12344 /* Mask out the value and U bit. */
12345 insn &= 0xff7ff000;
12346
12347 /* Set the U bit if the value to go in the place is non-negative. */
12348 if (signed_value >= 0)
12349 insn |= 1 << 23;
12350
12351 /* Encode the offset. */
12352 insn |= residual;
12353
12354 bfd_put_32 (input_bfd, insn, hit_data);
12355 }
12356 return bfd_reloc_ok;
12357
12358 case R_ARM_LDRS_PC_G0:
12359 case R_ARM_LDRS_PC_G1:
12360 case R_ARM_LDRS_PC_G2:
12361 case R_ARM_LDRS_SB_G0:
12362 case R_ARM_LDRS_SB_G1:
12363 case R_ARM_LDRS_SB_G2:
12364 {
12365 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12366 bfd_vma pc = input_section->output_section->vma
12367 + input_section->output_offset + rel->r_offset;
12368 /* sb is the origin of the *segment* containing the symbol. */
12369 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12370 bfd_vma residual;
12371 bfd_signed_vma signed_value;
12372 int group = 0;
12373
12374 /* Determine which groups of bits to calculate. */
12375 switch (r_type)
12376 {
12377 case R_ARM_LDRS_PC_G0:
12378 case R_ARM_LDRS_SB_G0:
12379 group = 0;
12380 break;
12381
12382 case R_ARM_LDRS_PC_G1:
12383 case R_ARM_LDRS_SB_G1:
12384 group = 1;
12385 break;
12386
12387 case R_ARM_LDRS_PC_G2:
12388 case R_ARM_LDRS_SB_G2:
12389 group = 2;
12390 break;
12391
12392 default:
12393 abort ();
12394 }
12395
12396 /* If REL, extract the addend from the insn. If RELA, it will
12397 have already been fetched for us. */
12398 if (globals->use_rel)
12399 {
12400 int negative = (insn & (1 << 23)) ? 1 : -1;
12401 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12402 }
12403
12404 /* Compute the value (X) to go in the place. */
12405 if (r_type == R_ARM_LDRS_PC_G0
12406 || r_type == R_ARM_LDRS_PC_G1
12407 || r_type == R_ARM_LDRS_PC_G2)
12408 /* PC relative. */
12409 signed_value = value - pc + signed_addend;
12410 else
12411 /* Section base relative. */
12412 signed_value = value - sb + signed_addend;
12413
12414 /* Calculate the value of the relevant G_{n-1} to obtain
12415 the residual at that stage. */
12416 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12417 group - 1, &residual);
12418
12419 /* Check for overflow. */
12420 if (residual >= 0x100)
12421 {
12422 _bfd_error_handler
12423 /* xgettext:c-format */
12424 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12425 "splitting %#" PRIx64 " for group relocation %s"),
12426 input_bfd, input_section, (uint64_t) rel->r_offset,
12427 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12428 howto->name);
12429 return bfd_reloc_overflow;
12430 }
12431
12432 /* Mask out the value and U bit. */
12433 insn &= 0xff7ff0f0;
12434
12435 /* Set the U bit if the value to go in the place is non-negative. */
12436 if (signed_value >= 0)
12437 insn |= 1 << 23;
12438
12439 /* Encode the offset. */
12440 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12441
12442 bfd_put_32 (input_bfd, insn, hit_data);
12443 }
12444 return bfd_reloc_ok;
12445
12446 case R_ARM_LDC_PC_G0:
12447 case R_ARM_LDC_PC_G1:
12448 case R_ARM_LDC_PC_G2:
12449 case R_ARM_LDC_SB_G0:
12450 case R_ARM_LDC_SB_G1:
12451 case R_ARM_LDC_SB_G2:
12452 {
12453 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12454 bfd_vma pc = input_section->output_section->vma
12455 + input_section->output_offset + rel->r_offset;
12456 /* sb is the origin of the *segment* containing the symbol. */
12457 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12458 bfd_vma residual;
12459 bfd_signed_vma signed_value;
12460 int group = 0;
12461
12462 /* Determine which groups of bits to calculate. */
12463 switch (r_type)
12464 {
12465 case R_ARM_LDC_PC_G0:
12466 case R_ARM_LDC_SB_G0:
12467 group = 0;
12468 break;
12469
12470 case R_ARM_LDC_PC_G1:
12471 case R_ARM_LDC_SB_G1:
12472 group = 1;
12473 break;
12474
12475 case R_ARM_LDC_PC_G2:
12476 case R_ARM_LDC_SB_G2:
12477 group = 2;
12478 break;
12479
12480 default:
12481 abort ();
12482 }
12483
12484 /* If REL, extract the addend from the insn. If RELA, it will
12485 have already been fetched for us. */
12486 if (globals->use_rel)
12487 {
12488 int negative = (insn & (1 << 23)) ? 1 : -1;
12489 signed_addend = negative * ((insn & 0xff) << 2);
12490 }
12491
12492 /* Compute the value (X) to go in the place. */
12493 if (r_type == R_ARM_LDC_PC_G0
12494 || r_type == R_ARM_LDC_PC_G1
12495 || r_type == R_ARM_LDC_PC_G2)
12496 /* PC relative. */
12497 signed_value = value - pc + signed_addend;
12498 else
12499 /* Section base relative. */
12500 signed_value = value - sb + signed_addend;
12501
12502 /* Calculate the value of the relevant G_{n-1} to obtain
12503 the residual at that stage. */
12504 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12505 group - 1, &residual);
12506
12507 /* Check for overflow. (The absolute value to go in the place must be
12508 divisible by four and, after having been divided by four, must
12509 fit in eight bits.) */
12510 if ((residual & 0x3) != 0 || residual >= 0x400)
12511 {
12512 _bfd_error_handler
12513 /* xgettext:c-format */
12514 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12515 "splitting %#" PRIx64 " for group relocation %s"),
12516 input_bfd, input_section, (uint64_t) rel->r_offset,
12517 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12518 howto->name);
12519 return bfd_reloc_overflow;
12520 }
12521
12522 /* Mask out the value and U bit. */
12523 insn &= 0xff7fff00;
12524
12525 /* Set the U bit if the value to go in the place is non-negative. */
12526 if (signed_value >= 0)
12527 insn |= 1 << 23;
12528
12529 /* Encode the offset. */
12530 insn |= residual >> 2;
12531
12532 bfd_put_32 (input_bfd, insn, hit_data);
12533 }
12534 return bfd_reloc_ok;
12535
12536 case R_ARM_THM_ALU_ABS_G0_NC:
12537 case R_ARM_THM_ALU_ABS_G1_NC:
12538 case R_ARM_THM_ALU_ABS_G2_NC:
12539 case R_ARM_THM_ALU_ABS_G3_NC:
12540 {
12541 const int shift_array[4] = {0, 8, 16, 24};
12542 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12543 bfd_vma addr = value;
12544 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12545
12546 /* Compute address. */
12547 if (globals->use_rel)
12548 signed_addend = insn & 0xff;
12549 addr += signed_addend;
12550 if (branch_type == ST_BRANCH_TO_THUMB)
12551 addr |= 1;
12552 /* Clean imm8 insn. */
12553 insn &= 0xff00;
12554 /* And update with correct part of address. */
12555 insn |= (addr >> shift) & 0xff;
12556 /* Update insn. */
12557 bfd_put_16 (input_bfd, insn, hit_data);
12558 }
12559
12560 *unresolved_reloc_p = FALSE;
12561 return bfd_reloc_ok;
12562
12563 case R_ARM_GOTOFFFUNCDESC:
12564 {
12565 if (h == NULL)
12566 {
12567 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12568 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12569 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12570 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12571 bfd_vma seg = -1;
12572
12573 if (bfd_link_pic(info) && dynindx == 0)
12574 abort();
12575
12576 /* Resolve relocation. */
12577 bfd_put_32(output_bfd, (offset + sgot->output_offset)
12578 , contents + rel->r_offset);
12579 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12580 not done yet. */
12581 arm_elf_fill_funcdesc(output_bfd, info,
12582 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12583 dynindx, offset, addr, dynreloc_value, seg);
12584 }
12585 else
12586 {
12587 int dynindx;
12588 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12589 bfd_vma addr;
12590 bfd_vma seg = -1;
12591
12592 /* For static binaries, sym_sec can be null. */
12593 if (sym_sec)
12594 {
12595 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12596 addr = dynreloc_value - sym_sec->output_section->vma;
12597 }
12598 else
12599 {
12600 dynindx = 0;
12601 addr = 0;
12602 }
12603
12604 if (bfd_link_pic(info) && dynindx == 0)
12605 abort();
12606
12607 /* This case cannot occur since funcdesc is allocated by
12608 the dynamic loader so we cannot resolve the relocation. */
12609 if (h->dynindx != -1)
12610 abort();
12611
12612 /* Resolve relocation. */
12613 bfd_put_32(output_bfd, (offset + sgot->output_offset),
12614 contents + rel->r_offset);
12615 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12616 arm_elf_fill_funcdesc(output_bfd, info,
12617 &eh->fdpic_cnts.funcdesc_offset,
12618 dynindx, offset, addr, dynreloc_value, seg);
12619 }
12620 }
12621 *unresolved_reloc_p = FALSE;
12622 return bfd_reloc_ok;
12623
12624 case R_ARM_GOTFUNCDESC:
12625 {
12626 if (h != NULL)
12627 {
12628 Elf_Internal_Rela outrel;
12629
12630 /* Resolve relocation. */
12631 bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12632 + sgot->output_offset),
12633 contents + rel->r_offset);
12634 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12635 if(h->dynindx == -1)
12636 {
12637 int dynindx;
12638 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12639 bfd_vma addr;
12640 bfd_vma seg = -1;
12641
12642 /* For static binaries sym_sec can be null. */
12643 if (sym_sec)
12644 {
12645 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12646 addr = dynreloc_value - sym_sec->output_section->vma;
12647 }
12648 else
12649 {
12650 dynindx = 0;
12651 addr = 0;
12652 }
12653
12654 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12655 arm_elf_fill_funcdesc(output_bfd, info,
12656 &eh->fdpic_cnts.funcdesc_offset,
12657 dynindx, offset, addr, dynreloc_value, seg);
12658 }
12659
12660 /* Add a dynamic relocation on GOT entry if not already done. */
12661 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12662 {
12663 if (h->dynindx == -1)
12664 {
12665 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12666 if (h->root.type == bfd_link_hash_undefweak)
12667 bfd_put_32(output_bfd, 0, sgot->contents
12668 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12669 else
12670 bfd_put_32(output_bfd, sgot->output_section->vma
12671 + sgot->output_offset
12672 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12673 sgot->contents
12674 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12675 }
12676 else
12677 {
12678 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12679 }
12680 outrel.r_offset = sgot->output_section->vma
12681 + sgot->output_offset
12682 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12683 outrel.r_addend = 0;
12684 if (h->dynindx == -1 && !bfd_link_pic(info))
12685 if (h->root.type == bfd_link_hash_undefweak)
12686 arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12687 else
12688 arm_elf_add_rofixup(output_bfd, globals->srofixup,
12689 outrel.r_offset);
12690 else
12691 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12692 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12693 }
12694 }
12695 else
12696 {
12697 /* Such relocation on static function should not have been
12698 emitted by the compiler. */
12699 abort();
12700 }
12701 }
12702 *unresolved_reloc_p = FALSE;
12703 return bfd_reloc_ok;
12704
12705 case R_ARM_FUNCDESC:
12706 {
12707 if (h == NULL)
12708 {
12709 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12710 Elf_Internal_Rela outrel;
12711 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12712 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12713 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12714 bfd_vma seg = -1;
12715
12716 if (bfd_link_pic(info) && dynindx == 0)
12717 abort();
12718
12719 /* Replace static FUNCDESC relocation with a
12720 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12721 executable. */
12722 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12723 outrel.r_offset = input_section->output_section->vma
12724 + input_section->output_offset + rel->r_offset;
12725 outrel.r_addend = 0;
12726 if (bfd_link_pic(info))
12727 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12728 else
12729 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12730
12731 bfd_put_32 (input_bfd, sgot->output_section->vma
12732 + sgot->output_offset + offset, hit_data);
12733
12734 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12735 arm_elf_fill_funcdesc(output_bfd, info,
12736 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12737 dynindx, offset, addr, dynreloc_value, seg);
12738 }
12739 else
12740 {
12741 if (h->dynindx == -1)
12742 {
12743 int dynindx;
12744 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12745 bfd_vma addr;
12746 bfd_vma seg = -1;
12747 Elf_Internal_Rela outrel;
12748
12749 /* For static binaries sym_sec can be null. */
12750 if (sym_sec)
12751 {
12752 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12753 addr = dynreloc_value - sym_sec->output_section->vma;
12754 }
12755 else
12756 {
12757 dynindx = 0;
12758 addr = 0;
12759 }
12760
12761 if (bfd_link_pic(info) && dynindx == 0)
12762 abort();
12763
12764 /* Replace static FUNCDESC relocation with a
12765 R_ARM_RELATIVE dynamic relocation. */
12766 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12767 outrel.r_offset = input_section->output_section->vma
12768 + input_section->output_offset + rel->r_offset;
12769 outrel.r_addend = 0;
12770 if (bfd_link_pic(info))
12771 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12772 else
12773 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12774
12775 bfd_put_32 (input_bfd, sgot->output_section->vma
12776 + sgot->output_offset + offset, hit_data);
12777
12778 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12779 arm_elf_fill_funcdesc(output_bfd, info,
12780 &eh->fdpic_cnts.funcdesc_offset,
12781 dynindx, offset, addr, dynreloc_value, seg);
12782 }
12783 else
12784 {
12785 Elf_Internal_Rela outrel;
12786
12787 /* Add a dynamic relocation. */
12788 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12789 outrel.r_offset = input_section->output_section->vma
12790 + input_section->output_offset + rel->r_offset;
12791 outrel.r_addend = 0;
12792 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12793 }
12794 }
12795 }
12796 *unresolved_reloc_p = FALSE;
12797 return bfd_reloc_ok;
12798
12799 case R_ARM_THM_BF16:
12800 {
12801 bfd_vma relocation;
12802 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12803 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12804
12805 if (globals->use_rel)
12806 {
12807 bfd_vma immA = (upper_insn & 0x001f);
12808 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12809 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12810 addend = (immA << 12);
12811 addend |= (immB << 2);
12812 addend |= (immC << 1);
12813 addend |= 1;
12814 /* Sign extend. */
12815 signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12816 }
12817
12818 relocation = value + signed_addend;
12819 relocation -= (input_section->output_section->vma
12820 + input_section->output_offset
12821 + rel->r_offset);
12822
12823 /* Put RELOCATION back into the insn. */
12824 {
12825 bfd_vma immA = (relocation & 0x0001f000) >> 12;
12826 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12827 bfd_vma immC = (relocation & 0x00000002) >> 1;
12828
12829 upper_insn = (upper_insn & 0xffe0) | immA;
12830 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12831 }
12832
12833 /* Put the relocated value back in the object file: */
12834 bfd_put_16 (input_bfd, upper_insn, hit_data);
12835 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12836
12837 return bfd_reloc_ok;
12838 }
12839
12840 case R_ARM_THM_BF12:
12841 {
12842 bfd_vma relocation;
12843 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12844 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12845
12846 if (globals->use_rel)
12847 {
12848 bfd_vma immA = (upper_insn & 0x0001);
12849 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12850 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12851 addend = (immA << 12);
12852 addend |= (immB << 2);
12853 addend |= (immC << 1);
12854 addend |= 1;
12855 /* Sign extend. */
12856 addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12857 signed_addend = addend;
12858 }
12859
12860 relocation = value + signed_addend;
12861 relocation -= (input_section->output_section->vma
12862 + input_section->output_offset
12863 + rel->r_offset);
12864
12865 /* Put RELOCATION back into the insn. */
12866 {
12867 bfd_vma immA = (relocation & 0x00001000) >> 12;
12868 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12869 bfd_vma immC = (relocation & 0x00000002) >> 1;
12870
12871 upper_insn = (upper_insn & 0xfffe) | immA;
12872 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12873 }
12874
12875 /* Put the relocated value back in the object file: */
12876 bfd_put_16 (input_bfd, upper_insn, hit_data);
12877 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12878
12879 return bfd_reloc_ok;
12880 }
12881
12882 case R_ARM_THM_BF18:
12883 {
12884 bfd_vma relocation;
12885 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12886 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12887
12888 if (globals->use_rel)
12889 {
12890 bfd_vma immA = (upper_insn & 0x007f);
12891 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12892 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12893 addend = (immA << 12);
12894 addend |= (immB << 2);
12895 addend |= (immC << 1);
12896 addend |= 1;
12897 /* Sign extend. */
12898 addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
12899 signed_addend = addend;
12900 }
12901
12902 relocation = value + signed_addend;
12903 relocation -= (input_section->output_section->vma
12904 + input_section->output_offset
12905 + rel->r_offset);
12906
12907 /* Put RELOCATION back into the insn. */
12908 {
12909 bfd_vma immA = (relocation & 0x0007f000) >> 12;
12910 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12911 bfd_vma immC = (relocation & 0x00000002) >> 1;
12912
12913 upper_insn = (upper_insn & 0xff80) | immA;
12914 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12915 }
12916
12917 /* Put the relocated value back in the object file: */
12918 bfd_put_16 (input_bfd, upper_insn, hit_data);
12919 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12920
12921 return bfd_reloc_ok;
12922 }
12923
12924 default:
12925 return bfd_reloc_notsupported;
12926 }
12927 }
12928
12929 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
12930 static void
12931 arm_add_to_rel (bfd * abfd,
12932 bfd_byte * address,
12933 reloc_howto_type * howto,
12934 bfd_signed_vma increment)
12935 {
12936 bfd_signed_vma addend;
12937
12938 if (howto->type == R_ARM_THM_CALL
12939 || howto->type == R_ARM_THM_JUMP24)
12940 {
12941 int upper_insn, lower_insn;
12942 int upper, lower;
12943
12944 upper_insn = bfd_get_16 (abfd, address);
12945 lower_insn = bfd_get_16 (abfd, address + 2);
12946 upper = upper_insn & 0x7ff;
12947 lower = lower_insn & 0x7ff;
12948
12949 addend = (upper << 12) | (lower << 1);
12950 addend += increment;
12951 addend >>= 1;
12952
12953 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
12954 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
12955
12956 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
12957 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
12958 }
12959 else
12960 {
12961 bfd_vma contents;
12962
12963 contents = bfd_get_32 (abfd, address);
12964
12965 /* Get the (signed) value from the instruction. */
12966 addend = contents & howto->src_mask;
12967 if (addend & ((howto->src_mask + 1) >> 1))
12968 {
12969 bfd_signed_vma mask;
12970
12971 mask = -1;
12972 mask &= ~ howto->src_mask;
12973 addend |= mask;
12974 }
12975
12976 /* Add in the increment, (which is a byte value). */
12977 switch (howto->type)
12978 {
12979 default:
12980 addend += increment;
12981 break;
12982
12983 case R_ARM_PC24:
12984 case R_ARM_PLT32:
12985 case R_ARM_CALL:
12986 case R_ARM_JUMP24:
12987 addend <<= howto->size;
12988 addend += increment;
12989
12990 /* Should we check for overflow here ? */
12991
12992 /* Drop any undesired bits. */
12993 addend >>= howto->rightshift;
12994 break;
12995 }
12996
12997 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
12998
12999 bfd_put_32 (abfd, contents, address);
13000 }
13001 }
13002
/* Return non-zero if R_TYPE is any ARM TLS relocation, covering the
   traditional GD/LD/IE/LE models (and their FDPIC variants) as well as
   the GNU TLS-descriptor dialect tested by IS_ARM_TLS_GNU_RELOC.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC  \
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))
13016
/* Return non-zero if R_TYPE belongs to the GNU TLS-descriptor dialect:
   the relocations emitted for TLS descriptor sequences (GOTDESC, the
   ARM/Thumb TLS calls, and the marker relocs on the descriptor
   sequences themselves).  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13024
/* Relocate an ARM ELF section.

   OUTPUT_BFD is the bfd being linked into.  INPUT_SECTION, from
   INPUT_BFD (whose raw data is in CONTENTS), is relocated using the
   entries in RELOCS.  LOCAL_SYMS and LOCAL_SECTIONS map INPUT_BFD's
   local symbol indices to symbols and sections.

   Returns FALSE on a hard error (unrecognized relocation type,
   unresolvable relocation, or an unsupported relocation against a
   SEC_MERGE section); otherwise returns TRUE, reporting soft errors
   through the linker callbacks.  */

static bfd_boolean
elf32_arm_relocate_section (bfd *		   output_bfd,
			    struct bfd_link_info * info,
			    bfd *		   input_bfd,
			    asection *		   input_section,
			    bfd_byte *		   contents,
			    Elf_Internal_Rela *    relocs,
			    Elf_Internal_Sym *	   local_syms,
			    asection **		   local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* Map target-specific/legacy reloc numbers to the canonical set.  */
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT are garbage-collection markers only; they
	 need no processing here.  */
      if ( r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* A local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      /* REL format: addends live in the section contents, so
		 SEC_MERGE adjustments must be applied in place.  */
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  /* First extract the existing in-place addend, which
		     is encoded differently for MOVW/MOVT pairs.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Only simple contiguous-mask, unshifted howtos
			 can be handled generically.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#" PRIx64 "): "
			       "%s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (uint64_t) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  /* Re-point the addend at the merged location.  */
		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* A global symbol: resolve through the hash table.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a name for diagnostics: the hash entry's name for
	 globals, the ELF string (or section name) for locals.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (sec);
	}

      /* Complain when a TLS relocation is used with a non-TLS symbol,
	 or vice versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}
13370
13371 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13372 adds the edit to the start of the list. (The list must be built in order of
13373 ascending TINDEX: the function's callers are primarily responsible for
13374 maintaining that condition). */
13375
13376 static void
13377 add_unwind_table_edit (arm_unwind_table_edit **head,
13378 arm_unwind_table_edit **tail,
13379 arm_unwind_edit_type type,
13380 asection *linked_section,
13381 unsigned int tindex)
13382 {
13383 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13384 xmalloc (sizeof (arm_unwind_table_edit));
13385
13386 new_edit->type = type;
13387 new_edit->linked_section = linked_section;
13388 new_edit->index = tindex;
13389
13390 if (tindex > 0)
13391 {
13392 new_edit->next = NULL;
13393
13394 if (*tail)
13395 (*tail)->next = new_edit;
13396
13397 (*tail) = new_edit;
13398
13399 if (!*head)
13400 (*head) = new_edit;
13401 }
13402 else
13403 {
13404 new_edit->next = *head;
13405
13406 if (!*tail)
13407 *tail = new_edit;
13408
13409 *head = new_edit;
13410 }
13411 }
13412
13413 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13414
13415 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13416 static void
13417 adjust_exidx_size(asection *exidx_sec, int adjust)
13418 {
13419 asection *out_sec;
13420
13421 if (!exidx_sec->rawsize)
13422 exidx_sec->rawsize = exidx_sec->size;
13423
13424 bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13425 out_sec = exidx_sec->output_section;
13426 /* Adjust size of output section. */
13427 bfd_set_section_size (out_sec, out_sec->size +adjust);
13428 }
13429
13430 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13431 static void
13432 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13433 {
13434 struct _arm_elf_section_data *exidx_arm_data;
13435
13436 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13437 add_unwind_table_edit (
13438 &exidx_arm_data->u.exidx.unwind_edit_list,
13439 &exidx_arm_data->u.exidx.unwind_edit_tail,
13440 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13441
13442 exidx_arm_data->additional_reloc_count++;
13443
13444 adjust_exidx_size(exidx_sec, 8);
13445 }
13446
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   TEXT_SECTION_ORDER holds NUM_TEXT_SECTIONS text sections, assumed to
   be in ascending VMA order (see the second walk below).

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* -1: no entry seen yet; 0: EXIDX_CANTUNWIND; 1: inlined unwind
     opcodes; 2: normal table entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the
     corresponding text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Each index table entry is two words; the second word
	 classifies the entry.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      /* Elide a CANTUNWIND that follows another CANTUNWIND.  */
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
13631
13632 static bfd_boolean
13633 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13634 bfd *ibfd, const char *name)
13635 {
13636 asection *sec, *osec;
13637
13638 sec = bfd_get_linker_section (ibfd, name);
13639 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13640 return TRUE;
13641
13642 osec = sec->output_section;
13643 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13644 return TRUE;
13645
13646 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13647 sec->output_offset, sec->size))
13648 return FALSE;
13649
13650 return TRUE;
13651 }
13652
13653 static bfd_boolean
13654 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13655 {
13656 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13657 asection *sec, *osec;
13658
13659 if (globals == NULL)
13660 return FALSE;
13661
13662 /* Invoke the regular ELF backend linker to do all the work. */
13663 if (!bfd_elf_final_link (abfd, info))
13664 return FALSE;
13665
13666 /* Process stub sections (eg BE8 encoding, ...). */
13667 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13668 unsigned int i;
13669 for (i=0; i<htab->top_id; i++)
13670 {
13671 sec = htab->stub_group[i].stub_sec;
13672 /* Only process it once, in its link_sec slot. */
13673 if (sec && i == htab->stub_group[i].link_sec->id)
13674 {
13675 osec = sec->output_section;
13676 elf32_arm_write_section (abfd, info, sec, sec->contents);
13677 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13678 sec->output_offset, sec->size))
13679 return FALSE;
13680 }
13681 }
13682
13683 /* Write out any glue sections now that we have created all the
13684 stubs. */
13685 if (globals->bfd_of_glue_owner != NULL)
13686 {
13687 if (! elf32_arm_output_glue_section (info, abfd,
13688 globals->bfd_of_glue_owner,
13689 ARM2THUMB_GLUE_SECTION_NAME))
13690 return FALSE;
13691
13692 if (! elf32_arm_output_glue_section (info, abfd,
13693 globals->bfd_of_glue_owner,
13694 THUMB2ARM_GLUE_SECTION_NAME))
13695 return FALSE;
13696
13697 if (! elf32_arm_output_glue_section (info, abfd,
13698 globals->bfd_of_glue_owner,
13699 VFP11_ERRATUM_VENEER_SECTION_NAME))
13700 return FALSE;
13701
13702 if (! elf32_arm_output_glue_section (info, abfd,
13703 globals->bfd_of_glue_owner,
13704 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13705 return FALSE;
13706
13707 if (! elf32_arm_output_glue_section (info, abfd,
13708 globals->bfd_of_glue_owner,
13709 ARM_BX_GLUE_SECTION_NAME))
13710 return FALSE;
13711 }
13712
13713 return TRUE;
13714 }
13715
13716 /* Return a best guess for the machine number based on the attributes. */
13717
13718 static unsigned int
13719 bfd_arm_get_mach_from_attributes (bfd * abfd)
13720 {
13721 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13722
13723 switch (arch)
13724 {
13725 case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13726 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13727 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13728 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13729
13730 case TAG_CPU_ARCH_V5TE:
13731 {
13732 char * name;
13733
13734 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13735 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13736
13737 if (name)
13738 {
13739 if (strcmp (name, "IWMMXT2") == 0)
13740 return bfd_mach_arm_iWMMXt2;
13741
13742 if (strcmp (name, "IWMMXT") == 0)
13743 return bfd_mach_arm_iWMMXt;
13744
13745 if (strcmp (name, "XSCALE") == 0)
13746 {
13747 int wmmx;
13748
13749 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13750 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13751 switch (wmmx)
13752 {
13753 case 1: return bfd_mach_arm_iWMMXt;
13754 case 2: return bfd_mach_arm_iWMMXt2;
13755 default: return bfd_mach_arm_XScale;
13756 }
13757 }
13758 }
13759
13760 return bfd_mach_arm_5TE;
13761 }
13762
13763 case TAG_CPU_ARCH_V5TEJ:
13764 return bfd_mach_arm_5TEJ;
13765 case TAG_CPU_ARCH_V6:
13766 return bfd_mach_arm_6;
13767 case TAG_CPU_ARCH_V6KZ:
13768 return bfd_mach_arm_6KZ;
13769 case TAG_CPU_ARCH_V6T2:
13770 return bfd_mach_arm_6T2;
13771 case TAG_CPU_ARCH_V6K:
13772 return bfd_mach_arm_6K;
13773 case TAG_CPU_ARCH_V7:
13774 return bfd_mach_arm_7;
13775 case TAG_CPU_ARCH_V6_M:
13776 return bfd_mach_arm_6M;
13777 case TAG_CPU_ARCH_V6S_M:
13778 return bfd_mach_arm_6SM;
13779 case TAG_CPU_ARCH_V7E_M:
13780 return bfd_mach_arm_7EM;
13781 case TAG_CPU_ARCH_V8:
13782 return bfd_mach_arm_8;
13783 case TAG_CPU_ARCH_V8R:
13784 return bfd_mach_arm_8R;
13785 case TAG_CPU_ARCH_V8M_BASE:
13786 return bfd_mach_arm_8M_BASE;
13787 case TAG_CPU_ARCH_V8M_MAIN:
13788 return bfd_mach_arm_8M_MAIN;
13789 case TAG_CPU_ARCH_V8_1M_MAIN:
13790 return bfd_mach_arm_8_1M_MAIN;
13791
13792 default:
13793 /* Force entry to be added for any new known Tag_CPU_arch value. */
13794 BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13795
13796 /* Unknown Tag_CPU_arch value. */
13797 return bfd_mach_arm_unknown;
13798 }
13799 }
13800
13801 /* Set the right machine number. */
13802
13803 static bfd_boolean
13804 elf32_arm_object_p (bfd *abfd)
13805 {
13806 unsigned int mach;
13807
13808 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13809
13810 if (mach == bfd_mach_arm_unknown)
13811 {
13812 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13813 mach = bfd_mach_arm_ep9312;
13814 else
13815 mach = bfd_arm_get_mach_from_attributes (abfd);
13816 }
13817
13818 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13819 return TRUE;
13820 }
13821
13822 /* Function to keep ARM specific flags in the ELF header. */
13823
13824 static bfd_boolean
13825 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13826 {
13827 if (elf_flags_init (abfd)
13828 && elf_elfheader (abfd)->e_flags != flags)
13829 {
13830 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13831 {
13832 if (flags & EF_ARM_INTERWORK)
13833 _bfd_error_handler
13834 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13835 abfd);
13836 else
13837 _bfd_error_handler
13838 (_("warning: clearing the interworking flag of %pB due to outside request"),
13839 abfd);
13840 }
13841 }
13842 else
13843 {
13844 elf_elfheader (abfd)->e_flags = flags;
13845 elf_flags_init (abfd) = TRUE;
13846 }
13847
13848 return TRUE;
13849 }
13850
13851 /* Copy backend specific data from one object module to another. */
13852
13853 static bfd_boolean
13854 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13855 {
13856 flagword in_flags;
13857 flagword out_flags;
13858
13859 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13860 return TRUE;
13861
13862 in_flags = elf_elfheader (ibfd)->e_flags;
13863 out_flags = elf_elfheader (obfd)->e_flags;
13864
13865 if (elf_flags_init (obfd)
13866 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13867 && in_flags != out_flags)
13868 {
13869 /* Cannot mix APCS26 and APCS32 code. */
13870 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13871 return FALSE;
13872
13873 /* Cannot mix float APCS and non-float APCS code. */
13874 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13875 return FALSE;
13876
13877 /* If the src and dest have different interworking flags
13878 then turn off the interworking bit. */
13879 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13880 {
13881 if (out_flags & EF_ARM_INTERWORK)
13882 _bfd_error_handler
13883 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13884 obfd, ibfd);
13885
13886 in_flags &= ~EF_ARM_INTERWORK;
13887 }
13888
13889 /* Likewise for PIC, though don't warn for this case. */
13890 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13891 in_flags &= ~EF_ARM_PIC;
13892 }
13893
13894 elf_elfheader (obfd)->e_flags = in_flags;
13895 elf_flags_init (obfd) = TRUE;
13896
13897 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13898 }
13899
/* Values for Tag_ABI_PCS_R9_use, as defined by the ARM EABI build
   attribute specification.  */
enum
{
  AEABI_R9_V6,		/* R9 is a plain callee-saved register (v6).  */
  AEABI_R9_SB,		/* R9 holds the static base (SB).  */
  AEABI_R9_TLS,		/* R9 holds the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 is not used.  */
};

/* Values for Tag_ABI_PCS_RW_data, as defined by the ARM EABI build
   attribute specification.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* RW static data is not used.  */
};

/* Values for Tag_ABI_enum_size, as defined by the ARM EABI build
   attribute specification.  */
enum
{
  AEABI_enum_unused,		/* Enums not used.  */
  AEABI_enum_short,		/* Enums are as small as possible.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits.  */
  AEABI_enum_forced_wide	/* Enums are forced to 32 bits.  */
};
13926
13927 /* Determine whether an object attribute tag takes an integer, a
13928 string or both. */
13929
13930 static int
13931 elf32_arm_obj_attrs_arg_type (int tag)
13932 {
13933 if (tag == Tag_compatibility)
13934 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13935 else if (tag == Tag_nodefaults)
13936 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
13937 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
13938 return ATTR_TYPE_FLAG_STR_VAL;
13939 else if (tag < 32)
13940 return ATTR_TYPE_FLAG_INT_VAL;
13941 else
13942 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
13943 }
13944
13945 /* The ABI defines that Tag_conformance should be emitted first, and that
13946 Tag_nodefaults should be second (if either is defined). This sets those
13947 two positions, and bumps up the position of all the remaining tags to
13948 compensate. */
13949 static int
13950 elf32_arm_obj_attrs_order (int num)
13951 {
13952 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
13953 return Tag_conformance;
13954 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
13955 return Tag_nodefaults;
13956 if ((num - 2) < Tag_nodefaults)
13957 return num - 2;
13958 if ((num - 1) < Tag_conformance)
13959 return num - 1;
13960 return num;
13961 }
13962
13963 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
13964 static bfd_boolean
13965 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
13966 {
13967 if ((tag & 127) < 64)
13968 {
13969 _bfd_error_handler
13970 (_("%pB: unknown mandatory EABI object attribute %d"),
13971 abfd, tag);
13972 bfd_set_error (bfd_error_bad_value);
13973 return FALSE;
13974 }
13975 else
13976 {
13977 _bfd_error_handler
13978 (_("warning: %pB: unknown EABI object attribute %d"),
13979 abfd, tag);
13980 return TRUE;
13981 }
13982 }
13983
13984 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
13985 Returns -1 if no architecture could be read. */
13986
13987 static int
13988 get_secondary_compatible_arch (bfd *abfd)
13989 {
13990 obj_attribute *attr =
13991 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13992
13993 /* Note: the tag and its argument below are uleb128 values, though
13994 currently-defined values fit in one byte for each. */
13995 if (attr->s
13996 && attr->s[0] == Tag_CPU_arch
13997 && (attr->s[1] & 128) != 128
13998 && attr->s[2] == 0)
13999 return attr->s[1];
14000
14001 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14002 return -1;
14003 }
14004
14005 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14006 The tag is removed if ARCH is -1. */
14007
14008 static void
14009 set_secondary_compatible_arch (bfd *abfd, int arch)
14010 {
14011 obj_attribute *attr =
14012 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14013
14014 if (arch == -1)
14015 {
14016 attr->s = NULL;
14017 return;
14018 }
14019
14020 /* Note: the tag and its argument below are uleb128 values, though
14021 currently-defined values fit in one byte for each. */
14022 if (!attr->s)
14023 attr->s = (char *) bfd_alloc (abfd, 3);
14024 attr->s[0] = Tag_CPU_arch;
14025 attr->s[1] = arch;
14026 attr->s[2] = '\0';
14027 }
14028
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  OLDTAG is the Tag_CPU_arch value accumulated so far in the
   output; NEWTAG is the value from the input bfd IBFD (used only for error
   reporting).  *SECONDARY_COMPAT_OUT and SECONDARY_COMPAT are the
   corresponding Tag_also_compatible_with architectures, or -1 when absent.
   On success the merged tag is returned and *SECONDARY_COMPAT_OUT updated;
   on an unresolvable conflict an error is reported and -1 returned.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each vXXX table below gives the merged architecture when the HIGHER of
     the two tags is TAG_CPU_ARCH_VXXX; it is indexed by the lower tag.  An
     entry of -1 marks a pair with no common superset architecture.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8r[] =
    {
      T(V8R),    /* PRE_V4.  */
      T(V8R),    /* V4.  */
      T(V8R),    /* V4T.  */
      T(V8R),    /* V5T.  */
      T(V8R),    /* V5TE.  */
      T(V8R),    /* V5TEJ.  */
      T(V8R),    /* V6.  */
      T(V8R),    /* V6KZ.  */
      T(V8R),    /* V6T2.  */
      T(V8R),    /* V6K.  */
      T(V8R),    /* V7.  */
      T(V8R),    /* V6_M.  */
      T(V8R),    /* V6S_M.  */
      T(V8R),    /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V8R),    /* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      -1,		/* V7.  */
      T(V8M_BASE),	/* V6_M.  */
      T(V8M_BASE),	/* V6S_M.  */
      -1,		/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE)	/* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8M_MAIN),	/* V7.  */
      T(V8M_MAIN),	/* V6_M.  */
      T(V8M_MAIN),	/* V6S_M.  */
      T(V8M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_MAIN),	/* V8-M BASELINE.  */
      T(V8M_MAIN)	/* V8-M MAINLINE.  */
    };
  const int v8_1m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8_1M_MAIN),	/* V7.  */
      T(V8_1M_MAIN),	/* V6_M.  */
      T(V8_1M_MAIN),	/* V6S_M.  */
      T(V8_1M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8_1M_MAIN),	/* V8-M BASELINE.  */
      T(V8_1M_MAIN),	/* V8-M MAINLINE.  */
      -1,		/* Unused (18).  */
      -1,		/* Unused (19).  */
      -1,		/* Unused (20).  */
      T(V8_1M_MAIN)	/* V8.1-M MAINLINE.  */
    };
  const int v4t_plus_v6_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V4T),		/* V4T.  */
      T(V5T),		/* V5T.  */
      T(V5TE),		/* V5TE.  */
      T(V5TEJ),		/* V5TEJ.  */
      T(V6),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6T2),		/* V6T2.  */
      T(V6K),		/* V6K.  */
      T(V7),		/* V7.  */
      T(V6_M),		/* V6_M.  */
      T(V6S_M),		/* V6S_M.  */
      T(V7E_M),		/* V7E_M.  */
      T(V8),		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE),	/* V8-M BASELINE.  */
      T(V8M_MAIN),	/* V8-M MAINLINE.  */
      -1,		/* Unused (18).  */
      -1,		/* Unused (19).  */
      -1,		/* Unused (20).  */
      T(V8_1M_MAIN),	/* V8.1-M MAINLINE.  */
      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
    };
  /* comb[] is indexed by the higher tag minus TAG_CPU_ARCH_V6T2.  NULL
     entries correspond to the unused Tag_CPU_arch values 18-20.  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      NULL,
      NULL,
      NULL,
      v8_1m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
14324
14325 /* Query attributes object to see if integer divide instructions may be
14326 present in an object. */
14327 static bfd_boolean
14328 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14329 {
14330 int arch = attr[Tag_CPU_arch].i;
14331 int profile = attr[Tag_CPU_arch_profile].i;
14332
14333 switch (attr[Tag_DIV_use].i)
14334 {
14335 case 0:
14336 /* Integer divide allowed if instruction contained in archetecture. */
14337 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14338 return TRUE;
14339 else if (arch >= TAG_CPU_ARCH_V7E_M)
14340 return TRUE;
14341 else
14342 return FALSE;
14343
14344 case 1:
14345 /* Integer divide explicitly prohibited. */
14346 return FALSE;
14347
14348 default:
14349 /* Unrecognised case - treat as allowing divide everywhere. */
14350 case 2:
14351 /* Integer divide allowed in ARM state. */
14352 return TRUE;
14353 }
14354 }
14355
14356 /* Query attributes object to see if integer divide instructions are
14357 forbidden to be in the object. This is not the inverse of
14358 elf32_arm_attributes_accept_div. */
14359 static bfd_boolean
14360 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14361 {
14362 return attr[Tag_DIV_use].i == 1;
14363 }
14364
14365 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14366 are conflicting attributes. */
14367
14368 static bfd_boolean
14369 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14370 {
14371 bfd *obfd = info->output_bfd;
14372 obj_attribute *in_attr;
14373 obj_attribute *out_attr;
14374 /* Some tags have 0 = don't care, 1 = strong requirement,
14375 2 = weak requirement. */
14376 static const int order_021[3] = {0, 2, 1};
14377 int i;
14378 bfd_boolean result = TRUE;
14379 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14380
14381 /* Skip the linker stubs file. This preserves previous behavior
14382 of accepting unknown attributes in the first input file - but
14383 is that a bug? */
14384 if (ibfd->flags & BFD_LINKER_CREATED)
14385 return TRUE;
14386
14387 /* Skip any input that hasn't attribute section.
14388 This enables to link object files without attribute section with
14389 any others. */
14390 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14391 return TRUE;
14392
14393 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14394 {
14395 /* This is the first object. Copy the attributes. */
14396 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14397
14398 out_attr = elf_known_obj_attributes_proc (obfd);
14399
14400 /* Use the Tag_null value to indicate the attributes have been
14401 initialized. */
14402 out_attr[0].i = 1;
14403
14404 /* We do not output objects with Tag_MPextension_use_legacy - we move
14405 the attribute's value to Tag_MPextension_use. */
14406 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14407 {
14408 if (out_attr[Tag_MPextension_use].i != 0
14409 && out_attr[Tag_MPextension_use_legacy].i
14410 != out_attr[Tag_MPextension_use].i)
14411 {
14412 _bfd_error_handler
14413 (_("Error: %pB has both the current and legacy "
14414 "Tag_MPextension_use attributes"), ibfd);
14415 result = FALSE;
14416 }
14417
14418 out_attr[Tag_MPextension_use] =
14419 out_attr[Tag_MPextension_use_legacy];
14420 out_attr[Tag_MPextension_use_legacy].type = 0;
14421 out_attr[Tag_MPextension_use_legacy].i = 0;
14422 }
14423
14424 return result;
14425 }
14426
14427 in_attr = elf_known_obj_attributes_proc (ibfd);
14428 out_attr = elf_known_obj_attributes_proc (obfd);
14429 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14430 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14431 {
14432 /* Ignore mismatches if the object doesn't use floating point or is
14433 floating point ABI independent. */
14434 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14435 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14436 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14437 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14438 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14439 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14440 {
14441 _bfd_error_handler
14442 (_("error: %pB uses VFP register arguments, %pB does not"),
14443 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14444 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14445 result = FALSE;
14446 }
14447 }
14448
14449 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14450 {
14451 /* Merge this attribute with existing attributes. */
14452 switch (i)
14453 {
14454 case Tag_CPU_raw_name:
14455 case Tag_CPU_name:
14456 /* These are merged after Tag_CPU_arch. */
14457 break;
14458
14459 case Tag_ABI_optimization_goals:
14460 case Tag_ABI_FP_optimization_goals:
14461 /* Use the first value seen. */
14462 break;
14463
14464 case Tag_CPU_arch:
14465 {
14466 int secondary_compat = -1, secondary_compat_out = -1;
14467 unsigned int saved_out_attr = out_attr[i].i;
14468 int arch_attr;
14469 static const char *name_table[] =
14470 {
14471 /* These aren't real CPU names, but we can't guess
14472 that from the architecture version alone. */
14473 "Pre v4",
14474 "ARM v4",
14475 "ARM v4T",
14476 "ARM v5T",
14477 "ARM v5TE",
14478 "ARM v5TEJ",
14479 "ARM v6",
14480 "ARM v6KZ",
14481 "ARM v6T2",
14482 "ARM v6K",
14483 "ARM v7",
14484 "ARM v6-M",
14485 "ARM v6S-M",
14486 "ARM v8",
14487 "",
14488 "ARM v8-M.baseline",
14489 "ARM v8-M.mainline",
14490 };
14491
14492 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14493 secondary_compat = get_secondary_compatible_arch (ibfd);
14494 secondary_compat_out = get_secondary_compatible_arch (obfd);
14495 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14496 &secondary_compat_out,
14497 in_attr[i].i,
14498 secondary_compat);
14499
14500 /* Return with error if failed to merge. */
14501 if (arch_attr == -1)
14502 return FALSE;
14503
14504 out_attr[i].i = arch_attr;
14505
14506 set_secondary_compatible_arch (obfd, secondary_compat_out);
14507
14508 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14509 if (out_attr[i].i == saved_out_attr)
14510 ; /* Leave the names alone. */
14511 else if (out_attr[i].i == in_attr[i].i)
14512 {
14513 /* The output architecture has been changed to match the
14514 input architecture. Use the input names. */
14515 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14516 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14517 : NULL;
14518 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14519 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14520 : NULL;
14521 }
14522 else
14523 {
14524 out_attr[Tag_CPU_name].s = NULL;
14525 out_attr[Tag_CPU_raw_name].s = NULL;
14526 }
14527
14528 /* If we still don't have a value for Tag_CPU_name,
14529 make one up now. Tag_CPU_raw_name remains blank. */
14530 if (out_attr[Tag_CPU_name].s == NULL
14531 && out_attr[i].i < ARRAY_SIZE (name_table))
14532 out_attr[Tag_CPU_name].s =
14533 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14534 }
14535 break;
14536
14537 case Tag_ARM_ISA_use:
14538 case Tag_THUMB_ISA_use:
14539 case Tag_WMMX_arch:
14540 case Tag_Advanced_SIMD_arch:
14541 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14542 case Tag_ABI_FP_rounding:
14543 case Tag_ABI_FP_exceptions:
14544 case Tag_ABI_FP_user_exceptions:
14545 case Tag_ABI_FP_number_model:
14546 case Tag_FP_HP_extension:
14547 case Tag_CPU_unaligned_access:
14548 case Tag_T2EE_use:
14549 case Tag_MPextension_use:
14550 case Tag_MVE_arch:
14551 /* Use the largest value specified. */
14552 if (in_attr[i].i > out_attr[i].i)
14553 out_attr[i].i = in_attr[i].i;
14554 break;
14555
14556 case Tag_ABI_align_preserved:
14557 case Tag_ABI_PCS_RO_data:
14558 /* Use the smallest value specified. */
14559 if (in_attr[i].i < out_attr[i].i)
14560 out_attr[i].i = in_attr[i].i;
14561 break;
14562
14563 case Tag_ABI_align_needed:
14564 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14565 && (in_attr[Tag_ABI_align_preserved].i == 0
14566 || out_attr[Tag_ABI_align_preserved].i == 0))
14567 {
14568 /* This error message should be enabled once all non-conformant
14569 binaries in the toolchain have had the attributes set
14570 properly.
14571 _bfd_error_handler
14572 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14573 obfd, ibfd);
14574 result = FALSE; */
14575 }
14576 /* Fall through. */
14577 case Tag_ABI_FP_denormal:
14578 case Tag_ABI_PCS_GOT_use:
14579 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14580 value if greater than 2 (for future-proofing). */
14581 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14582 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14583 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14584 out_attr[i].i = in_attr[i].i;
14585 break;
14586
14587 case Tag_Virtualization_use:
14588 /* The virtualization tag effectively stores two bits of
14589 information: the intended use of TrustZone (in bit 0), and the
14590 intended use of Virtualization (in bit 1). */
14591 if (out_attr[i].i == 0)
14592 out_attr[i].i = in_attr[i].i;
14593 else if (in_attr[i].i != 0
14594 && in_attr[i].i != out_attr[i].i)
14595 {
14596 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14597 out_attr[i].i = 3;
14598 else
14599 {
14600 _bfd_error_handler
14601 (_("error: %pB: unable to merge virtualization attributes "
14602 "with %pB"),
14603 obfd, ibfd);
14604 result = FALSE;
14605 }
14606 }
14607 break;
14608
14609 case Tag_CPU_arch_profile:
14610 if (out_attr[i].i != in_attr[i].i)
14611 {
14612 /* 0 will merge with anything.
14613 'A' and 'S' merge to 'A'.
14614 'R' and 'S' merge to 'R'.
14615 'M' and 'A|R|S' is an error. */
14616 if (out_attr[i].i == 0
14617 || (out_attr[i].i == 'S'
14618 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14619 out_attr[i].i = in_attr[i].i;
14620 else if (in_attr[i].i == 0
14621 || (in_attr[i].i == 'S'
14622 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14623 ; /* Do nothing. */
14624 else
14625 {
14626 _bfd_error_handler
14627 (_("error: %pB: conflicting architecture profiles %c/%c"),
14628 ibfd,
14629 in_attr[i].i ? in_attr[i].i : '0',
14630 out_attr[i].i ? out_attr[i].i : '0');
14631 result = FALSE;
14632 }
14633 }
14634 break;
14635
14636 case Tag_DSP_extension:
14637 /* No need to change output value if any of:
14638 - pre (<=) ARMv5T input architecture (do not have DSP)
14639 - M input profile not ARMv7E-M and do not have DSP. */
14640 if (in_attr[Tag_CPU_arch].i <= 3
14641 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14642 && in_attr[Tag_CPU_arch].i != 13
14643 && in_attr[i].i == 0))
14644 ; /* Do nothing. */
14645 /* Output value should be 0 if DSP part of architecture, ie.
14646 - post (>=) ARMv5te architecture output
14647 - A, R or S profile output or ARMv7E-M output architecture. */
14648 else if (out_attr[Tag_CPU_arch].i >= 4
14649 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14650 || out_attr[Tag_CPU_arch_profile].i == 'R'
14651 || out_attr[Tag_CPU_arch_profile].i == 'S'
14652 || out_attr[Tag_CPU_arch].i == 13))
14653 out_attr[i].i = 0;
14654 /* Otherwise, DSP instructions are added and not part of output
14655 architecture. */
14656 else
14657 out_attr[i].i = 1;
14658 break;
14659
14660 case Tag_FP_arch:
14661 {
14662 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14663 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14664 when it's 0. It might mean absence of FP hardware if
14665 Tag_FP_arch is zero. */
14666
14667 #define VFP_VERSION_COUNT 9
14668 static const struct
14669 {
14670 int ver;
14671 int regs;
14672 } vfp_versions[VFP_VERSION_COUNT] =
14673 {
14674 {0, 0},
14675 {1, 16},
14676 {2, 16},
14677 {3, 32},
14678 {3, 16},
14679 {4, 32},
14680 {4, 16},
14681 {8, 32},
14682 {8, 16}
14683 };
14684 int ver;
14685 int regs;
14686 int newval;
14687
14688 /* If the output has no requirement about FP hardware,
14689 follow the requirement of the input. */
14690 if (out_attr[i].i == 0)
14691 {
14692 /* This assert is still reasonable, we shouldn't
14693 produce the suspicious build attribute
14694 combination (See below for in_attr). */
14695 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14696 out_attr[i].i = in_attr[i].i;
14697 out_attr[Tag_ABI_HardFP_use].i
14698 = in_attr[Tag_ABI_HardFP_use].i;
14699 break;
14700 }
14701 /* If the input has no requirement about FP hardware, do
14702 nothing. */
14703 else if (in_attr[i].i == 0)
14704 {
14705 /* We used to assert that Tag_ABI_HardFP_use was
14706 zero here, but we should never assert when
14707 consuming an object file that has suspicious
14708 build attributes. The single precision variant
14709 of 'no FP architecture' is still 'no FP
14710 architecture', so we just ignore the tag in this
14711 case. */
14712 break;
14713 }
14714
14715 /* Both the input and the output have nonzero Tag_FP_arch.
14716 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14717
14718 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14719 do nothing. */
14720 if (in_attr[Tag_ABI_HardFP_use].i == 0
14721 && out_attr[Tag_ABI_HardFP_use].i == 0)
14722 ;
14723 /* If the input and the output have different Tag_ABI_HardFP_use,
14724 the combination of them is 0 (implied by Tag_FP_arch). */
14725 else if (in_attr[Tag_ABI_HardFP_use].i
14726 != out_attr[Tag_ABI_HardFP_use].i)
14727 out_attr[Tag_ABI_HardFP_use].i = 0;
14728
14729 /* Now we can handle Tag_FP_arch. */
14730
14731 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14732 pick the biggest. */
14733 if (in_attr[i].i >= VFP_VERSION_COUNT
14734 && in_attr[i].i > out_attr[i].i)
14735 {
14736 out_attr[i] = in_attr[i];
14737 break;
14738 }
14739 /* The output uses the superset of input features
14740 (ISA version) and registers. */
14741 ver = vfp_versions[in_attr[i].i].ver;
14742 if (ver < vfp_versions[out_attr[i].i].ver)
14743 ver = vfp_versions[out_attr[i].i].ver;
14744 regs = vfp_versions[in_attr[i].i].regs;
14745 if (regs < vfp_versions[out_attr[i].i].regs)
14746 regs = vfp_versions[out_attr[i].i].regs;
14747 /* This assumes all possible supersets are also a valid
14748 options. */
14749 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14750 {
14751 if (regs == vfp_versions[newval].regs
14752 && ver == vfp_versions[newval].ver)
14753 break;
14754 }
14755 out_attr[i].i = newval;
14756 }
14757 break;
14758 case Tag_PCS_config:
14759 if (out_attr[i].i == 0)
14760 out_attr[i].i = in_attr[i].i;
14761 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14762 {
14763 /* It's sometimes ok to mix different configs, so this is only
14764 a warning. */
14765 _bfd_error_handler
14766 (_("warning: %pB: conflicting platform configuration"), ibfd);
14767 }
14768 break;
14769 case Tag_ABI_PCS_R9_use:
14770 if (in_attr[i].i != out_attr[i].i
14771 && out_attr[i].i != AEABI_R9_unused
14772 && in_attr[i].i != AEABI_R9_unused)
14773 {
14774 _bfd_error_handler
14775 (_("error: %pB: conflicting use of R9"), ibfd);
14776 result = FALSE;
14777 }
14778 if (out_attr[i].i == AEABI_R9_unused)
14779 out_attr[i].i = in_attr[i].i;
14780 break;
14781 case Tag_ABI_PCS_RW_data:
14782 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14783 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14784 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14785 {
14786 _bfd_error_handler
14787 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14788 ibfd);
14789 result = FALSE;
14790 }
14791 /* Use the smallest value specified. */
14792 if (in_attr[i].i < out_attr[i].i)
14793 out_attr[i].i = in_attr[i].i;
14794 break;
14795 case Tag_ABI_PCS_wchar_t:
14796 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14797 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14798 {
14799 _bfd_error_handler
14800 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14801 ibfd, in_attr[i].i, out_attr[i].i);
14802 }
14803 else if (in_attr[i].i && !out_attr[i].i)
14804 out_attr[i].i = in_attr[i].i;
14805 break;
14806 case Tag_ABI_enum_size:
14807 if (in_attr[i].i != AEABI_enum_unused)
14808 {
14809 if (out_attr[i].i == AEABI_enum_unused
14810 || out_attr[i].i == AEABI_enum_forced_wide)
14811 {
14812 /* The existing object is compatible with anything.
14813 Use whatever requirements the new object has. */
14814 out_attr[i].i = in_attr[i].i;
14815 }
14816 else if (in_attr[i].i != AEABI_enum_forced_wide
14817 && out_attr[i].i != in_attr[i].i
14818 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14819 {
14820 static const char *aeabi_enum_names[] =
14821 { "", "variable-size", "32-bit", "" };
14822 const char *in_name =
14823 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14824 ? aeabi_enum_names[in_attr[i].i]
14825 : "<unknown>";
14826 const char *out_name =
14827 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14828 ? aeabi_enum_names[out_attr[i].i]
14829 : "<unknown>";
14830 _bfd_error_handler
14831 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14832 ibfd, in_name, out_name);
14833 }
14834 }
14835 break;
14836 case Tag_ABI_VFP_args:
14837 /* Aready done. */
14838 break;
14839 case Tag_ABI_WMMX_args:
14840 if (in_attr[i].i != out_attr[i].i)
14841 {
14842 _bfd_error_handler
14843 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14844 ibfd, obfd);
14845 result = FALSE;
14846 }
14847 break;
14848 case Tag_compatibility:
14849 /* Merged in target-independent code. */
14850 break;
14851 case Tag_ABI_HardFP_use:
14852 /* This is handled along with Tag_FP_arch. */
14853 break;
14854 case Tag_ABI_FP_16bit_format:
14855 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14856 {
14857 if (in_attr[i].i != out_attr[i].i)
14858 {
14859 _bfd_error_handler
14860 (_("error: fp16 format mismatch between %pB and %pB"),
14861 ibfd, obfd);
14862 result = FALSE;
14863 }
14864 }
14865 if (in_attr[i].i != 0)
14866 out_attr[i].i = in_attr[i].i;
14867 break;
14868
14869 case Tag_DIV_use:
14870 /* A value of zero on input means that the divide instruction may
14871 be used if available in the base architecture as specified via
14872 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14873 the user did not want divide instructions. A value of 2
14874 explicitly means that divide instructions were allowed in ARM
14875 and Thumb state. */
14876 if (in_attr[i].i == out_attr[i].i)
14877 /* Do nothing. */ ;
14878 else if (elf32_arm_attributes_forbid_div (in_attr)
14879 && !elf32_arm_attributes_accept_div (out_attr))
14880 out_attr[i].i = 1;
14881 else if (elf32_arm_attributes_forbid_div (out_attr)
14882 && elf32_arm_attributes_accept_div (in_attr))
14883 out_attr[i].i = in_attr[i].i;
14884 else if (in_attr[i].i == 2)
14885 out_attr[i].i = in_attr[i].i;
14886 break;
14887
14888 case Tag_MPextension_use_legacy:
14889 /* We don't output objects with Tag_MPextension_use_legacy - we
14890 move the value to Tag_MPextension_use. */
14891 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14892 {
14893 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14894 {
14895 _bfd_error_handler
14896 (_("%pB has both the current and legacy "
14897 "Tag_MPextension_use attributes"),
14898 ibfd);
14899 result = FALSE;
14900 }
14901 }
14902
14903 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
14904 out_attr[Tag_MPextension_use] = in_attr[i];
14905
14906 break;
14907
14908 case Tag_nodefaults:
14909 /* This tag is set if it exists, but the value is unused (and is
14910 typically zero). We don't actually need to do anything here -
14911 the merge happens automatically when the type flags are merged
14912 below. */
14913 break;
14914 case Tag_also_compatible_with:
14915 /* Already done in Tag_CPU_arch. */
14916 break;
14917 case Tag_conformance:
14918 /* Keep the attribute if it matches. Throw it away otherwise.
14919 No attribute means no claim to conform. */
14920 if (!in_attr[i].s || !out_attr[i].s
14921 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
14922 out_attr[i].s = NULL;
14923 break;
14924
14925 default:
14926 result
14927 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
14928 }
14929
14930 /* If out_attr was copied from in_attr then it won't have a type yet. */
14931 if (in_attr[i].type && !out_attr[i].type)
14932 out_attr[i].type = in_attr[i].type;
14933 }
14934
14935 /* Merge Tag_compatibility attributes and any common GNU ones. */
14936 if (!_bfd_elf_merge_object_attributes (ibfd, info))
14937 return FALSE;
14938
14939 /* Check for any attributes not known on ARM. */
14940 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
14941
14942 return result;
14943 }
14944
14945
14946 /* Return TRUE if the two EABI versions are incompatible. */
14947
14948 static bfd_boolean
14949 elf32_arm_versions_compatible (unsigned iver, unsigned over)
14950 {
14951 /* v4 and v5 are the same spec before and after it was released,
14952 so allow mixing them. */
14953 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
14954 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
14955 return TRUE;
14956
14957 return (iver == over);
14958 }
14959
14960 /* Merge backend specific data from an object file to the output
14961 object file when linking. */
14962
14963 static bfd_boolean
14964 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
14965
/* Display the flags field.  PTR is the FILE to print to; ABFD supplies the
   e_flags word being decoded.  Implements the print_private_bfd_data hook;
   always returns TRUE.  Flag bits are cleared from FLAGS as they are
   decoded so that any leftover, unrecognised bits can be reported at the
   end.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);

  /* The meaning of the remaining bits depends on the EABI version
     encoded in the flags word.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      /* Clear the bits decoded above so leftovers are flagged below.  */
      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* Version 4 shares the BE8/LE8 bits with version 5.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  if (flags)
    fprintf (file, _(" <Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
15110
15111 static int
15112 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15113 {
15114 switch (ELF_ST_TYPE (elf_sym->st_info))
15115 {
15116 case STT_ARM_TFUNC:
15117 return ELF_ST_TYPE (elf_sym->st_info);
15118
15119 case STT_ARM_16BIT:
15120 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15121 This allows us to distinguish between data used by Thumb instructions
15122 and non-data (which is probably code) inside Thumb regions of an
15123 executable. */
15124 if (type != STT_OBJECT && type != STT_TLS)
15125 return ELF_ST_TYPE (elf_sym->st_info);
15126 break;
15127
15128 default:
15129 break;
15130 }
15131
15132 return type;
15133 }
15134
15135 static asection *
15136 elf32_arm_gc_mark_hook (asection *sec,
15137 struct bfd_link_info *info,
15138 Elf_Internal_Rela *rel,
15139 struct elf_link_hash_entry *h,
15140 Elf_Internal_Sym *sym)
15141 {
15142 if (h != NULL)
15143 switch (ELF32_R_TYPE (rel->r_info))
15144 {
15145 case R_ARM_GNU_VTINHERIT:
15146 case R_ARM_GNU_VTENTRY:
15147 return NULL;
15148 }
15149
15150 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15151 }
15152
15153 /* Look through the relocs for a section during the first phase. */
15154
15155 static bfd_boolean
15156 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15157 asection *sec, const Elf_Internal_Rela *relocs)
15158 {
15159 Elf_Internal_Shdr *symtab_hdr;
15160 struct elf_link_hash_entry **sym_hashes;
15161 const Elf_Internal_Rela *rel;
15162 const Elf_Internal_Rela *rel_end;
15163 bfd *dynobj;
15164 asection *sreloc;
15165 struct elf32_arm_link_hash_table *htab;
15166 bfd_boolean call_reloc_p;
15167 bfd_boolean may_become_dynamic_p;
15168 bfd_boolean may_need_local_target_p;
15169 unsigned long nsyms;
15170
15171 if (bfd_link_relocatable (info))
15172 return TRUE;
15173
15174 BFD_ASSERT (is_arm_elf (abfd));
15175
15176 htab = elf32_arm_hash_table (info);
15177 if (htab == NULL)
15178 return FALSE;
15179
15180 sreloc = NULL;
15181
15182 /* Create dynamic sections for relocatable executables so that we can
15183 copy relocations. */
15184 if (htab->root.is_relocatable_executable
15185 && ! htab->root.dynamic_sections_created)
15186 {
15187 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15188 return FALSE;
15189 }
15190
15191 if (htab->root.dynobj == NULL)
15192 htab->root.dynobj = abfd;
15193 if (!create_ifunc_sections (info))
15194 return FALSE;
15195
15196 dynobj = htab->root.dynobj;
15197
15198 symtab_hdr = & elf_symtab_hdr (abfd);
15199 sym_hashes = elf_sym_hashes (abfd);
15200 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15201
15202 rel_end = relocs + sec->reloc_count;
15203 for (rel = relocs; rel < rel_end; rel++)
15204 {
15205 Elf_Internal_Sym *isym;
15206 struct elf_link_hash_entry *h;
15207 struct elf32_arm_link_hash_entry *eh;
15208 unsigned int r_symndx;
15209 int r_type;
15210
15211 r_symndx = ELF32_R_SYM (rel->r_info);
15212 r_type = ELF32_R_TYPE (rel->r_info);
15213 r_type = arm_real_reloc_type (htab, r_type);
15214
15215 if (r_symndx >= nsyms
15216 /* PR 9934: It is possible to have relocations that do not
15217 refer to symbols, thus it is also possible to have an
15218 object file containing relocations but no symbol table. */
15219 && (r_symndx > STN_UNDEF || nsyms > 0))
15220 {
15221 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15222 r_symndx);
15223 return FALSE;
15224 }
15225
15226 h = NULL;
15227 isym = NULL;
15228 if (nsyms > 0)
15229 {
15230 if (r_symndx < symtab_hdr->sh_info)
15231 {
15232 /* A local symbol. */
15233 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
15234 abfd, r_symndx);
15235 if (isym == NULL)
15236 return FALSE;
15237 }
15238 else
15239 {
15240 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15241 while (h->root.type == bfd_link_hash_indirect
15242 || h->root.type == bfd_link_hash_warning)
15243 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15244 }
15245 }
15246
15247 eh = (struct elf32_arm_link_hash_entry *) h;
15248
15249 call_reloc_p = FALSE;
15250 may_become_dynamic_p = FALSE;
15251 may_need_local_target_p = FALSE;
15252
15253 /* Could be done earlier, if h were already available. */
15254 r_type = elf32_arm_tls_transition (info, r_type, h);
15255 switch (r_type)
15256 {
15257 case R_ARM_GOTOFFFUNCDESC:
15258 {
15259 if (h == NULL)
15260 {
15261 if (!elf32_arm_allocate_local_sym_info (abfd))
15262 return FALSE;
15263 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15264 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15265 }
15266 else
15267 {
15268 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15269 }
15270 }
15271 break;
15272
15273 case R_ARM_GOTFUNCDESC:
15274 {
15275 if (h == NULL)
15276 {
15277 /* Such a relocation is not supposed to be generated
15278 by gcc on a static function. */
15279 /* Anyway if needed it could be handled. */
15280 abort();
15281 }
15282 else
15283 {
15284 eh->fdpic_cnts.gotfuncdesc_cnt++;
15285 }
15286 }
15287 break;
15288
15289 case R_ARM_FUNCDESC:
15290 {
15291 if (h == NULL)
15292 {
15293 if (!elf32_arm_allocate_local_sym_info (abfd))
15294 return FALSE;
15295 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15296 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15297 }
15298 else
15299 {
15300 eh->fdpic_cnts.funcdesc_cnt++;
15301 }
15302 }
15303 break;
15304
15305 case R_ARM_GOT32:
15306 case R_ARM_GOT_PREL:
15307 case R_ARM_TLS_GD32:
15308 case R_ARM_TLS_GD32_FDPIC:
15309 case R_ARM_TLS_IE32:
15310 case R_ARM_TLS_IE32_FDPIC:
15311 case R_ARM_TLS_GOTDESC:
15312 case R_ARM_TLS_DESCSEQ:
15313 case R_ARM_THM_TLS_DESCSEQ:
15314 case R_ARM_TLS_CALL:
15315 case R_ARM_THM_TLS_CALL:
15316 /* This symbol requires a global offset table entry. */
15317 {
15318 int tls_type, old_tls_type;
15319
15320 switch (r_type)
15321 {
15322 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15323 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15324
15325 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15326 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15327
15328 case R_ARM_TLS_GOTDESC:
15329 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15330 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15331 tls_type = GOT_TLS_GDESC; break;
15332
15333 default: tls_type = GOT_NORMAL; break;
15334 }
15335
15336 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15337 info->flags |= DF_STATIC_TLS;
15338
15339 if (h != NULL)
15340 {
15341 h->got.refcount++;
15342 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15343 }
15344 else
15345 {
15346 /* This is a global offset table entry for a local symbol. */
15347 if (!elf32_arm_allocate_local_sym_info (abfd))
15348 return FALSE;
15349 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15350 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15351 }
15352
15353 /* If a variable is accessed with both tls methods, two
15354 slots may be created. */
15355 if (GOT_TLS_GD_ANY_P (old_tls_type)
15356 && GOT_TLS_GD_ANY_P (tls_type))
15357 tls_type |= old_tls_type;
15358
15359 /* We will already have issued an error message if there
15360 is a TLS/non-TLS mismatch, based on the symbol
15361 type. So just combine any TLS types needed. */
15362 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15363 && tls_type != GOT_NORMAL)
15364 tls_type |= old_tls_type;
15365
15366 /* If the symbol is accessed in both IE and GDESC
15367 method, we're able to relax. Turn off the GDESC flag,
15368 without messing up with any other kind of tls types
15369 that may be involved. */
15370 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15371 tls_type &= ~GOT_TLS_GDESC;
15372
15373 if (old_tls_type != tls_type)
15374 {
15375 if (h != NULL)
15376 elf32_arm_hash_entry (h)->tls_type = tls_type;
15377 else
15378 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15379 }
15380 }
15381 /* Fall through. */
15382
15383 case R_ARM_TLS_LDM32:
15384 case R_ARM_TLS_LDM32_FDPIC:
15385 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15386 htab->tls_ldm_got.refcount++;
15387 /* Fall through. */
15388
15389 case R_ARM_GOTOFF32:
15390 case R_ARM_GOTPC:
15391 if (htab->root.sgot == NULL
15392 && !create_got_section (htab->root.dynobj, info))
15393 return FALSE;
15394 break;
15395
15396 case R_ARM_PC24:
15397 case R_ARM_PLT32:
15398 case R_ARM_CALL:
15399 case R_ARM_JUMP24:
15400 case R_ARM_PREL31:
15401 case R_ARM_THM_CALL:
15402 case R_ARM_THM_JUMP24:
15403 case R_ARM_THM_JUMP19:
15404 call_reloc_p = TRUE;
15405 may_need_local_target_p = TRUE;
15406 break;
15407
15408 case R_ARM_ABS12:
15409 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15410 ldr __GOTT_INDEX__ offsets. */
15411 if (htab->root.target_os != is_vxworks)
15412 {
15413 may_need_local_target_p = TRUE;
15414 break;
15415 }
15416 else goto jump_over;
15417
15418 /* Fall through. */
15419
15420 case R_ARM_MOVW_ABS_NC:
15421 case R_ARM_MOVT_ABS:
15422 case R_ARM_THM_MOVW_ABS_NC:
15423 case R_ARM_THM_MOVT_ABS:
15424 if (bfd_link_pic (info))
15425 {
15426 _bfd_error_handler
15427 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15428 abfd, elf32_arm_howto_table_1[r_type].name,
15429 (h) ? h->root.root.string : "a local symbol");
15430 bfd_set_error (bfd_error_bad_value);
15431 return FALSE;
15432 }
15433
15434 /* Fall through. */
15435 case R_ARM_ABS32:
15436 case R_ARM_ABS32_NOI:
15437 jump_over:
15438 if (h != NULL && bfd_link_executable (info))
15439 {
15440 h->pointer_equality_needed = 1;
15441 }
15442 /* Fall through. */
15443 case R_ARM_REL32:
15444 case R_ARM_REL32_NOI:
15445 case R_ARM_MOVW_PREL_NC:
15446 case R_ARM_MOVT_PREL:
15447 case R_ARM_THM_MOVW_PREL_NC:
15448 case R_ARM_THM_MOVT_PREL:
15449
15450 /* Should the interworking branches be listed here? */
15451 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15452 || htab->fdpic_p)
15453 && (sec->flags & SEC_ALLOC) != 0)
15454 {
15455 if (h == NULL
15456 && elf32_arm_howto_from_type (r_type)->pc_relative)
15457 {
15458 /* In shared libraries and relocatable executables,
15459 we treat local relative references as calls;
15460 see the related SYMBOL_CALLS_LOCAL code in
15461 allocate_dynrelocs. */
15462 call_reloc_p = TRUE;
15463 may_need_local_target_p = TRUE;
15464 }
15465 else
15466 /* We are creating a shared library or relocatable
15467 executable, and this is a reloc against a global symbol,
15468 or a non-PC-relative reloc against a local symbol.
15469 We may need to copy the reloc into the output. */
15470 may_become_dynamic_p = TRUE;
15471 }
15472 else
15473 may_need_local_target_p = TRUE;
15474 break;
15475
15476 /* This relocation describes the C++ object vtable hierarchy.
15477 Reconstruct it for later use during GC. */
15478 case R_ARM_GNU_VTINHERIT:
15479 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15480 return FALSE;
15481 break;
15482
15483 /* This relocation describes which C++ vtable entries are actually
15484 used. Record for later use during GC. */
15485 case R_ARM_GNU_VTENTRY:
15486 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15487 return FALSE;
15488 break;
15489 }
15490
15491 if (h != NULL)
15492 {
15493 if (call_reloc_p)
15494 /* We may need a .plt entry if the function this reloc
15495 refers to is in a different object, regardless of the
15496 symbol's type. We can't tell for sure yet, because
15497 something later might force the symbol local. */
15498 h->needs_plt = 1;
15499 else if (may_need_local_target_p)
15500 /* If this reloc is in a read-only section, we might
15501 need a copy reloc. We can't check reliably at this
15502 stage whether the section is read-only, as input
15503 sections have not yet been mapped to output sections.
15504 Tentatively set the flag for now, and correct in
15505 adjust_dynamic_symbol. */
15506 h->non_got_ref = 1;
15507 }
15508
15509 if (may_need_local_target_p
15510 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15511 {
15512 union gotplt_union *root_plt;
15513 struct arm_plt_info *arm_plt;
15514 struct arm_local_iplt_info *local_iplt;
15515
15516 if (h != NULL)
15517 {
15518 root_plt = &h->plt;
15519 arm_plt = &eh->plt;
15520 }
15521 else
15522 {
15523 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15524 if (local_iplt == NULL)
15525 return FALSE;
15526 root_plt = &local_iplt->root;
15527 arm_plt = &local_iplt->arm;
15528 }
15529
15530 /* If the symbol is a function that doesn't bind locally,
15531 this relocation will need a PLT entry. */
15532 if (root_plt->refcount != -1)
15533 root_plt->refcount += 1;
15534
15535 if (!call_reloc_p)
15536 arm_plt->noncall_refcount++;
15537
15538 /* It's too early to use htab->use_blx here, so we have to
15539 record possible blx references separately from
15540 relocs that definitely need a thumb stub. */
15541
15542 if (r_type == R_ARM_THM_CALL)
15543 arm_plt->maybe_thumb_refcount += 1;
15544
15545 if (r_type == R_ARM_THM_JUMP24
15546 || r_type == R_ARM_THM_JUMP19)
15547 arm_plt->thumb_refcount += 1;
15548 }
15549
15550 if (may_become_dynamic_p)
15551 {
15552 struct elf_dyn_relocs *p, **head;
15553
15554 /* Create a reloc section in dynobj. */
15555 if (sreloc == NULL)
15556 {
15557 sreloc = _bfd_elf_make_dynamic_reloc_section
15558 (sec, dynobj, 2, abfd, ! htab->use_rel);
15559
15560 if (sreloc == NULL)
15561 return FALSE;
15562 }
15563
15564 /* If this is a global symbol, count the number of
15565 relocations we need for this symbol. */
15566 if (h != NULL)
15567 head = &h->dyn_relocs;
15568 else
15569 {
15570 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15571 if (head == NULL)
15572 return FALSE;
15573 }
15574
15575 p = *head;
15576 if (p == NULL || p->sec != sec)
15577 {
15578 size_t amt = sizeof *p;
15579
15580 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15581 if (p == NULL)
15582 return FALSE;
15583 p->next = *head;
15584 *head = p;
15585 p->sec = sec;
15586 p->count = 0;
15587 p->pc_count = 0;
15588 }
15589
15590 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15591 p->pc_count += 1;
15592 p->count += 1;
15593 if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15594 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15595 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15596 that will become rofixup. */
15597 /* This is due to the fact that we suppose all will become rofixup. */
15598 fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15599 _bfd_error_handler
15600 (_("FDPIC does not yet support %s relocation"
15601 " to become dynamic for executable"),
15602 elf32_arm_howto_table_1[r_type].name);
15603 abort();
15604 }
15605 }
15606 }
15607
15608 return TRUE;
15609 }
15610
15611 static void
15612 elf32_arm_update_relocs (asection *o,
15613 struct bfd_elf_section_reloc_data *reldata)
15614 {
15615 void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15616 void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15617 const struct elf_backend_data *bed;
15618 _arm_elf_section_data *eado;
15619 struct bfd_link_order *p;
15620 bfd_byte *erela_head, *erela;
15621 Elf_Internal_Rela *irela_head, *irela;
15622 Elf_Internal_Shdr *rel_hdr;
15623 bfd *abfd;
15624 unsigned int count;
15625
15626 eado = get_arm_elf_section_data (o);
15627
15628 if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15629 return;
15630
15631 abfd = o->owner;
15632 bed = get_elf_backend_data (abfd);
15633 rel_hdr = reldata->hdr;
15634
15635 if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15636 {
15637 swap_in = bed->s->swap_reloc_in;
15638 swap_out = bed->s->swap_reloc_out;
15639 }
15640 else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15641 {
15642 swap_in = bed->s->swap_reloca_in;
15643 swap_out = bed->s->swap_reloca_out;
15644 }
15645 else
15646 abort ();
15647
15648 erela_head = rel_hdr->contents;
15649 irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15650 ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15651
15652 erela = erela_head;
15653 irela = irela_head;
15654 count = 0;
15655
15656 for (p = o->map_head.link_order; p; p = p->next)
15657 {
15658 if (p->type == bfd_section_reloc_link_order
15659 || p->type == bfd_symbol_reloc_link_order)
15660 {
15661 (*swap_in) (abfd, erela, irela);
15662 erela += rel_hdr->sh_entsize;
15663 irela++;
15664 count++;
15665 }
15666 else if (p->type == bfd_indirect_link_order)
15667 {
15668 struct bfd_elf_section_reloc_data *input_reldata;
15669 arm_unwind_table_edit *edit_list, *edit_tail;
15670 _arm_elf_section_data *eadi;
15671 bfd_size_type j;
15672 bfd_vma offset;
15673 asection *i;
15674
15675 i = p->u.indirect.section;
15676
15677 eadi = get_arm_elf_section_data (i);
15678 edit_list = eadi->u.exidx.unwind_edit_list;
15679 edit_tail = eadi->u.exidx.unwind_edit_tail;
15680 offset = i->output_offset;
15681
15682 if (eadi->elf.rel.hdr &&
15683 eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15684 input_reldata = &eadi->elf.rel;
15685 else if (eadi->elf.rela.hdr &&
15686 eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15687 input_reldata = &eadi->elf.rela;
15688 else
15689 abort ();
15690
15691 if (edit_list)
15692 {
15693 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15694 {
15695 arm_unwind_table_edit *edit_node, *edit_next;
15696 bfd_vma bias;
15697 bfd_vma reloc_index;
15698
15699 (*swap_in) (abfd, erela, irela);
15700 reloc_index = (irela->r_offset - offset) / 8;
15701
15702 bias = 0;
15703 edit_node = edit_list;
15704 for (edit_next = edit_list;
15705 edit_next && edit_next->index <= reloc_index;
15706 edit_next = edit_node->next)
15707 {
15708 bias++;
15709 edit_node = edit_next;
15710 }
15711
15712 if (edit_node->type != DELETE_EXIDX_ENTRY
15713 || edit_node->index != reloc_index)
15714 {
15715 irela->r_offset -= bias * 8;
15716 irela++;
15717 count++;
15718 }
15719
15720 erela += rel_hdr->sh_entsize;
15721 }
15722
15723 if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15724 {
15725 /* New relocation entity. */
15726 asection *text_sec = edit_tail->linked_section;
15727 asection *text_out = text_sec->output_section;
15728 bfd_vma exidx_offset = offset + i->size - 8;
15729
15730 irela->r_addend = 0;
15731 irela->r_offset = exidx_offset;
15732 irela->r_info = ELF32_R_INFO
15733 (text_out->target_index, R_ARM_PREL31);
15734 irela++;
15735 count++;
15736 }
15737 }
15738 else
15739 {
15740 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15741 {
15742 (*swap_in) (abfd, erela, irela);
15743 erela += rel_hdr->sh_entsize;
15744 irela++;
15745 }
15746
15747 count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15748 }
15749 }
15750 }
15751
15752 reldata->count = count;
15753 rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15754
15755 erela = erela_head;
15756 irela = irela_head;
15757 while (count > 0)
15758 {
15759 (*swap_out) (abfd, irela, erela);
15760 erela += rel_hdr->sh_entsize;
15761 irela++;
15762 count--;
15763 }
15764
15765 free (irela_head);
15766
15767 /* Hashes are no longer valid. */
15768 free (reldata->hashes);
15769 reldata->hashes = NULL;
15770 }
15771
15772 /* Unwinding tables are not referenced directly. This pass marks them as
15773 required if the corresponding code section is marked. Similarly, ARMv8-M
15774 secure entry functions can only be referenced by SG veneers which are
15775 created after the GC process. They need to be marked in case they reside in
15776 their own section (as would be the case if code was compiled with
15777 -ffunction-sections). */
15778
15779 static bfd_boolean
15780 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15781 elf_gc_mark_hook_fn gc_mark_hook)
15782 {
15783 bfd *sub;
15784 Elf_Internal_Shdr **elf_shdrp;
15785 asection *cmse_sec;
15786 obj_attribute *out_attr;
15787 Elf_Internal_Shdr *symtab_hdr;
15788 unsigned i, sym_count, ext_start;
15789 const struct elf_backend_data *bed;
15790 struct elf_link_hash_entry **sym_hashes;
15791 struct elf32_arm_link_hash_entry *cmse_hash;
15792 bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
15793 bfd_boolean debug_sec_need_to_be_marked = FALSE;
15794 asection *isec;
15795
15796 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
15797
15798 out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15799 is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15800 && out_attr[Tag_CPU_arch_profile].i == 'M';
15801
15802 /* Marking EH data may cause additional code sections to be marked,
15803 requiring multiple passes. */
15804 again = TRUE;
15805 while (again)
15806 {
15807 again = FALSE;
15808 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15809 {
15810 asection *o;
15811
15812 if (! is_arm_elf (sub))
15813 continue;
15814
15815 elf_shdrp = elf_elfsections (sub);
15816 for (o = sub->sections; o != NULL; o = o->next)
15817 {
15818 Elf_Internal_Shdr *hdr;
15819
15820 hdr = &elf_section_data (o)->this_hdr;
15821 if (hdr->sh_type == SHT_ARM_EXIDX
15822 && hdr->sh_link
15823 && hdr->sh_link < elf_numsections (sub)
15824 && !o->gc_mark
15825 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15826 {
15827 again = TRUE;
15828 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15829 return FALSE;
15830 }
15831 }
15832
15833 /* Mark section holding ARMv8-M secure entry functions. We mark all
15834 of them so no need for a second browsing. */
15835 if (is_v8m && first_bfd_browse)
15836 {
15837 sym_hashes = elf_sym_hashes (sub);
15838 bed = get_elf_backend_data (sub);
15839 symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15840 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15841 ext_start = symtab_hdr->sh_info;
15842
15843 /* Scan symbols. */
15844 for (i = ext_start; i < sym_count; i++)
15845 {
15846 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15847
15848 /* Assume it is a special symbol. If not, cmse_scan will
15849 warn about it and user can do something about it. */
15850 if (startswith (cmse_hash->root.root.root.string,
15851 CMSE_PREFIX))
15852 {
15853 cmse_sec = cmse_hash->root.root.u.def.section;
15854 if (!cmse_sec->gc_mark
15855 && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
15856 return FALSE;
15857 /* The debug sections related to these secure entry
15858 functions are marked on enabling below flag. */
15859 debug_sec_need_to_be_marked = TRUE;
15860 }
15861 }
15862
15863 if (debug_sec_need_to_be_marked)
15864 {
15865 /* Looping over all the sections of the object file containing
15866 Armv8-M secure entry functions and marking all the debug
15867 sections. */
15868 for (isec = sub->sections; isec != NULL; isec = isec->next)
15869 {
15870 /* If not a debug sections, skip it. */
15871 if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
15872 isec->gc_mark = 1 ;
15873 }
15874 debug_sec_need_to_be_marked = FALSE;
15875 }
15876 }
15877 }
15878 first_bfd_browse = FALSE;
15879 }
15880
15881 return TRUE;
15882 }
15883
15884 /* Treat mapping symbols as special target symbols. */
15885
15886 static bfd_boolean
15887 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15888 {
15889 return bfd_is_arm_special_symbol_name (sym->name,
15890 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15891 }
15892
15893 /* If the ELF symbol SYM might be a function in SEC, return the
15894 function size and set *CODE_OFF to the function's entry point,
15895 otherwise return zero. */
15896
15897 static bfd_size_type
15898 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
15899 bfd_vma *code_off)
15900 {
15901 bfd_size_type size;
15902
15903 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
15904 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
15905 || sym->section != sec)
15906 return 0;
15907
15908 if (!(sym->flags & BSF_SYNTHETIC))
15909 switch (ELF_ST_TYPE (((elf_symbol_type *) sym)->internal_elf_sym.st_info))
15910 {
15911 case STT_FUNC:
15912 case STT_ARM_TFUNC:
15913 case STT_NOTYPE:
15914 break;
15915 default:
15916 return 0;
15917 }
15918
15919 if ((sym->flags & BSF_LOCAL)
15920 && bfd_is_arm_special_symbol_name (sym->name,
15921 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
15922 return 0;
15923
15924 *code_off = sym->value;
15925 size = 0;
15926 if (!(sym->flags & BSF_SYNTHETIC))
15927 size = ((elf_symbol_type *) sym)->internal_elf_sym.st_size;
15928 if (size == 0)
15929 size = 1;
15930 return size;
15931 }
15932
15933 static bfd_boolean
15934 elf32_arm_find_inliner_info (bfd * abfd,
15935 const char ** filename_ptr,
15936 const char ** functionname_ptr,
15937 unsigned int * line_ptr)
15938 {
15939 bfd_boolean found;
15940 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
15941 functionname_ptr, line_ptr,
15942 & elf_tdata (abfd)->dwarf2_find_line_info);
15943 return found;
15944 }
15945
15946 /* Adjust a symbol defined by a dynamic object and referenced by a
15947 regular object. The current definition is in some section of the
15948 dynamic object, but we're not including those sections. We have to
15949 change the definition to something the rest of the link can
15950 understand. */
15951
static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  /* Also drop the ARM-specific Thumb/non-call PLT reference
	     counts kept alongside the generic ones.  */
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  /* Read-only definitions go into .data.rel.ro (sdynrelro) so that the
     copy can still be write-protected after relocation.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
16080
16081 /* Allocate space in .plt, .got and associated reloc sections for
16082 dynamic relocs. */
16083
16084 static bfd_boolean
16085 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16086 {
16087 struct bfd_link_info *info;
16088 struct elf32_arm_link_hash_table *htab;
16089 struct elf32_arm_link_hash_entry *eh;
16090 struct elf_dyn_relocs *p;
16091
16092 if (h->root.type == bfd_link_hash_indirect)
16093 return TRUE;
16094
16095 eh = (struct elf32_arm_link_hash_entry *) h;
16096
16097 info = (struct bfd_link_info *) inf;
16098 htab = elf32_arm_hash_table (info);
16099 if (htab == NULL)
16100 return FALSE;
16101
16102 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16103 && h->plt.refcount > 0)
16104 {
16105 /* Make sure this symbol is output as a dynamic symbol.
16106 Undefined weak syms won't yet be marked as dynamic. */
16107 if (h->dynindx == -1 && !h->forced_local
16108 && h->root.type == bfd_link_hash_undefweak)
16109 {
16110 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16111 return FALSE;
16112 }
16113
16114 /* If the call in the PLT entry binds locally, the associated
16115 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16116 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16117 than the .plt section. */
16118 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16119 {
16120 eh->is_iplt = 1;
16121 if (eh->plt.noncall_refcount == 0
16122 && SYMBOL_REFERENCES_LOCAL (info, h))
16123 /* All non-call references can be resolved directly.
16124 This means that they can (and in some cases, must)
16125 resolve directly to the run-time target, rather than
16126 to the PLT. That in turns means that any .got entry
16127 would be equal to the .igot.plt entry, so there's
16128 no point having both. */
16129 h->got.refcount = 0;
16130 }
16131
16132 if (bfd_link_pic (info)
16133 || eh->is_iplt
16134 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16135 {
16136 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16137
16138 /* If this symbol is not defined in a regular file, and we are
16139 not generating a shared library, then set the symbol to this
16140 location in the .plt. This is required to make function
16141 pointers compare as equal between the normal executable and
16142 the shared library. */
16143 if (! bfd_link_pic (info)
16144 && !h->def_regular)
16145 {
16146 h->root.u.def.section = htab->root.splt;
16147 h->root.u.def.value = h->plt.offset;
16148
16149 /* Make sure the function is not marked as Thumb, in case
16150 it is the target of an ABS32 relocation, which will
16151 point to the PLT entry. */
16152 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16153 }
16154
16155 /* VxWorks executables have a second set of relocations for
16156 each PLT entry. They go in a separate relocation section,
16157 which is processed by the kernel loader. */
16158 if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
16159 {
16160 /* There is a relocation for the initial PLT entry:
16161 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16162 if (h->plt.offset == htab->plt_header_size)
16163 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16164
16165 /* There are two extra relocations for each subsequent
16166 PLT entry: an R_ARM_32 relocation for the GOT entry,
16167 and an R_ARM_32 relocation for the PLT entry. */
16168 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16169 }
16170 }
16171 else
16172 {
16173 h->plt.offset = (bfd_vma) -1;
16174 h->needs_plt = 0;
16175 }
16176 }
16177 else
16178 {
16179 h->plt.offset = (bfd_vma) -1;
16180 h->needs_plt = 0;
16181 }
16182
16183 eh = (struct elf32_arm_link_hash_entry *) h;
16184 eh->tlsdesc_got = (bfd_vma) -1;
16185
16186 if (h->got.refcount > 0)
16187 {
16188 asection *s;
16189 bfd_boolean dyn;
16190 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16191 int indx;
16192
16193 /* Make sure this symbol is output as a dynamic symbol.
16194 Undefined weak syms won't yet be marked as dynamic. */
16195 if (htab->root.dynamic_sections_created
16196 && h->dynindx == -1
16197 && !h->forced_local
16198 && h->root.type == bfd_link_hash_undefweak)
16199 {
16200 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16201 return FALSE;
16202 }
16203
16204 s = htab->root.sgot;
16205 h->got.offset = s->size;
16206
16207 if (tls_type == GOT_UNKNOWN)
16208 abort ();
16209
16210 if (tls_type == GOT_NORMAL)
16211 /* Non-TLS symbols need one GOT slot. */
16212 s->size += 4;
16213 else
16214 {
16215 if (tls_type & GOT_TLS_GDESC)
16216 {
16217 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16218 eh->tlsdesc_got
16219 = (htab->root.sgotplt->size
16220 - elf32_arm_compute_jump_table_size (htab));
16221 htab->root.sgotplt->size += 8;
16222 h->got.offset = (bfd_vma) -2;
16223 /* plt.got_offset needs to know there's a TLS_DESC
16224 reloc in the middle of .got.plt. */
16225 htab->num_tls_desc++;
16226 }
16227
16228 if (tls_type & GOT_TLS_GD)
16229 {
16230 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16231 consecutive GOT slots. If the symbol is both GD
16232 and GDESC, got.offset may have been
16233 overwritten. */
16234 h->got.offset = s->size;
16235 s->size += 8;
16236 }
16237
16238 if (tls_type & GOT_TLS_IE)
16239 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16240 slot. */
16241 s->size += 4;
16242 }
16243
16244 dyn = htab->root.dynamic_sections_created;
16245
16246 indx = 0;
16247 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
16248 && (!bfd_link_pic (info)
16249 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16250 indx = h->dynindx;
16251
16252 if (tls_type != GOT_NORMAL
16253 && (bfd_link_dll (info) || indx != 0)
16254 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16255 || h->root.type != bfd_link_hash_undefweak))
16256 {
16257 if (tls_type & GOT_TLS_IE)
16258 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16259
16260 if (tls_type & GOT_TLS_GD)
16261 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16262
16263 if (tls_type & GOT_TLS_GDESC)
16264 {
16265 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16266 /* GDESC needs a trampoline to jump to. */
16267 htab->tls_trampoline = -1;
16268 }
16269
16270 /* Only GD needs it. GDESC just emits one relocation per
16271 2 entries. */
16272 if ((tls_type & GOT_TLS_GD) && indx != 0)
16273 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16274 }
16275 else if (((indx != -1) || htab->fdpic_p)
16276 && !SYMBOL_REFERENCES_LOCAL (info, h))
16277 {
16278 if (htab->root.dynamic_sections_created)
16279 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16280 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16281 }
16282 else if (h->type == STT_GNU_IFUNC
16283 && eh->plt.noncall_refcount == 0)
16284 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16285 they all resolve dynamically instead. Reserve room for the
16286 GOT entry's R_ARM_IRELATIVE relocation. */
16287 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16288 else if (bfd_link_pic (info)
16289 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16290 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16291 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16292 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16293 /* Reserve room for rofixup for FDPIC executable. */
16294 /* TLS relocs do not need space since they are completely
16295 resolved. */
16296 htab->srofixup->size += 4;
16297 }
16298 else
16299 h->got.offset = (bfd_vma) -1;
16300
16301 /* FDPIC support. */
16302 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16303 {
16304 /* Symbol musn't be exported. */
16305 if (h->dynindx != -1)
16306 abort();
16307
16308 /* We only allocate one function descriptor with its associated
16309 relocation. */
16310 if (eh->fdpic_cnts.funcdesc_offset == -1)
16311 {
16312 asection *s = htab->root.sgot;
16313
16314 eh->fdpic_cnts.funcdesc_offset = s->size;
16315 s->size += 8;
16316 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16317 if (bfd_link_pic(info))
16318 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16319 else
16320 htab->srofixup->size += 8;
16321 }
16322 }
16323
16324 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16325 {
16326 asection *s = htab->root.sgot;
16327
16328 if (htab->root.dynamic_sections_created && h->dynindx == -1
16329 && !h->forced_local)
16330 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16331 return FALSE;
16332
16333 if (h->dynindx == -1)
16334 {
16335 /* We only allocate one function descriptor with its
16336 associated relocation. */
16337 if (eh->fdpic_cnts.funcdesc_offset == -1)
16338 {
16339
16340 eh->fdpic_cnts.funcdesc_offset = s->size;
16341 s->size += 8;
16342 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16343 rofixups. */
16344 if (bfd_link_pic(info))
16345 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16346 else
16347 htab->srofixup->size += 8;
16348 }
16349 }
16350
16351 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16352 R_ARM_RELATIVE/rofixup relocation on it. */
16353 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16354 s->size += 4;
16355 if (h->dynindx == -1 && !bfd_link_pic(info))
16356 htab->srofixup->size += 4;
16357 else
16358 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16359 }
16360
16361 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16362 {
16363 if (htab->root.dynamic_sections_created && h->dynindx == -1
16364 && !h->forced_local)
16365 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16366 return FALSE;
16367
16368 if (h->dynindx == -1)
16369 {
16370 /* We only allocate one function descriptor with its
16371 associated relocation. */
16372 if (eh->fdpic_cnts.funcdesc_offset == -1)
16373 {
16374 asection *s = htab->root.sgot;
16375
16376 eh->fdpic_cnts.funcdesc_offset = s->size;
16377 s->size += 8;
16378 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16379 rofixups. */
16380 if (bfd_link_pic(info))
16381 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16382 else
16383 htab->srofixup->size += 8;
16384 }
16385 }
16386 if (h->dynindx == -1 && !bfd_link_pic(info))
16387 {
16388 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16389 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16390 }
16391 else
16392 {
16393 /* Will need one dynamic reloc per reference. will be either
16394 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16395 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16396 eh->fdpic_cnts.funcdesc_cnt);
16397 }
16398 }
16399
16400 /* Allocate stubs for exported Thumb functions on v4t. */
16401 if (!htab->use_blx && h->dynindx != -1
16402 && h->def_regular
16403 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16404 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16405 {
16406 struct elf_link_hash_entry * th;
16407 struct bfd_link_hash_entry * bh;
16408 struct elf_link_hash_entry * myh;
16409 char name[1024];
16410 asection *s;
16411 bh = NULL;
16412 /* Create a new symbol to regist the real location of the function. */
16413 s = h->root.u.def.section;
16414 sprintf (name, "__real_%s", h->root.root.string);
16415 _bfd_generic_link_add_one_symbol (info, s->owner,
16416 name, BSF_GLOBAL, s,
16417 h->root.u.def.value,
16418 NULL, TRUE, FALSE, &bh);
16419
16420 myh = (struct elf_link_hash_entry *) bh;
16421 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16422 myh->forced_local = 1;
16423 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16424 eh->export_glue = myh;
16425 th = record_arm_to_thumb_glue (info, h);
16426 /* Point the symbol at the stub. */
16427 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16428 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16429 h->root.u.def.section = th->root.u.def.section;
16430 h->root.u.def.value = th->root.u.def.value & ~1;
16431 }
16432
16433 if (h->dyn_relocs == NULL)
16434 return TRUE;
16435
16436 /* In the shared -Bsymbolic case, discard space allocated for
16437 dynamic pc-relative relocs against symbols which turn out to be
16438 defined in regular objects. For the normal shared case, discard
16439 space for pc-relative relocs that have become local due to symbol
16440 visibility changes. */
16441
16442 if (bfd_link_pic (info)
16443 || htab->root.is_relocatable_executable
16444 || htab->fdpic_p)
16445 {
16446 /* Relocs that use pc_count are PC-relative forms, which will appear
16447 on something like ".long foo - ." or "movw REG, foo - .". We want
16448 calls to protected symbols to resolve directly to the function
16449 rather than going via the plt. If people want function pointer
16450 comparisons to work as expected then they should avoid writing
16451 assembly like ".long foo - .". */
16452 if (SYMBOL_CALLS_LOCAL (info, h))
16453 {
16454 struct elf_dyn_relocs **pp;
16455
16456 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16457 {
16458 p->count -= p->pc_count;
16459 p->pc_count = 0;
16460 if (p->count == 0)
16461 *pp = p->next;
16462 else
16463 pp = &p->next;
16464 }
16465 }
16466
16467 if (htab->root.target_os == is_vxworks)
16468 {
16469 struct elf_dyn_relocs **pp;
16470
16471 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16472 {
16473 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16474 *pp = p->next;
16475 else
16476 pp = &p->next;
16477 }
16478 }
16479
16480 /* Also discard relocs on undefined weak syms with non-default
16481 visibility. */
16482 if (h->dyn_relocs != NULL
16483 && h->root.type == bfd_link_hash_undefweak)
16484 {
16485 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16486 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16487 h->dyn_relocs = NULL;
16488
16489 /* Make sure undefined weak symbols are output as a dynamic
16490 symbol in PIEs. */
16491 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16492 && !h->forced_local)
16493 {
16494 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16495 return FALSE;
16496 }
16497 }
16498
16499 else if (htab->root.is_relocatable_executable && h->dynindx == -1
16500 && h->root.type == bfd_link_hash_new)
16501 {
16502 /* Output absolute symbols so that we can create relocations
16503 against them. For normal symbols we output a relocation
16504 against the section that contains them. */
16505 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16506 return FALSE;
16507 }
16508
16509 }
16510 else
16511 {
16512 /* For the non-shared case, discard space for relocs against
16513 symbols which turn out to need copy relocs or are not
16514 dynamic. */
16515
16516 if (!h->non_got_ref
16517 && ((h->def_dynamic
16518 && !h->def_regular)
16519 || (htab->root.dynamic_sections_created
16520 && (h->root.type == bfd_link_hash_undefweak
16521 || h->root.type == bfd_link_hash_undefined))))
16522 {
16523 /* Make sure this symbol is output as a dynamic symbol.
16524 Undefined weak syms won't yet be marked as dynamic. */
16525 if (h->dynindx == -1 && !h->forced_local
16526 && h->root.type == bfd_link_hash_undefweak)
16527 {
16528 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16529 return FALSE;
16530 }
16531
16532 /* If that succeeded, we know we'll be keeping all the
16533 relocs. */
16534 if (h->dynindx != -1)
16535 goto keep;
16536 }
16537
16538 h->dyn_relocs = NULL;
16539
16540 keep: ;
16541 }
16542
16543 /* Finally, allocate space. */
16544 for (p = h->dyn_relocs; p != NULL; p = p->next)
16545 {
16546 asection *sreloc = elf_section_data (p->sec)->sreloc;
16547
16548 if (h->type == STT_GNU_IFUNC
16549 && eh->plt.noncall_refcount == 0
16550 && SYMBOL_REFERENCES_LOCAL (info, h))
16551 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16552 else if (h->dynindx != -1
16553 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
16554 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16555 else if (htab->fdpic_p && !bfd_link_pic(info))
16556 htab->srofixup->size += 4 * p->count;
16557 else
16558 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16559 }
16560
16561 return TRUE;
16562 }
16563
16564 void
16565 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16566 int byteswap_code)
16567 {
16568 struct elf32_arm_link_hash_table *globals;
16569
16570 globals = elf32_arm_hash_table (info);
16571 if (globals == NULL)
16572 return;
16573
16574 globals->byteswap_code = byteswap_code;
16575 }
16576
16577 /* Set the sizes of the dynamic sections. */
16578
static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      unsigned int symndx;
      struct fdpic_local *local_fdpic_cnts;

      if (! is_arm_elf (ibfd))
	continue;

      /* First reserve space for the dynamic relocations recorded
	 against local symbols during check_relocs.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (htab->root.target_os == is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  /* A static FDPIC executable gets rofixups (4 bytes
		     each) instead of dynamic relocations.  */
		  if (htab->fdpic_p && !bfd_link_pic(info))
		    htab->srofixup->size += 4 * p->count;
		  else
		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      /* Walk the per-local-symbol arrays in parallel: GOT refcount,
	 iplt info, TLS type, TLS descriptor GOT entry and FDPIC
	 counters all use the same symbol index.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;

	  /* FDPIC support.  */
	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
	    {
	      /* Only one function descriptor (8 bytes in .got) is
		 allocated per symbol, shared by all references.  */
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}
	    }

	  if (local_fdpic_cnts->funcdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}

	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
	      else
		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
	    }

	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  /* If every PLT reference is a call, the entries are
		     emitted as R_ARM_IRELATIVE in .rel.iplt.  */
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  /* TLS descriptors live in .got.plt, after the jump
		     table; record the offset relative to its end.  */
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
					    symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
		    htab->srofixup->size += 4;

		  if ((bfd_link_pic (info) || htab->fdpic_p)
		      && *local_tls_type & GOT_TLS_GDESC)
		    {
		      /* The TLS_DESC relocation goes in .rel(a).plt;
		         GDESC also requires the lazy trampoline.  */
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* At the very end of the .rofixup section is a pointer to the GOT,
     reserve space for it.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    htab->srofixup->size += 4;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      /* Scan for interworking glue and for the VFP11/STM32L4XX
	 erratum workarounds; failures are reported but sizing
	 continues.  */
      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  if (htab->tls_trampoline)
    {
      /* The trampoline lives in .plt; make sure the PLT header is
	 accounted for even if no ordinary PLT entries exist.  */
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if ((info->flags & DF_BIND_NOW))
	htab->root.tlsdesc_plt = 0;
      else
	{
	  htab->root.tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->root.tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_section_name (s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  ;
	}
      else if (startswith (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->root.sdynbss
	       && s != htab->root.sdynrelro
	       && s != htab->srofixup)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  /* Emit the DT_* dynamic tags (plus VxWorks extras if relevant).  */
  return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd, info,
						  relocs);
}
16958
16959 /* Size sections even though they're not dynamic. We use it to setup
16960 _TLS_MODULE_BASE_, if needed. */
16961
static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);

  /* Nothing to do for ld -r.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  /* If the link uses TLS at all, define a hidden local
     _TLS_MODULE_BASE_ symbol at the start of the TLS segment.  */
  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* Note: the type is set on the looked-up entry, then
	     TLSBASE is redirected to the entry returned by
	     _bfd_generic_link_add_one_symbol for the remaining
	     attributes — keep this ordering.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }

  /* For FDPIC, let __stacksize control the stack segment size.  */
  if (htab->fdpic_p && !bfd_link_relocatable (info)
      && !bfd_elf_stack_segment_size (output_bfd, info,
				      "__stacksize", DEFAULT_STACK_SIZE))
    return FALSE;

  return TRUE;
}
17010
17011 /* Finish up dynamic symbol handling. We set the contents of various
17012 dynamic sections here. */
17013
static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* Fill in this symbol's PLT entry, if it has one.  */
  if (h->plt.offset != (bfd_vma) -1)
    {
      /* .iplt entries are populated elsewhere; here we only handle
	 ordinary .plt entries, which need a dynamic symbol index.  */
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      /* Read-only-after-relocation data gets its copy reloc in
	 .rel(a).dyn.rel.ro rather than the plain bss reloc section.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
     it is relative to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->fdpic_p
	  && htab->root.target_os != is_vxworks
	  && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
17102
17103 static void
17104 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17105 void *contents,
17106 const unsigned long *template, unsigned count)
17107 {
17108 unsigned ix;
17109
17110 for (ix = 0; ix != count; ix++)
17111 {
17112 unsigned long insn = template[ix];
17113
17114 /* Emit mov pc,rx if bx is not permitted. */
17115 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17116 insn = (insn & 0xf000000f) | 0x01a0f000;
17117 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17118 }
17119 }
17120
17121 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17122 other variants, NaCl needs this entry in a static executable's
17123 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17124 zero. For .iplt really only the last bundle is useful, and .iplt
17125 could have a shorter first entry, with each individual PLT entry's
17126 relative branch calculated differently so it targets the last
17127 bundle instead of the instruction before it (labelled .Lplt_tail
17128 above). But it's simpler to keep the size and layout of PLT0
17129 consistent with the dynamic case, at the cost of some dead code at
17130 the start of .iplt and the one dead store to the stack at the start
17131 of .Lplt_tail. */
17132 static void
17133 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17134 asection *plt, bfd_vma got_displacement)
17135 {
17136 unsigned int i;
17137
17138 put_arm_insn (htab, output_bfd,
17139 elf32_arm_nacl_plt0_entry[0]
17140 | arm_movw_immediate (got_displacement),
17141 plt->contents + 0);
17142 put_arm_insn (htab, output_bfd,
17143 elf32_arm_nacl_plt0_entry[1]
17144 | arm_movt_immediate (got_displacement),
17145 plt->contents + 4);
17146
17147 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17148 put_arm_insn (htab, output_bfd,
17149 elf32_arm_nacl_plt0_entry[i],
17150 plt->contents + (i * 4));
17151 }
17152
/* Finish up the dynamic sections.  Called at the very end of the link:
   patches the now-known addresses/sizes into .dynamic, emits the PLT
   header and TLS trampolines, and initializes the GOT header words.
   Returns FALSE on a malformed layout (e.g. discarded dynamic
   sections).  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Walk every entry of .dynamic and fill in the values that were
	 not known until the final section layout was fixed.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	    default:
	      if (htab->root.target_os == is_vxworks
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags were already filled in by generic ELF code;
	       nothing ARM-specific to do.  */
	    case DT_HASH:
	    case DT_STRTAB:
	    case DT_SYMTAB:
	    case DT_VERSYM:
	    case DT_VERDEF:
	    case DT_VERNEED:
	      break;

	    /* DT_PLTGOT and DT_JMPREL both want the final address of a
	       named linker section.  */
	    case DT_PLTGOT:
	      name = ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  _bfd_error_handler
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	    case DT_REL:
	    case DT_RELA:
	      break;

	    /* Address of the lazy TLS descriptor trampoline, relative
	       to the start of .plt.  */
	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->root.tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->root.tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* Set the bottom bit of DT_INIT/FINI if the
	       corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->root.target_os == is_vxworks)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->root.target_os == is_nacl)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      /* Thumb-2 PLT header: three instructions, then the GOT
		 displacement stored as data at offset 12.  */
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      /* Standard Arm PLT header: four instructions followed by
		 the GOT displacement word.  */
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->root.tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  /* Emit the six-instruction lazy TLS descriptor trampoline.  */
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->root.tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  /* Template slots [6] and [7] hold pc-bias constants for the
	     two data words patched in after the code (at +24 and +28).  */
	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->root.tlsdesc_got
		      - (plt_address + htab->root.tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->root.tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->root.tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->root.tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->root.target_os == is_vxworks
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry owns a pair of relocations: the first is
	     repointed at _GLOBAL_OFFSET_TABLE_, the second at the PLT
	     symbol.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->root.target_os == is_nacl
      && htab->root.iplt != NULL
      && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* Word 0 holds the address of .dynamic (or 0 when there is
	     none); words 1 and 2 are reserved for the dynamic linker.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  /* At the very end of the .rofixup section is a pointer to the GOT.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    {
      struct elf_link_hash_entry *hgot = htab->root.hgot;

      bfd_vma got_value = hgot->root.u.def.value
	+ hgot->root.u.def.section->output_section->vma
	+ hgot->root.u.def.section->output_offset;

      arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);

      /* Make sure we allocated and generated the same number of fixups.  */
      BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
    }

  return TRUE;
}
17470
17471 static bfd_boolean
17472 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17473 {
17474 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17475 struct elf32_arm_link_hash_table *globals;
17476 struct elf_segment_map *m;
17477
17478 if (!_bfd_elf_init_file_header (abfd, link_info))
17479 return FALSE;
17480
17481 i_ehdrp = elf_elfheader (abfd);
17482
17483 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17484 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17485 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17486
17487 if (link_info)
17488 {
17489 globals = elf32_arm_hash_table (link_info);
17490 if (globals != NULL && globals->byteswap_code)
17491 i_ehdrp->e_flags |= EF_ARM_BE8;
17492
17493 if (globals->fdpic_p)
17494 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17495 }
17496
17497 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17498 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17499 {
17500 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17501 if (abi == AEABI_VFP_args_vfp)
17502 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17503 else
17504 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17505 }
17506
17507 /* Scan segment to set p_flags attribute if it contains only sections with
17508 SHF_ARM_PURECODE flag. */
17509 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17510 {
17511 unsigned int j;
17512
17513 if (m->count == 0)
17514 continue;
17515 for (j = 0; j < m->count; j++)
17516 {
17517 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17518 break;
17519 }
17520 if (j == m->count)
17521 {
17522 m->p_flags = PF_X;
17523 m->p_flags_valid = 1;
17524 }
17525 }
17526 return TRUE;
17527 }
17528
17529 static enum elf_reloc_type_class
17530 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17531 const asection *rel_sec ATTRIBUTE_UNUSED,
17532 const Elf_Internal_Rela *rela)
17533 {
17534 switch ((int) ELF32_R_TYPE (rela->r_info))
17535 {
17536 case R_ARM_RELATIVE:
17537 return reloc_class_relative;
17538 case R_ARM_JUMP_SLOT:
17539 return reloc_class_plt;
17540 case R_ARM_COPY:
17541 return reloc_class_copy;
17542 case R_ARM_IRELATIVE:
17543 return reloc_class_ifunc;
17544 default:
17545 return reloc_class_normal;
17546 }
17547 }
17548
/* Last-minute processing before the output is written: bring the ARM
   note section (ARM_NOTE_SECTION) up to date for ABFD.  */
static void
arm_final_write_processing (bfd *abfd)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
17554
/* elf_backend_final_write_processing hook: run the ARM-specific note
   update, then fall through to the generic ELF finalization.  */
static bfd_boolean
elf32_arm_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return _bfd_elf_final_write_processing (abfd);
}
17561
17562 /* Return TRUE if this is an unwinding table entry. */
17563
17564 static bfd_boolean
17565 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17566 {
17567 return (startswith (name, ELF_STRING_ARM_unwind)
17568 || startswith (name, ELF_STRING_ARM_unwind_once));
17569 }
17570
17571
17572 /* Set the type and flags for an ARM section. We do this by
17573 the section name, which is a hack, but ought to work. */
17574
17575 static bfd_boolean
17576 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17577 {
17578 const char * name;
17579
17580 name = bfd_section_name (sec);
17581
17582 if (is_arm_elf_unwind_section_name (abfd, name))
17583 {
17584 hdr->sh_type = SHT_ARM_EXIDX;
17585 hdr->sh_flags |= SHF_LINK_ORDER;
17586 }
17587
17588 if (sec->flags & SEC_ELF_PURECODE)
17589 hdr->sh_flags |= SHF_ARM_PURECODE;
17590
17591 return TRUE;
17592 }
17593
17594 /* Handle an ARM specific section when reading an object file. This is
17595 called when bfd_section_from_shdr finds a section with an unknown
17596 type. */
17597
17598 static bfd_boolean
17599 elf32_arm_section_from_shdr (bfd *abfd,
17600 Elf_Internal_Shdr * hdr,
17601 const char *name,
17602 int shindex)
17603 {
17604 /* There ought to be a place to keep ELF backend specific flags, but
17605 at the moment there isn't one. We just keep track of the
17606 sections by their name, instead. Fortunately, the ABI gives
17607 names for all the ARM specific sections, so we will probably get
17608 away with this. */
17609 switch (hdr->sh_type)
17610 {
17611 case SHT_ARM_EXIDX:
17612 case SHT_ARM_PREEMPTMAP:
17613 case SHT_ARM_ATTRIBUTES:
17614 break;
17615
17616 default:
17617 return FALSE;
17618 }
17619
17620 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17621 return FALSE;
17622
17623 return TRUE;
17624 }
17625
17626 static _arm_elf_section_data *
17627 get_arm_elf_section_data (asection * sec)
17628 {
17629 if (sec && sec->owner && is_arm_elf (sec->owner))
17630 return elf32_arm_section_data (sec);
17631 else
17632 return NULL;
17633 }
17634
/* State threaded through the emission of linker-generated local
   symbols (mapping symbols, stub symbols).  */
typedef struct
{
  /* Opaque state handed back to FUNC on every call.  */
  void *flaginfo;
  /* The link being performed.  */
  struct bfd_link_info *info;
  /* Section the symbols being emitted belong to.  */
  asection *sec;
  /* Output section index corresponding to SEC.  */
  int sec_shndx;
  /* Callback that actually outputs a symbol; callers here treat a
     return value of 1 as success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
17644
/* The three kinds of ARM ELF mapping symbols: "$a" (Arm code),
   "$t" (Thumb code) and "$d" (data).  The enumerators are used as
   indices into the name table in elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
17651
17652
17653 /* Output a single mapping symbol. */
17654
17655 static bfd_boolean
17656 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17657 enum map_symbol_type type,
17658 bfd_vma offset)
17659 {
17660 static const char *names[3] = {"$a", "$t", "$d"};
17661 Elf_Internal_Sym sym;
17662
17663 sym.st_value = osi->sec->output_section->vma
17664 + osi->sec->output_offset
17665 + offset;
17666 sym.st_size = 0;
17667 sym.st_other = 0;
17668 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17669 sym.st_shndx = osi->sec_shndx;
17670 sym.st_target_internal = 0;
17671 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17672 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17673 }
17674
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   The offsets used below describe where code and data lie within the
   PLT entry layout of each target variant.  Returns FALSE on error.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was ever allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  if (is_iplt_entry_p)
    {
      /* .iplt has no header.  */
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Strip the least-significant bit of the recorded offset (used as a
     flag bit — presumably marking Thumb entries; the mapping symbols
     want the plain offset).  */
  addr = root_plt->offset & -2;
  if (htab->root.target_os == is_vxworks)
    {
      /* VxWorks PLT entries interleave code and literal data.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->root.target_os == is_nacl)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (htab->fdpic_p)
    {
      /* FDPIC entries are either Arm or Thumb depending on the
	 target's instruction set.  */
      enum map_symbol_type type = using_thumb_only(htab)
	? ARM_MAP_THUMB
	: ARM_MAP_ARM;

      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	  return FALSE;
      if (!elf32_arm_output_map_sym (osi, type, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
	return FALSE;
      /* The larger FDPIC entry layout has a second code region after
	 the data words.  */
      if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
	  return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      /* A Thumb thunk, when present, sits 4 bytes before the entry.  */
      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
17775
17776 /* Output mapping symbols for PLT entries associated with H. */
17777
17778 static bfd_boolean
17779 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17780 {
17781 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17782 struct elf32_arm_link_hash_entry *eh;
17783
17784 if (h->root.type == bfd_link_hash_indirect)
17785 return TRUE;
17786
17787 if (h->root.type == bfd_link_hash_warning)
17788 /* When warning symbols are created, they **replace** the "real"
17789 entry in the hash table, thus we never get to see the real
17790 symbol in a hash traversal. So look at it now. */
17791 h = (struct elf_link_hash_entry *) h->root.u.i.link;
17792
17793 eh = (struct elf32_arm_link_hash_entry *) h;
17794 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17795 &h->plt, &eh->plt);
17796 }
17797
17798 /* Bind a veneered symbol to its veneer identified by its hash entry
17799 STUB_ENTRY. The veneered location thus loose its symbol. */
17800
17801 static void
17802 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17803 {
17804 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17805
17806 BFD_ASSERT (hash);
17807 hash->root.root.u.def.section = stub_entry->stub_sec;
17808 hash->root.root.u.def.value = stub_entry->stub_offset;
17809 hash->root.size = stub_entry->stub_size;
17810 }
17811
17812 /* Output a single local symbol for a generated stub. */
17813
17814 static bfd_boolean
17815 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17816 bfd_vma offset, bfd_vma size)
17817 {
17818 Elf_Internal_Sym sym;
17819
17820 sym.st_value = osi->sec->output_section->vma
17821 + osi->sec->output_offset
17822 + offset;
17823 sym.st_size = size;
17824 sym.st_other = 0;
17825 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17826 sym.st_shndx = osi->sec_shndx;
17827 sym.st_target_internal = 0;
17828 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17829 }
17830
/* bfd_hash_traverse callback: emit the local symbol naming one stub
   and the mapping symbols describing the code/data layout of its
   template.  GEN_ENTRY is the stub hash entry; IN_ARG is the
   output_arch_syminfo for the section currently being processed.  */
static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    /* The veneered symbol itself is redirected at the stub, so no new
       symbol needs to be emitted.  */
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a local symbol for the stub; Thumb entry points get the
	 low bit of the address set, as usual for Thumb symbols.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the template, emitting a mapping symbol at each transition
     between Arm code, Thumb code and data.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance by the encoded size of this template element.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
17938
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.

   Covers, in order: data-only input sections missing a mapping symbol,
   the ARM<->Thumb and BX glue sections, long-branch stubs, the PLT
   header, individual PLT/iPLT entries, and the TLS trampolines.  FUNC
   is the generic linker's symbol-output callback; FLAGINFO is passed
   through to it.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    /* Only consider allocated code sections with contents that
	       the linker did not itself create and that have an empty
	       mapping-symbol table.  */
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* The glue entry size depends on which flavour of veneer was
	 generated.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      /* Each entry is Arm code followed by one data word at its end.  */
      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      /* Each entry starts with Thumb code and switches to Arm code at
	 offset 4.  */
      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  /* arm_map_one_stub filters on osi.sec, so each traversal only
	     emits the stubs living in this section.  */
	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  */
      if (htab->root.target_os == is_vxworks)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->root.target_os == is_nacl)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab) && !htab->fdpic_p)
	{
	  /* Thumb-2 header: code, a data word at 12, then the first
	     entry's code at 16.  */
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->fdpic_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->root.target_os == is_nacl
      && htab->root.iplt
      && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Per-entry mapping symbols: global symbols via the hash
	 traversal, then local ifunc PLT entries per input BFD.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->root.tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM,
				     htab->root.tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->root.tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
18175
18176 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18177 the import library. All SYMCOUNT symbols of ABFD can be examined
18178 from their pointers in SYMS. Pointers of symbols to keep should be
18179 stored continuously at the beginning of that array.
18180
18181 Returns the number of symbols to keep. */
18182
18183 static unsigned int
18184 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18185 struct bfd_link_info *info,
18186 asymbol **syms, long symcount)
18187 {
18188 size_t maxnamelen;
18189 char *cmse_name;
18190 long src_count, dst_count = 0;
18191 struct elf32_arm_link_hash_table *htab;
18192
18193 htab = elf32_arm_hash_table (info);
18194 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18195 symcount = 0;
18196
18197 maxnamelen = 128;
18198 cmse_name = (char *) bfd_malloc (maxnamelen);
18199 BFD_ASSERT (cmse_name);
18200
18201 for (src_count = 0; src_count < symcount; src_count++)
18202 {
18203 struct elf32_arm_link_hash_entry *cmse_hash;
18204 asymbol *sym;
18205 flagword flags;
18206 char *name;
18207 size_t namelen;
18208
18209 sym = syms[src_count];
18210 flags = sym->flags;
18211 name = (char *) bfd_asymbol_name (sym);
18212
18213 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18214 continue;
18215 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18216 continue;
18217
18218 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18219 if (namelen > maxnamelen)
18220 {
18221 cmse_name = (char *)
18222 bfd_realloc (cmse_name, namelen);
18223 maxnamelen = namelen;
18224 }
18225 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18226 cmse_hash = (struct elf32_arm_link_hash_entry *)
18227 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18228
18229 if (!cmse_hash
18230 || (cmse_hash->root.root.type != bfd_link_hash_defined
18231 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18232 || cmse_hash->root.type != STT_FUNC)
18233 continue;
18234
18235 syms[dst_count++] = sym;
18236 }
18237 free (cmse_name);
18238
18239 syms[dst_count] = NULL;
18240
18241 return dst_count;
18242 }
18243
18244 /* Filter symbols of ABFD to include in the import library. All
18245 SYMCOUNT symbols of ABFD can be examined from their pointers in
18246 SYMS. Pointers of symbols to keep should be stored continuously at
18247 the beginning of that array.
18248
18249 Returns the number of symbols to keep. */
18250
18251 static unsigned int
18252 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18253 struct bfd_link_info *info,
18254 asymbol **syms, long symcount)
18255 {
18256 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18257
18258 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18259 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18260 library to be a relocatable object file. */
18261 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18262 if (globals->cmse_implib)
18263 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18264 else
18265 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18266 }
18267
18268 /* Allocate target specific section data. */
18269
18270 static bfd_boolean
18271 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18272 {
18273 if (!sec->used_by_bfd)
18274 {
18275 _arm_elf_section_data *sdata;
18276 size_t amt = sizeof (*sdata);
18277
18278 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18279 if (sdata == NULL)
18280 return FALSE;
18281 sec->used_by_bfd = sdata;
18282 }
18283
18284 return _bfd_elf_new_section_hook (abfd, sec);
18285 }
18286
18287
18288 /* Used to order a list of mapping symbols by address. */
18289
18290 static int
18291 elf32_arm_compare_mapping (const void * a, const void * b)
18292 {
18293 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18294 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18295
18296 if (amap->vma > bmap->vma)
18297 return 1;
18298 else if (amap->vma < bmap->vma)
18299 return -1;
18300 else if (amap->type > bmap->type)
18301 /* Ensure results do not depend on the host qsort for objects with
18302 multiple mapping symbols at the same address by sorting on type
18303 after vma. */
18304 return 1;
18305 else if (amap->type < bmap->type)
18306 return -1;
18307 else
18308 return 0;
18309 }
18310
18311 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18312
18313 static unsigned long
18314 offset_prel31 (unsigned long addr, bfd_vma offset)
18315 {
18316 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18317 }
18318
18319 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18320 relocations. */
18321
18322 static void
18323 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18324 {
18325 unsigned long first_word = bfd_get_32 (output_bfd, from);
18326 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18327
18328 /* High bit of first word is supposed to be zero. */
18329 if ((first_word & 0x80000000ul) == 0)
18330 first_word = offset_prel31 (first_word, offset);
18331
18332 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18333 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18334 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18335 second_word = offset_prel31 (second_word, offset);
18336
18337 bfd_put_32 (output_bfd, first_word, to);
18338 bfd_put_32 (output_bfd, second_word, to + 4);
18339 }
18340
18341 /* Data for make_branch_to_a8_stub(). */
18342
struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being patched.  */
  asection *writing_section;
  /* Cached contents of that section, indexed by section offset.  */
  bfd_byte *contents;
};
18348
18349
18350 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18351 places for a particular section. */
18352
static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Ignore entries for other sections and entries that are not
     Cortex-A8 erratum veneers (those sort below arm_stub_a8_veneer_lwm
     in the stub_type enumeration).  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* BLX switches to ARM state: the branch offset is computed from the
     4-byte-aligned address of the veneered instruction.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  /* -4 accounts for the Thumb PC offset (PC reads as insn + 4).  */
  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Select the 32-bit Thumb branch opcode; all three variants share
     the jump24 offset-encoding code below (note the goto targets a
     label inside the arm_stub_a8_veneer_bl case block).  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;		/* B.W (encoding T4).  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;		/* BLX (encoding T2).  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL (encoding T1).  */

      jump24:
	/* Signed 25-bit range of the Thumb-2 24-bit branch.  */
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;		/* imm11.  */
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;	/* imm10.  */
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Patch the branch over the veneered instruction, one halfword at a
     time (Thumb-2 instructions are stored as two 16-bit units).  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
18457
18458 /* Beginning of stm32l4xx work-around. */
18459
18460 /* Functions encoding instructions necessary for the emission of the
18461 fix-stm32l4xx-629360.
18462 Encoding is extracted from the
18463 ARM (C) Architecture Reference Manual
18464 ARMv7-A and ARMv7-R edition
18465 ARM DDI 0406C.b (ID072512). */
18466
18467 static inline bfd_vma
18468 create_instruction_branch_absolute (int branch_offset)
18469 {
18470 /* A8.8.18 B (A8-334)
18471 B target_address (Encoding T4). */
18472 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18473 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18474 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18475
18476 int s = ((branch_offset & 0x1000000) >> 24);
18477 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18478 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18479
18480 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18481 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18482
18483 bfd_vma patched_inst = 0xf0009000
18484 | s << 26 /* S. */
18485 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18486 | j1 << 13 /* J1. */
18487 | j2 << 11 /* J2. */
18488 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18489
18490 return patched_inst;
18491 }
18492
18493 static inline bfd_vma
18494 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18495 {
18496 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18497 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18498 bfd_vma patched_inst = 0xe8900000
18499 | (/*W=*/wback << 21)
18500 | (base_reg << 16)
18501 | (reg_mask & 0x0000ffff);
18502
18503 return patched_inst;
18504 }
18505
18506 static inline bfd_vma
18507 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18508 {
18509 /* A8.8.60 LDMDB/LDMEA (A8-402)
18510 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18511 bfd_vma patched_inst = 0xe9100000
18512 | (/*W=*/wback << 21)
18513 | (base_reg << 16)
18514 | (reg_mask & 0x0000ffff);
18515
18516 return patched_inst;
18517 }
18518
18519 static inline bfd_vma
18520 create_instruction_mov (int target_reg, int source_reg)
18521 {
18522 /* A8.8.103 MOV (register) (A8-486)
18523 MOV Rd, Rm (Encoding T1). */
18524 bfd_vma patched_inst = 0x4600
18525 | (target_reg & 0x7)
18526 | ((target_reg & 0x8) >> 3) << 7
18527 | (source_reg << 3);
18528
18529 return patched_inst;
18530 }
18531
18532 static inline bfd_vma
18533 create_instruction_sub (int target_reg, int source_reg, int value)
18534 {
18535 /* A8.8.221 SUB (immediate) (A8-708)
18536 SUB Rd, Rn, #value (Encoding T3). */
18537 bfd_vma patched_inst = 0xf1a00000
18538 | (target_reg << 8)
18539 | (source_reg << 16)
18540 | (/*S=*/0 << 20)
18541 | ((value & 0x800) >> 11) << 26
18542 | ((value & 0x700) >> 8) << 12
18543 | (value & 0x0ff);
18544
18545 return patched_inst;
18546 }
18547
18548 static inline bfd_vma
18549 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18550 int first_reg)
18551 {
18552 /* A8.8.332 VLDM (A8-922)
18553 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18554 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18555 | (/*W=*/wback << 21)
18556 | (base_reg << 16)
18557 | (num_words & 0x000000ff)
18558 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18559 | (first_reg & 0x00000001) << 22;
18560
18561 return patched_inst;
18562 }
18563
18564 static inline bfd_vma
18565 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18566 int first_reg)
18567 {
18568 /* A8.8.332 VLDM (A8-922)
18569 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18570 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18571 | (base_reg << 16)
18572 | (num_words & 0x000000ff)
18573 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18574 | (first_reg & 0x00000001) << 22;
18575
18576 return patched_inst;
18577 }
18578
18579 static inline bfd_vma
18580 create_instruction_udf_w (int value)
18581 {
18582 /* A8.8.247 UDF (A8-758)
18583 Undefined (Encoding T2). */
18584 bfd_vma patched_inst = 0xf7f0a000
18585 | (value & 0x00000fff)
18586 | (value & 0x000f0000) << 16;
18587
18588 return patched_inst;
18589 }
18590
18591 static inline bfd_vma
18592 create_instruction_udf (int value)
18593 {
18594 /* A8.8.247 UDF (A8-758)
18595 Undefined (Encoding T1). */
18596 bfd_vma patched_inst = 0xde00
18597 | (value & 0xff);
18598
18599 return patched_inst;
18600 }
18601
18602 /* Functions writing an instruction in memory, returning the next
18603 memory position to write to. */
18604
18605 static inline bfd_byte *
18606 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18607 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18608 {
18609 put_thumb2_insn (htab, output_bfd, insn, pt);
18610 return pt + 4;
18611 }
18612
18613 static inline bfd_byte *
18614 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18615 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18616 {
18617 put_thumb_insn (htab, output_bfd, insn, pt);
18618 return pt + 2;
18619 }
18620
18621 /* Function filling up a region in memory with T1 and T2 UDFs taking
18622 care of alignment. */
18623
18624 static bfd_byte *
18625 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18626 bfd * output_bfd,
18627 const bfd_byte * const base_stub_contents,
18628 bfd_byte * const from_stub_contents,
18629 const bfd_byte * const end_stub_contents)
18630 {
18631 bfd_byte *current_stub_contents = from_stub_contents;
18632
18633 /* Fill the remaining of the stub with deterministic contents : UDF
18634 instructions.
18635 Check if realignment is needed on modulo 4 frontier using T1, to
18636 further use T2. */
18637 if ((current_stub_contents < end_stub_contents)
18638 && !((current_stub_contents - base_stub_contents) % 2)
18639 && ((current_stub_contents - base_stub_contents) % 4))
18640 current_stub_contents =
18641 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18642 create_instruction_udf (0));
18643
18644 for (; current_stub_contents < end_stub_contents;)
18645 current_stub_contents =
18646 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18647 create_instruction_udf_w (0));
18648
18649 return current_stub_contents;
18650 }
18651
18652 /* Functions writing the stream of instructions equivalent to the
18653 derived sequence for ldmia, ldmdb, vldm respectively. */
18654
/* Emit into BASE_STUB_CONTENTS the Thumb-2 sequence replacing the
   LDMIA instruction INITIAL_INSN (found at INITIAL_INSN_ADDR) so that
   no single LDM loads more than 8 registers, working around the
   STM32L4XX erratum.  HTAB/OUTPUT_BFD are passed through to the
   instruction-emission helpers.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (writeback) bit.  */
  int ri, rn = (initial_insn & 0x000F0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  Skipped when the LDM loaded PC: the
	 branch would never be reached.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Architectural constraints on a valid Thumb-2 LDMIA:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
18798
/* Emit into BASE_STUB_CONTENTS the Thumb-2 sequence replacing the
   LDMDB instruction INITIAL_INSN (found at INITIAL_INSN_ADDR) so that
   no single LDM loads more than 8 registers, working around the
   STM32L4XX erratum.  The emitted sequence depends on the writeback
   (W) bit and on whether PC and/or the base register Rn appear in the
   register list.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (writeback) bit.  */
  int ri, rn = (initial_insn & 0x000f0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Architectural constraints on a valid Thumb-2 LDMDB:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  One case per combination of wback,
     restore_pc and restore_rn (wback && restore_rn is architecturally
     invalid and asserted below).  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  Loading PC here branches
	 back to the patched code; no trailing B is needed.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
19044
/* Replace the Thumb-2 VLDM instruction INITIAL_INSN (located at
   INITIAL_INSN_ADDR) with a veneer written at BASE_STUB_CONTENTS that
   works around the STM32L4XX erratum: transfers of more than 8 words
   are split into several VLDM instructions of at most 8 words each,
   followed by a branch back to the instruction after the original.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Number of 32-bit words transferred, encoded in the low byte of the
     VLDM instruction.  */
  int num_words = initial_insn & 0xff;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
     sequences of 8 words or fewer, which do not cause the hardware
     issue; those are copied unmodified.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Decode the addressing mode and register fields of the VLDM.  */
      bfd_boolean is_dp = /* DP encoding. */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		   && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* All but the last chunk transfer a full 8 words; the last
		 transfers the remainder.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
19165
19166 static void
19167 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19168 bfd * output_bfd,
19169 const insn32 wrong_insn,
19170 const bfd_byte *const wrong_insn_addr,
19171 bfd_byte *const stub_contents)
19172 {
19173 if (is_thumb2_ldmia (wrong_insn))
19174 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19175 wrong_insn, wrong_insn_addr,
19176 stub_contents);
19177 else if (is_thumb2_ldmdb (wrong_insn))
19178 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19179 wrong_insn, wrong_insn_addr,
19180 stub_contents);
19181 else if (is_thumb2_vldm (wrong_insn))
19182 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19183 wrong_insn, wrong_insn_addr,
19184 stub_contents);
19185 }
19186
19187 /* End of stm32l4xx work-around. */
19188
19189
19190 /* Do code byteswapping. Return FALSE afterwards so that the section is
19191 written out as normal. */
19192
19193 static bfd_boolean
19194 elf32_arm_write_section (bfd *output_bfd,
19195 struct bfd_link_info *link_info,
19196 asection *sec,
19197 bfd_byte *contents)
19198 {
19199 unsigned int mapcount, errcount;
19200 _arm_elf_section_data *arm_data;
19201 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19202 elf32_arm_section_map *map;
19203 elf32_vfp11_erratum_list *errnode;
19204 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19205 bfd_vma ptr;
19206 bfd_vma end;
19207 bfd_vma offset = sec->output_section->vma + sec->output_offset;
19208 bfd_byte tmp;
19209 unsigned int i;
19210
19211 if (globals == NULL)
19212 return FALSE;
19213
19214 /* If this section has not been allocated an _arm_elf_section_data
19215 structure then we cannot record anything. */
19216 arm_data = get_arm_elf_section_data (sec);
19217 if (arm_data == NULL)
19218 return FALSE;
19219
19220 mapcount = arm_data->mapcount;
19221 map = arm_data->map;
19222 errcount = arm_data->erratumcount;
19223
19224 if (errcount != 0)
19225 {
19226 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19227
19228 for (errnode = arm_data->erratumlist; errnode != 0;
19229 errnode = errnode->next)
19230 {
19231 bfd_vma target = errnode->vma - offset;
19232
19233 switch (errnode->type)
19234 {
19235 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19236 {
19237 bfd_vma branch_to_veneer;
19238 /* Original condition code of instruction, plus bit mask for
19239 ARM B instruction. */
19240 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19241 | 0x0a000000;
19242
19243 /* The instruction is before the label. */
19244 target -= 4;
19245
19246 /* Above offset included in -4 below. */
19247 branch_to_veneer = errnode->u.b.veneer->vma
19248 - errnode->vma - 4;
19249
19250 if ((signed) branch_to_veneer < -(1 << 25)
19251 || (signed) branch_to_veneer >= (1 << 25))
19252 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19253 "range"), output_bfd);
19254
19255 insn |= (branch_to_veneer >> 2) & 0xffffff;
19256 contents[endianflip ^ target] = insn & 0xff;
19257 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19258 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19259 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19260 }
19261 break;
19262
19263 case VFP11_ERRATUM_ARM_VENEER:
19264 {
19265 bfd_vma branch_from_veneer;
19266 unsigned int insn;
19267
19268 /* Take size of veneer into account. */
19269 branch_from_veneer = errnode->u.v.branch->vma
19270 - errnode->vma - 12;
19271
19272 if ((signed) branch_from_veneer < -(1 << 25)
19273 || (signed) branch_from_veneer >= (1 << 25))
19274 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19275 "range"), output_bfd);
19276
19277 /* Original instruction. */
19278 insn = errnode->u.v.branch->u.b.vfp_insn;
19279 contents[endianflip ^ target] = insn & 0xff;
19280 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19281 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19282 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19283
19284 /* Branch back to insn after original insn. */
19285 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19286 contents[endianflip ^ (target + 4)] = insn & 0xff;
19287 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19288 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19289 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19290 }
19291 break;
19292
19293 default:
19294 abort ();
19295 }
19296 }
19297 }
19298
19299 if (arm_data->stm32l4xx_erratumcount != 0)
19300 {
19301 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19302 stm32l4xx_errnode != 0;
19303 stm32l4xx_errnode = stm32l4xx_errnode->next)
19304 {
19305 bfd_vma target = stm32l4xx_errnode->vma - offset;
19306
19307 switch (stm32l4xx_errnode->type)
19308 {
19309 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19310 {
19311 unsigned int insn;
19312 bfd_vma branch_to_veneer =
19313 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19314
19315 if ((signed) branch_to_veneer < -(1 << 24)
19316 || (signed) branch_to_veneer >= (1 << 24))
19317 {
19318 bfd_vma out_of_range =
19319 ((signed) branch_to_veneer < -(1 << 24)) ?
19320 - branch_to_veneer - (1 << 24) :
19321 ((signed) branch_to_veneer >= (1 << 24)) ?
19322 branch_to_veneer - (1 << 24) : 0;
19323
19324 _bfd_error_handler
19325 (_("%pB(%#" PRIx64 "): error: "
19326 "cannot create STM32L4XX veneer; "
19327 "jump out of range by %" PRId64 " bytes; "
19328 "cannot encode branch instruction"),
19329 output_bfd,
19330 (uint64_t) (stm32l4xx_errnode->vma - 4),
19331 (int64_t) out_of_range);
19332 continue;
19333 }
19334
19335 insn = create_instruction_branch_absolute
19336 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19337
19338 /* The instruction is before the label. */
19339 target -= 4;
19340
19341 put_thumb2_insn (globals, output_bfd,
19342 (bfd_vma) insn, contents + target);
19343 }
19344 break;
19345
19346 case STM32L4XX_ERRATUM_VENEER:
19347 {
19348 bfd_byte * veneer;
19349 bfd_byte * veneer_r;
19350 unsigned int insn;
19351
19352 veneer = contents + target;
19353 veneer_r = veneer
19354 + stm32l4xx_errnode->u.b.veneer->vma
19355 - stm32l4xx_errnode->vma - 4;
19356
19357 if ((signed) (veneer_r - veneer -
19358 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19359 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19360 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19361 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19362 || (signed) (veneer_r - veneer) >= (1 << 24))
19363 {
19364 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19365 "veneer"), output_bfd);
19366 continue;
19367 }
19368
19369 /* Original instruction. */
19370 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19371
19372 stm32l4xx_create_replacing_stub
19373 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19374 }
19375 break;
19376
19377 default:
19378 abort ();
19379 }
19380 }
19381 }
19382
19383 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19384 {
19385 arm_unwind_table_edit *edit_node
19386 = arm_data->u.exidx.unwind_edit_list;
19387 /* Now, sec->size is the size of the section we will write. The original
19388 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19389 markers) was sec->rawsize. (This isn't the case if we perform no
19390 edits, then rawsize will be zero and we should use size). */
19391 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19392 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19393 unsigned int in_index, out_index;
19394 bfd_vma add_to_offsets = 0;
19395
19396 if (edited_contents == NULL)
19397 return FALSE;
19398 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19399 {
19400 if (edit_node)
19401 {
19402 unsigned int edit_index = edit_node->index;
19403
19404 if (in_index < edit_index && in_index * 8 < input_size)
19405 {
19406 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19407 contents + in_index * 8, add_to_offsets);
19408 out_index++;
19409 in_index++;
19410 }
19411 else if (in_index == edit_index
19412 || (in_index * 8 >= input_size
19413 && edit_index == UINT_MAX))
19414 {
19415 switch (edit_node->type)
19416 {
19417 case DELETE_EXIDX_ENTRY:
19418 in_index++;
19419 add_to_offsets += 8;
19420 break;
19421
19422 case INSERT_EXIDX_CANTUNWIND_AT_END:
19423 {
19424 asection *text_sec = edit_node->linked_section;
19425 bfd_vma text_offset = text_sec->output_section->vma
19426 + text_sec->output_offset
19427 + text_sec->size;
19428 bfd_vma exidx_offset = offset + out_index * 8;
19429 unsigned long prel31_offset;
19430
19431 /* Note: this is meant to be equivalent to an
19432 R_ARM_PREL31 relocation. These synthetic
19433 EXIDX_CANTUNWIND markers are not relocated by the
19434 usual BFD method. */
19435 prel31_offset = (text_offset - exidx_offset)
19436 & 0x7ffffffful;
19437 if (bfd_link_relocatable (link_info))
19438 {
19439 /* Here relocation for new EXIDX_CANTUNWIND is
19440 created, so there is no need to
19441 adjust offset by hand. */
19442 prel31_offset = text_sec->output_offset
19443 + text_sec->size;
19444 }
19445
19446 /* First address we can't unwind. */
19447 bfd_put_32 (output_bfd, prel31_offset,
19448 &edited_contents[out_index * 8]);
19449
19450 /* Code for EXIDX_CANTUNWIND. */
19451 bfd_put_32 (output_bfd, 0x1,
19452 &edited_contents[out_index * 8 + 4]);
19453
19454 out_index++;
19455 add_to_offsets -= 8;
19456 }
19457 break;
19458 }
19459
19460 edit_node = edit_node->next;
19461 }
19462 }
19463 else
19464 {
19465 /* No more edits, copy remaining entries verbatim. */
19466 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19467 contents + in_index * 8, add_to_offsets);
19468 out_index++;
19469 in_index++;
19470 }
19471 }
19472
19473 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19474 bfd_set_section_contents (output_bfd, sec->output_section,
19475 edited_contents,
19476 (file_ptr) sec->output_offset, sec->size);
19477
19478 return TRUE;
19479 }
19480
19481 /* Fix code to point to Cortex-A8 erratum stubs. */
19482 if (globals->fix_cortex_a8)
19483 {
19484 struct a8_branch_to_stub_data data;
19485
19486 data.writing_section = sec;
19487 data.contents = contents;
19488
19489 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19490 & data);
19491 }
19492
19493 if (mapcount == 0)
19494 return FALSE;
19495
19496 if (globals->byteswap_code)
19497 {
19498 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19499
19500 ptr = map[0].vma;
19501 for (i = 0; i < mapcount; i++)
19502 {
19503 if (i == mapcount - 1)
19504 end = sec->size;
19505 else
19506 end = map[i + 1].vma;
19507
19508 switch (map[i].type)
19509 {
19510 case 'a':
19511 /* Byte swap code words. */
19512 while (ptr + 3 < end)
19513 {
19514 tmp = contents[ptr];
19515 contents[ptr] = contents[ptr + 3];
19516 contents[ptr + 3] = tmp;
19517 tmp = contents[ptr + 1];
19518 contents[ptr + 1] = contents[ptr + 2];
19519 contents[ptr + 2] = tmp;
19520 ptr += 4;
19521 }
19522 break;
19523
19524 case 't':
19525 /* Byte swap code halfwords. */
19526 while (ptr + 1 < end)
19527 {
19528 tmp = contents[ptr];
19529 contents[ptr] = contents[ptr + 1];
19530 contents[ptr + 1] = tmp;
19531 ptr += 2;
19532 }
19533 break;
19534
19535 case 'd':
19536 /* Leave data alone. */
19537 break;
19538 }
19539 ptr = end;
19540 }
19541 }
19542
19543 free (map);
19544 arm_data->mapcount = -1;
19545 arm_data->mapsize = 0;
19546 arm_data->map = NULL;
19547
19548 return FALSE;
19549 }
19550
19551 /* Mangle thumb function symbols as we read them in. */
19552
19553 static bfd_boolean
19554 elf32_arm_swap_symbol_in (bfd * abfd,
19555 const void *psrc,
19556 const void *pshn,
19557 Elf_Internal_Sym *dst)
19558 {
19559 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19560 return FALSE;
19561 dst->st_target_internal = 0;
19562
19563 /* New EABI objects mark thumb function symbols by setting the low bit of
19564 the address. */
19565 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19566 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19567 {
19568 if (dst->st_value & 1)
19569 {
19570 dst->st_value &= ~(bfd_vma) 1;
19571 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19572 ST_BRANCH_TO_THUMB);
19573 }
19574 else
19575 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19576 }
19577 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19578 {
19579 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19580 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19581 }
19582 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19583 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19584 else
19585 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19586
19587 return TRUE;
19588 }
19589
19590
19591 /* Mangle thumb function symbols as we write them out. */
19592
19593 static void
19594 elf32_arm_swap_symbol_out (bfd *abfd,
19595 const Elf_Internal_Sym *src,
19596 void *cdst,
19597 void *shndx)
19598 {
19599 Elf_Internal_Sym newsym;
19600
19601 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19602 of the address set, as per the new EABI. We do this unconditionally
19603 because objcopy does not set the elf header flags until after
19604 it writes out the symbol table. */
19605 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19606 {
19607 newsym = *src;
19608 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19609 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19610 if (newsym.st_shndx != SHN_UNDEF)
19611 {
19612 /* Do this only for defined symbols. At link type, the static
19613 linker will simulate the work of dynamic linker of resolving
19614 symbols and will carry over the thumbness of found symbols to
19615 the output symbol table. It's not clear how it happens, but
19616 the thumbness of undefined symbols can well be different at
19617 runtime, and writing '1' for them will be confusing for users
19618 and possibly for dynamic linker itself.
19619 */
19620 newsym.st_value |= 1;
19621 }
19622
19623 src = &newsym;
19624 }
19625 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19626 }
19627
19628 /* Add the PT_ARM_EXIDX program header. */
19629
19630 static bfd_boolean
19631 elf32_arm_modify_segment_map (bfd *abfd,
19632 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19633 {
19634 struct elf_segment_map *m;
19635 asection *sec;
19636
19637 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19638 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19639 {
19640 /* If there is already a PT_ARM_EXIDX header, then we do not
19641 want to add another one. This situation arises when running
19642 "strip"; the input binary already has the header. */
19643 m = elf_seg_map (abfd);
19644 while (m && m->p_type != PT_ARM_EXIDX)
19645 m = m->next;
19646 if (!m)
19647 {
19648 m = (struct elf_segment_map *)
19649 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19650 if (m == NULL)
19651 return FALSE;
19652 m->p_type = PT_ARM_EXIDX;
19653 m->count = 1;
19654 m->sections[0] = sec;
19655
19656 m->next = elf_seg_map (abfd);
19657 elf_seg_map (abfd) = m;
19658 }
19659 }
19660
19661 return TRUE;
19662 }
19663
19664 /* We may add a PT_ARM_EXIDX program header. */
19665
19666 static int
19667 elf32_arm_additional_program_headers (bfd *abfd,
19668 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19669 {
19670 asection *sec;
19671
19672 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19673 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19674 return 1;
19675 else
19676 return 0;
19677 }
19678
19679 /* Hook called by the linker routine which adds symbols from an object
19680 file. */
19681
19682 static bfd_boolean
19683 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19684 Elf_Internal_Sym *sym, const char **namep,
19685 flagword *flagsp, asection **secp, bfd_vma *valp)
19686 {
19687 if (elf32_arm_hash_table (info) == NULL)
19688 return FALSE;
19689
19690 if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
19691 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19692 flagsp, secp, valp))
19693 return FALSE;
19694
19695 return TRUE;
19696 }
19697
19698 /* We use this to override swap_symbol_in and swap_symbol_out. */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocations per external relocation.  */
  32, 2,	/* Arch size, log2 of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Overridden: mangles Thumb function bit.  */
  elf32_arm_swap_symbol_out,	/* Likewise, on the way out.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
19728
19729 static bfd_vma
19730 read_code32 (const bfd *abfd, const bfd_byte *addr)
19731 {
19732 /* V7 BE8 code is always little endian. */
19733 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19734 return bfd_getl32 (addr);
19735
19736 return bfd_get_32 (abfd, addr);
19737 }
19738
19739 static bfd_vma
19740 read_code16 (const bfd *abfd, const bfd_byte *addr)
19741 {
19742 /* V7 BE8 code is always little endian. */
19743 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19744 return bfd_getl16 (addr);
19745
19746 return bfd_get_16 (abfd, addr);
19747 }
19748
19749 /* Return size of plt0 entry starting at ADDR
19750 or (bfd_vma) -1 if size can not be determined. */
19751
19752 static bfd_vma
19753 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19754 {
19755 bfd_vma first_word;
19756 bfd_vma plt0_size;
19757
19758 first_word = read_code32 (abfd, addr);
19759
19760 if (first_word == elf32_arm_plt0_entry[0])
19761 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19762 else if (first_word == elf32_thumb2_plt0_entry[0])
19763 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19764 else
19765 /* We don't yet handle this PLT format. */
19766 return (bfd_vma) -1;
19767
19768 return plt0_size;
19769 }
19770
19771 /* Return size of plt entry starting at offset OFFSET
19772 of plt section located at address START
19773 or (bfd_vma) -1 if size can not be determined. */
19774
static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  /* Note: this `else` deliberately attaches to whichever `if` chain the
     preprocessor kept above.  */
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
19810
19811 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
19812
static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  /* Synthetic PLT symbols only make sense for executables and shared
     objects.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity-check that .rel.plt really holds dynamic relocations.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      /* Load the PLT contents and cache them on the section so they are
	 freed with the BFD.  */
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* One synthetic symbol per PLT relocation.  The symbol names are
     stored after the asymbol array, inside the same allocation.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip the PLT header; bail out if its format is unrecognised.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  /* Append "+0x<addend>" with leading zeros stripped.  */
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
19927
19928 static bfd_boolean
19929 elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
19930 {
19931 if (hdr->sh_flags & SHF_ARM_PURECODE)
19932 hdr->bfd_section->flags |= SEC_ELF_PURECODE;
19933 return TRUE;
19934 }
19935
19936 static flagword
19937 elf32_arm_lookup_section_flags (char *flag_name)
19938 {
19939 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
19940 return SHF_ARM_PURECODE;
19941
19942 return SEC_NO_FLAGS;
19943 }
19944
19945 static unsigned int
19946 elf32_arm_count_additional_relocs (asection *sec)
19947 {
19948 struct _arm_elf_section_data *arm_data;
19949 arm_data = get_arm_elf_section_data (sec);
19950
19951 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
19952 }
19953
19954 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
19955 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
19956 FALSE otherwise. ISECTION is the best guess matching section from the
19957 input bfd IBFD, but it might be NULL. */
19958
static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* I doubles as the "not found" sentinel: section index 0 is the
	   reserved null section, so i == 0 means no match yet.  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Look for the output header whose section is the output
	       section of the input's sh_link target.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
20044
20045 /* Returns TRUE if NAME is an ARM mapping symbol.
20046 Traditionally the symbols $a, $d and $t have been used.
20047 The ARM ELF standard also defines $x (for A64 code). It also allows a
20048 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20049 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20050 not support them here. $t.x indicates the start of ThumbEE instructions. */
20051
20052 static bfd_boolean
20053 is_arm_mapping_symbol (const char * name)
20054 {
20055 return name != NULL /* Paranoia. */
20056 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20057 the mapping symbols could have acquired a prefix.
20058 We do not support this here, since such symbols no
20059 longer conform to the ARM ELF ABI. */
20060 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20061 && (name[2] == 0 || name[2] == '.');
20062 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20063 any characters that follow the period are legal characters for the body
20064 of a symbol's name. For now we just assume that this is the case. */
20065 }
20066
20067 /* Make sure that mapping symbols in object files are not removed via the
20068 "strip --strip-unneeded" tool. These symbols are needed in order to
20069 correctly generate interworking veneers, and for byte swapping code
20070 regions. Once an object file has been linked, it is safe to remove the
20071 symbols as they will no longer be needed. */
20072
20073 static void
20074 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20075 {
20076 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20077 && sym->section != bfd_abs_section_ptr
20078 && is_arm_mapping_symbol (sym->name))
20079 sym->flags |= BSF_KEEP;
20080 }
20081
20082 #undef elf_backend_copy_special_section_fields
20083 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20084
20085 #define ELF_ARCH bfd_arch_arm
20086 #define ELF_TARGET_ID ARM_ELF_DATA
20087 #define ELF_MACHINE_CODE EM_ARM
20088 #ifdef __QNXTARGET__
20089 #define ELF_MAXPAGESIZE 0x1000
20090 #else
20091 #define ELF_MAXPAGESIZE 0x10000
20092 #endif
20093 #define ELF_MINPAGESIZE 0x1000
20094 #define ELF_COMMONPAGESIZE 0x1000
20095
20096 #define bfd_elf32_mkobject elf32_arm_mkobject
20097
20098 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20099 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20100 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20101 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20102 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20103 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20104 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20105 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20106 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20107 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20108 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20109 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20110
20111 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20112 #define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
20113 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20114 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20115 #define elf_backend_check_relocs elf32_arm_check_relocs
20116 #define elf_backend_update_relocs elf32_arm_update_relocs
20117 #define elf_backend_relocate_section elf32_arm_relocate_section
20118 #define elf_backend_write_section elf32_arm_write_section
20119 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20120 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20121 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20122 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20123 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20124 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20125 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20126 #define elf_backend_init_file_header elf32_arm_init_file_header
20127 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20128 #define elf_backend_object_p elf32_arm_object_p
20129 #define elf_backend_fake_sections elf32_arm_fake_sections
20130 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20131 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20132 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20133 #define elf_backend_size_info elf32_arm_size_info
20134 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20135 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20136 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20137 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20138 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20139 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20140 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20141 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20142
20143 #define elf_backend_can_refcount 1
20144 #define elf_backend_can_gc_sections 1
20145 #define elf_backend_plt_readonly 1
20146 #define elf_backend_want_got_plt 1
20147 #define elf_backend_want_plt_sym 0
20148 #define elf_backend_want_dynrelro 1
20149 #define elf_backend_may_use_rel_p 1
20150 #define elf_backend_may_use_rela_p 0
20151 #define elf_backend_default_use_rela_p 0
20152 #define elf_backend_dtrel_excludes_plt 1
20153
20154 #define elf_backend_got_header_size 12
20155 #define elf_backend_extern_protected_data 1
20156
20157 #undef elf_backend_obj_attrs_vendor
20158 #define elf_backend_obj_attrs_vendor "aeabi"
20159 #undef elf_backend_obj_attrs_section
20160 #define elf_backend_obj_attrs_section ".ARM.attributes"
20161 #undef elf_backend_obj_attrs_arg_type
20162 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20163 #undef elf_backend_obj_attrs_section_type
20164 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20165 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20166 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20167
20168 #undef elf_backend_section_flags
20169 #define elf_backend_section_flags elf32_arm_section_flags
20170 #undef elf_backend_lookup_section_flags_hook
20171 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20172
20173 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20174
20175 #include "elf32-target.h"
20176
20177 /* Native Client targets. */
20178
20179 #undef TARGET_LITTLE_SYM
20180 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20181 #undef TARGET_LITTLE_NAME
20182 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20183 #undef TARGET_BIG_SYM
20184 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20185 #undef TARGET_BIG_NAME
20186 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20187
20188 /* Like elf32_arm_link_hash_table_create -- but overrides
20189 appropriately for NaCl. */
20190
20191 static struct bfd_link_hash_table *
20192 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20193 {
20194 struct bfd_link_hash_table *ret;
20195
20196 ret = elf32_arm_link_hash_table_create (abfd);
20197 if (ret)
20198 {
20199 struct elf32_arm_link_hash_table *htab
20200 = (struct elf32_arm_link_hash_table *) ret;
20201
20202 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20203 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20204 }
20205 return ret;
20206 }
20207
20208 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20209 really need to use elf32_arm_modify_segment_map. But we do it
20210 anyway just to reduce gratuitous differences with the stock ARM backend. */
20211
20212 static bfd_boolean
20213 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20214 {
20215 return (elf32_arm_modify_segment_map (abfd, info)
20216 && nacl_modify_segment_map (abfd, info));
20217 }
20218
20219 static bfd_boolean
20220 elf32_arm_nacl_final_write_processing (bfd *abfd)
20221 {
20222 arm_final_write_processing (abfd);
20223 return nacl_final_write_processing (abfd);
20224 }
20225
20226 static bfd_vma
20227 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20228 const arelent *rel ATTRIBUTE_UNUSED)
20229 {
20230 return plt->vma
20231 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20232 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20233 }
20234
20235 #undef elf32_bed
20236 #define elf32_bed elf32_arm_nacl_bed
20237 #undef bfd_elf32_bfd_link_hash_table_create
20238 #define bfd_elf32_bfd_link_hash_table_create \
20239 elf32_arm_nacl_link_hash_table_create
20240 #undef elf_backend_plt_alignment
20241 #define elf_backend_plt_alignment 4
20242 #undef elf_backend_modify_segment_map
20243 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20244 #undef elf_backend_modify_headers
20245 #define elf_backend_modify_headers nacl_modify_headers
20246 #undef elf_backend_final_write_processing
20247 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20248 #undef bfd_elf32_get_synthetic_symtab
20249 #undef elf_backend_plt_sym_val
20250 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20251 #undef elf_backend_copy_special_section_fields
20252
20253 #undef ELF_MINPAGESIZE
20254 #undef ELF_COMMONPAGESIZE
20255
20256 #undef ELF_TARGET_OS
20257 #define ELF_TARGET_OS is_nacl
20258
20259 #include "elf32-target.h"
20260
20261 /* Reset to defaults. */
20262 #undef elf_backend_plt_alignment
20263 #undef elf_backend_modify_segment_map
20264 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20265 #undef elf_backend_modify_headers
20266 #undef elf_backend_final_write_processing
20267 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20268 #undef ELF_MINPAGESIZE
20269 #define ELF_MINPAGESIZE 0x1000
20270 #undef ELF_COMMONPAGESIZE
20271 #define ELF_COMMONPAGESIZE 0x1000
20272
20273
20274 /* FDPIC Targets. */
20275
20276 #undef TARGET_LITTLE_SYM
20277 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20278 #undef TARGET_LITTLE_NAME
20279 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20280 #undef TARGET_BIG_SYM
20281 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20282 #undef TARGET_BIG_NAME
20283 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20284 #undef elf_match_priority
20285 #define elf_match_priority 128
20286 #undef ELF_OSABI
20287 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20288
20289 /* Like elf32_arm_link_hash_table_create -- but overrides
20290 appropriately for FDPIC. */
20291
20292 static struct bfd_link_hash_table *
20293 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20294 {
20295 struct bfd_link_hash_table *ret;
20296
20297 ret = elf32_arm_link_hash_table_create (abfd);
20298 if (ret)
20299 {
20300 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20301
20302 htab->fdpic_p = 1;
20303 }
20304 return ret;
20305 }
20306
20307 /* We need dynamic symbols for every section, since segments can
20308 relocate independently. */
20309 static bfd_boolean
20310 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20311 struct bfd_link_info *info
20312 ATTRIBUTE_UNUSED,
20313 asection *p ATTRIBUTE_UNUSED)
20314 {
20315 switch (elf_section_data (p)->this_hdr.sh_type)
20316 {
20317 case SHT_PROGBITS:
20318 case SHT_NOBITS:
20319 /* If sh_type is yet undecided, assume it could be
20320 SHT_PROGBITS/SHT_NOBITS. */
20321 case SHT_NULL:
20322 return FALSE;
20323
20324 /* There shouldn't be section relative relocations
20325 against any other section. */
20326 default:
20327 return TRUE;
20328 }
20329 }
20330
20331 #undef elf32_bed
20332 #define elf32_bed elf32_arm_fdpic_bed
20333
20334 #undef bfd_elf32_bfd_link_hash_table_create
20335 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20336
20337 #undef elf_backend_omit_section_dynsym
20338 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20339
20340 #undef ELF_TARGET_OS
20341
20342 #include "elf32-target.h"
20343
20344 #undef elf_match_priority
20345 #undef ELF_OSABI
20346 #undef elf_backend_omit_section_dynsym
20347
20348 /* VxWorks Targets. */
20349
20350 #undef TARGET_LITTLE_SYM
20351 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20352 #undef TARGET_LITTLE_NAME
20353 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20354 #undef TARGET_BIG_SYM
20355 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20356 #undef TARGET_BIG_NAME
20357 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20358
20359 /* Like elf32_arm_link_hash_table_create -- but overrides
20360 appropriately for VxWorks. */
20361
20362 static struct bfd_link_hash_table *
20363 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20364 {
20365 struct bfd_link_hash_table *ret;
20366
20367 ret = elf32_arm_link_hash_table_create (abfd);
20368 if (ret)
20369 {
20370 struct elf32_arm_link_hash_table *htab
20371 = (struct elf32_arm_link_hash_table *) ret;
20372 htab->use_rel = 0;
20373 }
20374 return ret;
20375 }
20376
20377 static bfd_boolean
20378 elf32_arm_vxworks_final_write_processing (bfd *abfd)
20379 {
20380 arm_final_write_processing (abfd);
20381 return elf_vxworks_final_write_processing (abfd);
20382 }
20383
20384 #undef elf32_bed
20385 #define elf32_bed elf32_arm_vxworks_bed
20386
20387 #undef bfd_elf32_bfd_link_hash_table_create
20388 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20389 #undef elf_backend_final_write_processing
20390 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20391 #undef elf_backend_emit_relocs
20392 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20393
20394 #undef elf_backend_may_use_rel_p
20395 #define elf_backend_may_use_rel_p 0
20396 #undef elf_backend_may_use_rela_p
20397 #define elf_backend_may_use_rela_p 1
20398 #undef elf_backend_default_use_rela_p
20399 #define elf_backend_default_use_rela_p 1
20400 #undef elf_backend_want_plt_sym
20401 #define elf_backend_want_plt_sym 1
20402 #undef ELF_MAXPAGESIZE
20403 #define ELF_MAXPAGESIZE 0x1000
20404 #undef ELF_TARGET_OS
20405 #define ELF_TARGET_OS is_vxworks
20406
20407 #include "elf32-target.h"
20408
20409
20410 /* Merge backend specific data from an object file to the output
20411 object file when linking. */
20412
static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  /* Cleared on any hard flag incompatibility; returned at the end so
     that all mismatches are diagnosed in a single pass rather than
     stopping at the first one.  */
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return FALSE;

  /* Nothing to merge unless both input and output are ARM ELF.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  /* Merge the .ARM.attributes (EABI build attribute) sections.  */
  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  strcmp is non-zero when
	     the names differ, i.e. when SEC is NOT a glue section.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_section_flags (sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      /* NOTE(review): this break means only the FIRST non-glue
		 section is inspected, so ONLY_DATA_SECTIONS reflects just
		 that one section.  Presumably a deliberate cheap
		 heuristic, but worth confirming against the history.  */
	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 vs APCS-32 calling conventions cannot be mixed.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      /* Float arguments must be passed the same way in both objects.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* VFP and FPA floating-point instruction sets cannot be mixed.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = FALSE;
	}

      /* Likewise for Cirrus Maverick co-processor instructions.  */
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}