a0b0be3f16078db8da092708d464ea6d578fe4e2
[deliverable/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2018 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
33 /* Return the name of the relocation section associated with NAME
34    (e.g. ".rel.text" or ".rela.text").  HTAB is the bfd's
   elf32_arm_link_hash_table (it carries the use_rel flag selecting
   REL vs. RELA style relocations).  */
35 #define RELOC_SECTION(HTAB, NAME) \
36   ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
38 /* Return the on-disk size of one relocation entry.  HTAB is the bfd's
39    elf32_arm_link_hash_table; its use_rel flag selects between REL and
   RELA entry layouts.  */
40 #define RELOC_SIZE(HTAB) \
41   ((HTAB)->use_rel \
42    ? sizeof (Elf32_External_Rel) \
43    : sizeof (Elf32_External_Rela))
44
45 /* Return the function used to swap relocations in (external to
46    internal form).  HTAB is the bfd's elf32_arm_link_hash_table; its
   use_rel flag selects the REL or RELA variant.  */
47 #define SWAP_RELOC_IN(HTAB) \
48   ((HTAB)->use_rel \
49    ? bfd_elf32_swap_reloc_in \
50    : bfd_elf32_swap_reloca_in)
51
52 /* Return the function used to swap relocations out (internal to
53    external form).  HTAB is the bfd's elf32_arm_link_hash_table; its
   use_rel flag selects the REL or RELA variant.  */
54 #define SWAP_RELOC_OUT(HTAB) \
55   ((HTAB)->use_rel \
56    ? bfd_elf32_swap_reloc_out \
57    : bfd_elf32_swap_reloca_out)
58
/* The generic (RELA) info-to-howto hook is unused by this backend;
   relocation decoding goes through elf32_arm_info_to_howto via the
   REL-style hook below.  */
59 #define elf_info_to_howto		NULL
60 #define elf_info_to_howto_rel		elf32_arm_info_to_howto

/* ABI version numbers written into the ELF header for ARM objects.  */
62 #define ARM_ELF_ABI_VERSION		0
63 #define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
64
65 /* The Adjusted Place, as defined by AAELF: the place rounded down to
   a 4-byte (word) boundary by clearing the two low-order bits.  */
66 #define Pa(X)                  ((X) & 0xfffffffc)
67
/* Forward declaration; as a static function this must be defined later
   in this translation unit.  */
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 					    struct bfd_link_info *link_info,
70 					    asection *sec,
71 					    bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
/* 160 onwards: only R_ARM_IRELATIVE (the dynamic relocation used for
   STT_GNU_IFUNC resolvers) has a howto here; elf32_arm_howto_from_type
   indexes this table relative to R_ARM_IRELATIVE.  */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1765
/* 249-255 extended, currently unused, relocations.  Only 249-252
   (R_ARM_RREL32 .. R_ARM_RBASE) have howto entries below;
   elf32_arm_howto_from_type indexes this table relative to
   R_ARM_RREL32.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1825
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1828 {
1829 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830 return &elf32_arm_howto_table_1[r_type];
1831
1832 if (r_type == R_ARM_IRELATIVE)
1833 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1834
1835 if (r_type >= R_ARM_RREL32
1836 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1838
1839 return NULL;
1840 }
1841
1842 static bfd_boolean
1843 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1844 Elf_Internal_Rela * elf_reloc)
1845 {
1846 unsigned int r_type;
1847
1848 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1850 {
1851 /* xgettext:c-format */
1852 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1853 abfd, r_type);
1854 bfd_set_error (bfd_error_bad_value);
1855 return FALSE;
1856 }
1857 return TRUE;
1858 }
1859
/* One entry in the BFD-reloc-code to ELF-ARM-reloc-number map below.
   The ELF value is stored as an unsigned char; every R_ARM_* number
   used in the map fits (table 2's comment shows the largest, 160).  */
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type bfd_reloc_val;	/* BFD_RELOC_* code.  */
    unsigned char elf_reloc_val;		/* Matching R_ARM_* number.  */
  };
1865
1866 /* All entries in this list must also be present in elf32_arm_howto_table. */
1867 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1868 {
1869 {BFD_RELOC_NONE, R_ARM_NONE},
1870 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1871 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1872 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1873 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1874 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1875 {BFD_RELOC_32, R_ARM_ABS32},
1876 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1877 {BFD_RELOC_8, R_ARM_ABS8},
1878 {BFD_RELOC_16, R_ARM_ABS16},
1879 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1880 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1881 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1882 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1883 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1884 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1885 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1886 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1887 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1888 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1889 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1890 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1891 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1892 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1893 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1894 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1895 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1896 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1897 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1898 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1899 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1900 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1901 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1902 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1903 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1904 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1905 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1906 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1907 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1908 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1909 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1910 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1911 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1912 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1913 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1914 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1915 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1916 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1917 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1918 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1919 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1920 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1921 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1922 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1923 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1924 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1925 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1926 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1927 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1928 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1929 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1930 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1931 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1932 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1933 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1934 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1935 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1936 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1937 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1938 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1939 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1940 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1941 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1942 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1943 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1944 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1945 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1946 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1947 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1948 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1949 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1950 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1951 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1952 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1953 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1954 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
1955 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1956 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1957 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1958 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1959 };
1960
1961 static reloc_howto_type *
1962 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1963 bfd_reloc_code_real_type code)
1964 {
1965 unsigned int i;
1966
1967 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1968 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1969 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1970
1971 return NULL;
1972 }
1973
1974 static reloc_howto_type *
1975 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1976 const char *r_name)
1977 {
1978 unsigned int i;
1979
1980 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1981 if (elf32_arm_howto_table_1[i].name != NULL
1982 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1983 return &elf32_arm_howto_table_1[i];
1984
1985 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1986 if (elf32_arm_howto_table_2[i].name != NULL
1987 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1988 return &elf32_arm_howto_table_2[i];
1989
1990 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1991 if (elf32_arm_howto_table_3[i].name != NULL
1992 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1993 return &elf32_arm_howto_table_3[i];
1994
1995 return NULL;
1996 }
1997
1998 /* Support for core dump NOTE sections. */
1999
2000 static bfd_boolean
2001 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2002 {
2003 int offset;
2004 size_t size;
2005
2006 switch (note->descsz)
2007 {
2008 default:
2009 return FALSE;
2010
2011 case 148: /* Linux/ARM 32-bit. */
2012 /* pr_cursig */
2013 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2014
2015 /* pr_pid */
2016 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2017
2018 /* pr_reg */
2019 offset = 72;
2020 size = 72;
2021
2022 break;
2023 }
2024
2025 /* Make a ".reg/999" section. */
2026 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2027 size, note->descpos + offset);
2028 }
2029
2030 static bfd_boolean
2031 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2032 {
2033 switch (note->descsz)
2034 {
2035 default:
2036 return FALSE;
2037
2038 case 124: /* Linux/ARM elf_prpsinfo. */
2039 elf_tdata (abfd)->core->pid
2040 = bfd_get_32 (abfd, note->descdata + 12);
2041 elf_tdata (abfd)->core->program
2042 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2043 elf_tdata (abfd)->core->command
2044 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2045 }
2046
2047 /* Note that for some reason, a spurious space is tacked
2048 onto the end of the args in some (at least one anyway)
2049 implementations, so strip it off if it exists. */
2050 {
2051 char *command = elf_tdata (abfd)->core->command;
2052 int n = strlen (command);
2053
2054 if (0 < n && command[n - 1] == ' ')
2055 command[n - 1] = '\0';
2056 }
2057
2058 return TRUE;
2059 }
2060
/* Write an ARM-flavoured core-dump note of NOTE_TYPE into BUF.  The
   variadic arguments depend on NOTE_TYPE:
     NT_PRPSINFO: (const char *program, const char *command)
     NT_PRSTATUS: (long pid, int cursig, const void *gregs)
   Returns the (possibly reallocated) buffer, or NULL for an
   unsupported note type.  The layouts and offsets written here mirror
   those decoded by elf32_arm_nabi_grok_prstatus/psinfo above.  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];		/* Linux/ARM elf_prpsinfo size.  */
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* Fixed-width name fields (offsets 28 and 44); they are
	   zero-padded and need not be NUL-terminated, so strncpy is
	   the intended tool here.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];		/* Linux/ARM 32-bit elf_prstatus size.  */
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);	/* pr_pid.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);	/* pr_cursig.  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);		/* pr_reg.  */
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2108
/* Default (non-OS-specific) target vectors and backend hooks.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Containers for 32-bit ARM and 16-bit Thumb instruction words.  */
typedef unsigned long int insn32;
typedef unsigned short int insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* Prefix applied to an entry function's name to form the name of its
   secure gateway veneer.  */
#define CMSE_PREFIX "__acle_se_"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2154
/* Trampoline used to reach a lazily-bound TLS entry.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Trampoline used for lazy resolution of TLS descriptors.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
			+ dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2174
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN    */
  0xe28cca00,		/* add   ip, ip, #NN    */
  0xe5bcf000,		/* ldr   pc, [ip, #NN]! */
  0x00000000,		/* unused              */
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe004,		/* ldr   lr, [pc, #4]   */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  0x00000000,		/* &GOT[0] - .          */
};

/* By default subsequent entries in a procedure linkage table look like
   this.  Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add   ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]!  */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add   ip, pc, #0xN0000000 */
  0xe28cc600,		/* add   ip, ip, #0xNN00000  */
  0xe28cca00,		/* add   ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]!   */
};

/* TRUE when the "long" PLT entry format above must be used.  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2236
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0xf8dfb500,		/* push    {lr}          */
  0x44fee008,		/* ldr.w   lr, [pc, #8]  */
			/* add     lr, pc        */
  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .           */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0x0c00f240,		/* movw    ip, #0xNNNN  */
  0x0c00f2c0,		/* movt    ip, #0xNNNN  */
  0xf8dc44fc,		/* add     ip, pc       */
  0xbf00f000		/* ldr.w   pc, [ip]     */
			/* nop                  */
};
2263
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
  0xe52dc008,		/* str    ip,[sp,#-8]!            */
  0xe59fc000,		/* ldr    ip,[pc]                 */
  0xe59cf008,		/* ldr    pc,[ip,#8]              */
  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_   */
};

/* The format of subsequent entries in a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
  0xe59fc000,		/* ldr    ip,[pc]                       */
  0xe59cf000,		/* ldr    pc,[ip]                       */
  0x00000000,		/* .long  @got                          */
  0xe59fc000,		/* ldr    ip,[pc]                       */
  0xea000000,		/* b      _PLT                          */
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)  */
};

/* The format of entries in a VxWorks shared library.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
  0xe59fc000,		/* ldr    ip,[pc]                       */
  0xe79cf009,		/* ldr    pc,[ip,r9]                    */
  0x00000000,		/* .long  @got                          */
  0xe59fc000,		/* ldr    ip,[pc]                       */
  0xe599f008,		/* ldr    pc,[r9,#8]                    */
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)  */
};

/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};
2303
/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,		/* ldr   pc, [pc, #-4]       */
  0x00000000,		/* dcd   R_ARM_GLOB_DAT(X)   */
};

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw ip, #:lower16:&GOT[2]-.+8 */
  0xe340c000,		/* movt ip, #:upper16:&GOT[2]-.+8 */
  0xe08cc00f,		/* add  ip, ip, pc                */
  0xe52dc008,		/* str  ip, [sp, #-8]!            */
  /* Second bundle: */
  0xe3ccc103,		/* bic  ip, ip, #0xc0000000       */
  0xe59cc000,		/* ldr  ip, [ip]                  */
  0xe3ccc13f,		/* bic  ip, ip, #0xc000000f       */
  0xe12fff1c,		/* bx   ip                        */
  /* Third bundle: */
  0xe320f000,		/* nop                            */
  0xe320f000,		/* nop                            */
  0xe320f000,		/* nop                            */
  /* .Lplt_tail: */
  0xe50dc004,		/* str  ip, [sp, #-4]             */
  /* Fourth bundle: */
  0xe3ccc103,		/* bic  ip, ip, #0xc0000000       */
  0xe59cc000,		/* ldr  ip, [ip]                  */
  0xe3ccc13f,		/* bic  ip, ip, #0xc000000f       */
  0xe12fff1c,		/* bx   ip                        */
};
/* Byte offset of the shared .Lplt_tail label above (12th word).  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw ip, #:lower16:&GOT[n]-.+8 */
  0xe340c000,		/* movt ip, #:upper16:&GOT[n]-.+8 */
  0xe08cc00f,		/* add  ip, ip, pc                */
  0xea000000,		/* b    .Lplt_tail                */
};
2350
/* Maximum reachable branch displacements for each encoding, including
   the PC read-ahead (8 bytes in ARM state, 4 in Thumb state).  */
#define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)

/* Classification of one element of a stub template.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Constructors for stub-template elements; the fields are, in order,
   (data, type, r_type, reloc_addend) of insn_sequence below.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: an instruction or data word, plus
   the relocation (and addend) to apply to it when the stub is built.  */
typedef struct
{
  bfd_vma data;
  enum stub_insn_type type;
  unsigned int r_type;
  int reloc_addend;
}  insn_sequence;
2387
/* Arm/Thumb -> Arm/Thumb long branch stub.  On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),		/* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),		/* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),		/* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub.  Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),		/* push {r0} */
  THUMB16_INSN (0x4802),		/* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),		/* mov  ip, r0 */
  THUMB16_INSN (0xbc01),		/* pop  {r0} */
  THUMB16_INSN (0x4760),		/* bx   ip */
  THUMB16_INSN (0xbf00),		/* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd  R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
{
  THUMB32_INSN (0xf85ff000),		/* ldr.w  pc, [pc, #-0] */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd  R_ARM_ABS32(x) */
};

/* Thumb -> Thumb long branch stub.  Used for PureCode sections on Thumb2
   M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
{
  THUMB32_MOVW (0xf2400c00),		/* mov.w ip, R_ARM_MOVW_ABS_NC */
  THUMB32_MOVT (0xf2c00c00),		/* movt  ip, R_ARM_MOVT_ABS << 16 */
  THUMB16_INSN (0x4760),		/* bx    ip */
};

/* V4T Thumb -> Thumb long branch stub.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),		/* bx   pc */
  THUMB16_INSN (0x46c0),		/* nop */
  ARM_INSN (0xe59fc000),		/* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),		/* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),		/* bx   pc */
  THUMB16_INSN (0x46c0),		/* nop */
  ARM_INSN (0xe51ff004),		/* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd   R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub.  Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),		/* bx   pc */
  THUMB16_INSN (0x46c0),		/* nop */
  ARM_REL_INSN (0xea000000, -8),	/* b    (X-8) */
};
2462
/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
  ARM_INSN (0xe59fc000),		/* ldr   ip, [pc] */
  ARM_INSN (0xe08ff00c),		/* add   pc, pc, ip */
  DATA_WORD (0, R_ARM_REL32, -4),	/* dcd   R_ARM_REL32(X-4) */
};

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),		/* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),		/* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),		/* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),	/* dcd   R_ARM_REL32(X) */
};

/* V4T ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),		/* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),		/* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),		/* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),	/* dcd   R_ARM_REL32(X) */
};

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),		/* bx   pc */
  THUMB16_INSN (0x46c0),		/* nop  */
  ARM_INSN (0xe59fc000),		/* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),		/* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),	/* dcd  R_ARM_REL32(X-4) */
};

/* Thumb -> Thumb long branch stub, PIC.  Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),		/* push {r0} */
  THUMB16_INSN (0x4802),		/* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),		/* mov  ip, pc */
  THUMB16_INSN (0x4484),		/* add  ip, r0 */
  THUMB16_INSN (0xbc01),		/* pop  {r0} */
  THUMB16_INSN (0x4760),		/* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),	/* dcd  R_ARM_REL32(X+4) */
};

/* V4T Thumb -> Thumb long branch stub, PIC.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),		/* bx   pc */
  THUMB16_INSN (0x46c0),		/* nop */
  ARM_INSN (0xe59fc004),		/* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),		/* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),		/* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),	/* dcd  R_ARM_REL32(X) */
};

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),		/* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),		/* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),	/* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),		/* bx   pc */
  THUMB16_INSN (0x46c0),		/* nop */
  ARM_INSN (0xe59f1000),		/* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),		/* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),	/* dcd  R_ARM_REL32(X-4) */
};
2547
/* NaCl ARM -> ARM long branch stub.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr  ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),		/* bic  ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx   ip */
  ARM_INSN (0xe320f000),		/* nop */
  ARM_INSN (0xe125be70),		/* bkpt 0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd  R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr  ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),		/* add  ip, ip, pc */
  ARM_INSN (0xe3ccc13f),		/* bic  ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx   ip */
  ARM_INSN (0xe125be70),		/* bkpt 0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),	/* dcd  R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* Stub used for transition to secure state (aka SG veneer).  */
static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
{
  THUMB32_INSN (0xe97fe97f),		/* sg.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
};


/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),		/* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)	/* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)		/* b original_branch_dest.  */
};
2614
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"

/* One entry per long/short branch stub defined above.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(cmse_branch_thumb_only) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

/* Enumerate all stub types; arm_stub_none (0) means "no stub".  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* A stub template together with its length in template entries.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Table mapping enum elf32_arm_stub_type to its template; entry 0
   corresponds to arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2692
/* Hash-table entry describing one generated stub (long branch veneer,
   CMSE gateway or Cortex-A8 erratum workaround).  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2744
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  /* Start address of the region this entry describes.  */
  bfd_vma vma;
  /* Region classification character; presumably the ARM mapping-symbol
     class ($a/$t/$d) -- confirm against the code that fills this in.  */
  char type;
}
elf32_arm_section_map;

/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* Singly-linked list node; the active union arm depends on TYPE
   (branch entries use u.b, veneer entries use u.v).  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
2786
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

/* Singly-linked list node; the active union arm depends on TYPE
   (branch entries use u.b, veneer entries use u.v).  */
typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;

/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
2836
/* ARM-specific per-section data, extending the generic ELF section
   data.  Retrieved via the elf32_arm_section_data macro below.  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Accessor for the ARM-specific section data of SEC.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
2871
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  /* The input BFD and section containing the instruction to fix.  */
  bfd *input_bfd;
  asection *section;
  /* Location of the instruction and of its branch target.  */
  bfd_vma offset;
  bfd_vma target_offset;
  /* The original instruction being replaced by a branch to a stub.  */
  unsigned long orig_insn;
  /* Name of the stub emitted for this fix.  */
  char *stub_name;
  /* Kind of stub, and ARM/Thumb mode of the branch.  */
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};
2889
/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  /* Source and destination addresses of the branch.  */
  bfd_vma from;
  bfd_vma destination;
  /* Hash table entry for the branch target, and its symbol name.  */
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  /* Relocation type applied at the branch site.  */
  unsigned int r_type;
  /* ARM/Thumb mode of the branch target.  */
  enum arm_st_branch_type branch_type;
  /* TRUE if the branch goes via a stub that is not a Cortex-A8 stub.  */
  bfd_boolean non_a8_stub;
};
2903
/* The size of the thread control block, in bytes.  */
#define TCB_SIZE 8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  -1 while unassigned (see
     elf32_arm_link_hash_newfunc).  */
  bfd_signed_vma got_offset;
};
2931
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.
   Allocated lazily by elf32_arm_create_local_iplt.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
2946
/* ARM-specific data attached to each input bfd.  */
struct elf_arm_obj_tdata
{
  /* The generic per-object data this structure extends.  */
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};

/* Fetch the ARM-specific tdata for BFD.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

/* Accessors for the per-local-symbol arrays; these are allocated as one
   block by elf32_arm_allocate_local_sym_info.  */
#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

/* Nonzero if BFD is an ELF object registered as ARM ELF data.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
2983
2984 static bfd_boolean
2985 elf32_arm_mkobject (bfd *abfd)
2986 {
2987 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2988 ARM_ELF_DATA);
2989 }
2990
/* Cast a generic hash entry to an ARM-specific one.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  /* The generic hash table entry this structure extends.  */
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

  /* Flags describing how this symbol uses the GOT.  GOT_TLS_GD and
     GOT_TLS_GDESC may be set together, hence GOT_TLS_GD_ANY_P.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLS_GDESC 8
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  /* Padding out to a 32-bit boundary.  */
  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};
3029
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info) \
  (elf_link_hash_traverse \
   (&(table)->root, \
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Yields NULL when the hash table is not an ARM one.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Look up an entry in the stub hash table, with the result cast to the
   ARM stub entry type.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
3045
/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size in bytes of the TLS-descriptor part of the jump table:
   4 bytes per R_ARM_TLS_DESC index allocated so far.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3059
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  bfd_boolean use_rel;

  /* Nonzero if import library must be a secure gateway import library
     as per ARMv8-M Security Extensions.  */
  int cmse_implib;

  /* The import library whose symbols' address must remain stable in
     the import library generated.  */
  bfd *in_implib_bfd;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Input stub section holding secure gateway veneers.  */
  asection *cmse_stub_sec;

  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
     start to be allocated.  */
  bfd_vma new_cmse_stub_offset;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True if the target system uses FDPIC.  */
  int fdpic_p;
};
3235
/* Return the number of trailing zero bits in MASK, i.e. the index of
   its least significant set bit.  Returns the bit width of MASK (32)
   when MASK is zero: __builtin_ctz has undefined behaviour for a zero
   argument, whereas the portable fallback loop already returned 32, so
   guard the zero case explicitly to make both paths agree.  */

static inline int
ctz (unsigned int mask)
{
  if (mask == 0)
    return 8 * sizeof (mask);
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	break;
      mask = (mask >> 1);
    }
  return i;
#endif
}
3253
/* Return the number of set bits in MASK.  */

static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  int count = 0;
  unsigned int bits = mask;

  /* Shift the value down, counting each low-order set bit; stop as
     soon as no set bits remain.  */
  while (bits != 0)
    {
      count += bits & 0x1;
      bits >>= 1;
    }
  return count;
#endif
}
3272
3273 /* Create an entry in an ARM ELF linker hash table. */
3274
3275 static struct bfd_hash_entry *
3276 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3277 struct bfd_hash_table * table,
3278 const char * string)
3279 {
3280 struct elf32_arm_link_hash_entry * ret =
3281 (struct elf32_arm_link_hash_entry *) entry;
3282
3283 /* Allocate the structure if it has not already been allocated by a
3284 subclass. */
3285 if (ret == NULL)
3286 ret = (struct elf32_arm_link_hash_entry *)
3287 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3288 if (ret == NULL)
3289 return (struct bfd_hash_entry *) ret;
3290
3291 /* Call the allocation method of the superclass. */
3292 ret = ((struct elf32_arm_link_hash_entry *)
3293 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3294 table, string));
3295 if (ret != NULL)
3296 {
3297 ret->dyn_relocs = NULL;
3298 ret->tls_type = GOT_UNKNOWN;
3299 ret->tlsdesc_got = (bfd_vma) -1;
3300 ret->plt.thumb_refcount = 0;
3301 ret->plt.maybe_thumb_refcount = 0;
3302 ret->plt.noncall_refcount = 0;
3303 ret->plt.got_offset = -1;
3304 ret->is_iplt = FALSE;
3305 ret->export_glue = NULL;
3306
3307 ret->stub_cache = NULL;
3308 }
3309
3310 return (struct bfd_hash_entry *) ret;
3311 }
3312
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  /* The GOT refcount array doubles as the "already allocated" flag.  */
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* sh_info of the symtab header is the number of local symbols.  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      /* Carve the single zeroed allocation into four parallel arrays,
	 one entry per local symbol.  The char array comes last so the
	 wider types keep their natural alignment.  */
      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
3347
3348 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3349 to input bfd ABFD. Create the information if it doesn't already exist.
3350 Return null if an allocation fails. */
3351
3352 static struct arm_local_iplt_info *
3353 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3354 {
3355 struct arm_local_iplt_info **ptr;
3356
3357 if (!elf32_arm_allocate_local_sym_info (abfd))
3358 return NULL;
3359
3360 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3361 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3362 if (*ptr == NULL)
3363 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3364 return *ptr;
3365 }
3366
3367 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3368 in ABFD's symbol table. If the symbol is global, H points to its
3369 hash table entry, otherwise H is null.
3370
3371 Return true if the symbol does have PLT information. When returning
3372 true, point *ROOT_PLT at the target-independent reference count/offset
3373 union and *ARM_PLT at the ARM-specific information. */
3374
3375 static bfd_boolean
3376 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3377 struct elf32_arm_link_hash_entry *h,
3378 unsigned long r_symndx, union gotplt_union **root_plt,
3379 struct arm_plt_info **arm_plt)
3380 {
3381 struct arm_local_iplt_info *local_iplt;
3382
3383 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3384 return FALSE;
3385
3386 if (h != NULL)
3387 {
3388 *root_plt = &h->root.plt;
3389 *arm_plt = &h->plt;
3390 return TRUE;
3391 }
3392
3393 if (elf32_arm_local_iplt (abfd) == NULL)
3394 return FALSE;
3395
3396 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3397 if (local_iplt == NULL)
3398 return FALSE;
3399
3400 *root_plt = &local_iplt->root;
3401 *arm_plt = &local_iplt->arm;
3402 return TRUE;
3403 }
3404
3405 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3406 before it. */
3407
3408 static bfd_boolean
3409 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3410 struct arm_plt_info *arm_plt)
3411 {
3412 struct elf32_arm_link_hash_table *htab;
3413
3414 htab = elf32_arm_hash_table (info);
3415 return (arm_plt->thumb_refcount != 0
3416 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3417 }
3418
/* Return a pointer to the head of the dynamic reloc list that should
   be used for local symbol ISYM, which is symbol number R_SYMNDX in
   ABFD's symbol table.  Return null if an error occurs.  */

static struct elf_dyn_relocs **
elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
				   Elf_Internal_Sym *isym)
{
  if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
    {
      struct arm_local_iplt_info *local_iplt;

      /* IFUNC symbols keep their dynamic relocs in the per-symbol
	 .iplt bookkeeping structure, created on demand.  */
      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
      if (local_iplt == NULL)
	return NULL;
      return &local_iplt->dyn_relocs;
    }
  else
    {
      /* Track dynamic relocs needed for local syms too.
	 We really need local syms available to do this
	 easily.  Oh well.  */
      asection *s;
      void *vpp;

      s = bfd_section_from_elf_index (abfd, isym->st_shndx);
      if (s == NULL)
	abort ();

      /* Hang the list off the section the symbol is defined in.  */
      vpp = &elf_section_data (s)->local_dynrel;
      return (struct elf_dyn_relocs **) vpp;
    }
}
3452
/* Initialize an entry in the stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields.  stub_offset of -1 presumably
	 means "not yet laid out" — confirm against the sizing code.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = (bfd_vma) -1;
      eh->source_value = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      eh->stub_template_size = -1;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}
3495
3496 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3497 shortcuts to them in our hash table. */
3498
3499 static bfd_boolean
3500 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3501 {
3502 struct elf32_arm_link_hash_table *htab;
3503
3504 htab = elf32_arm_hash_table (info);
3505 if (htab == NULL)
3506 return FALSE;
3507
3508 /* BPABI objects never have a GOT, or associated sections. */
3509 if (htab->symbian_p)
3510 return TRUE;
3511
3512 if (! _bfd_elf_create_got_section (dynobj, info))
3513 return FALSE;
3514
3515 return TRUE;
3516 }
3517
3518 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3519
3520 static bfd_boolean
3521 create_ifunc_sections (struct bfd_link_info *info)
3522 {
3523 struct elf32_arm_link_hash_table *htab;
3524 const struct elf_backend_data *bed;
3525 bfd *dynobj;
3526 asection *s;
3527 flagword flags;
3528
3529 htab = elf32_arm_hash_table (info);
3530 dynobj = htab->root.dynobj;
3531 bed = get_elf_backend_data (dynobj);
3532 flags = bed->dynamic_sec_flags;
3533
3534 if (htab->root.iplt == NULL)
3535 {
3536 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3537 flags | SEC_READONLY | SEC_CODE);
3538 if (s == NULL
3539 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3540 return FALSE;
3541 htab->root.iplt = s;
3542 }
3543
3544 if (htab->root.irelplt == NULL)
3545 {
3546 s = bfd_make_section_anyway_with_flags (dynobj,
3547 RELOC_SECTION (htab, ".iplt"),
3548 flags | SEC_READONLY);
3549 if (s == NULL
3550 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3551 return FALSE;
3552 htab->root.irelplt = s;
3553 }
3554
3555 if (htab->root.igotplt == NULL)
3556 {
3557 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3558 if (s == NULL
3559 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3560 return FALSE;
3561 htab->root.igotplt = s;
3562 }
3563 return TRUE;
3564 }
3565
3566 /* Determine if we're dealing with a Thumb only architecture. */
3567
3568 static bfd_boolean
3569 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3570 {
3571 int arch;
3572 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3573 Tag_CPU_arch_profile);
3574
3575 if (profile)
3576 return profile == 'M';
3577
3578 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3579
3580 /* Force return logic to be reviewed for each new architecture. */
3581 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3582
3583 if (arch == TAG_CPU_ARCH_V6_M
3584 || arch == TAG_CPU_ARCH_V6S_M
3585 || arch == TAG_CPU_ARCH_V7E_M
3586 || arch == TAG_CPU_ARCH_V8M_BASE
3587 || arch == TAG_CPU_ARCH_V8M_MAIN)
3588 return TRUE;
3589
3590 return FALSE;
3591 }
3592
3593 /* Determine if we're dealing with a Thumb-2 object. */
3594
3595 static bfd_boolean
3596 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3597 {
3598 int arch;
3599 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3600 Tag_THUMB_ISA_use);
3601
3602 if (thumb_isa)
3603 return thumb_isa == 2;
3604
3605 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3606
3607 /* Force return logic to be reviewed for each new architecture. */
3608 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3609
3610 return (arch == TAG_CPU_ARCH_V6T2
3611 || arch == TAG_CPU_ARCH_V7
3612 || arch == TAG_CPU_ARCH_V7E_M
3613 || arch == TAG_CPU_ARCH_V8
3614 || arch == TAG_CPU_ARCH_V8R
3615 || arch == TAG_CPU_ARCH_V8M_MAIN);
3616 }
3617
3618 /* Determine whether Thumb-2 BL instruction is available. */
3619
3620 static bfd_boolean
3621 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3622 {
3623 int arch =
3624 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3625
3626 /* Force return logic to be reviewed for each new architecture. */
3627 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3628
3629 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3630 return (arch == TAG_CPU_ARCH_V6T2
3631 || arch >= TAG_CPU_ARCH_V7);
3632 }
3633
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* The GOT must exist before the generic dynamic sections are made.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  if (htab->vxworks_p)
    {
      /* VxWorks needs its extra .rela.plt.unloaded section and uses
	 its own PLT entry formats.  */
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the attribute lookups at DYNOBJ.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* All of these sections should have been created above.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return TRUE;
}
3700
/* Copy the extra info we tack onto an elf_link_hash_entry.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    /* Unlink the merged entry from EIND's list.  */
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append EDIR's old list after the unmerged remainder.  */
	  *pp = edir->dyn_relocs;
	}

      /* The combined list now lives on the direct symbol.  */
      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3767
3768 /* Destroy an ARM elf linker hash table. */
3769
3770 static void
3771 elf32_arm_link_hash_table_free (bfd *obfd)
3772 {
3773 struct elf32_arm_link_hash_table *ret
3774 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3775
3776 bfd_hash_table_free (&ret->stub_hash_table);
3777 _bfd_elf_link_hash_table_free (obfd);
3778 }
3779
/* Create an ARM elf linker hash table.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  /* Default the erratum workarounds to "off"; target code flips them
     on as required.  */
  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
  /* PLT sizes depend on the PLT entry format compiled in.  */
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = TRUE;
  ret->obfd = abfd;
  ret->fdpic_p = 0;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      /* NOTE(review): after _bfd_elf_link_hash_table_init succeeded the
	 table is owned by ABFD, so the generic free routine is used here
	 rather than free (ret) — confirm it releases RET itself.  */
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
3824
3825 /* Determine what kind of NOPs are available. */
3826
3827 static bfd_boolean
3828 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3829 {
3830 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3831 Tag_CPU_arch);
3832
3833 /* Force return logic to be reviewed for each new architecture. */
3834 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3835
3836 return (arch == TAG_CPU_ARCH_V6T2
3837 || arch == TAG_CPU_ARCH_V6K
3838 || arch == TAG_CPU_ARCH_V7
3839 || arch == TAG_CPU_ARCH_V8
3840 || arch == TAG_CPU_ARCH_V8R);
3841 }
3842
3843 static bfd_boolean
3844 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3845 {
3846 switch (stub_type)
3847 {
3848 case arm_stub_long_branch_thumb_only:
3849 case arm_stub_long_branch_thumb2_only:
3850 case arm_stub_long_branch_thumb2_only_pure:
3851 case arm_stub_long_branch_v4t_thumb_arm:
3852 case arm_stub_short_branch_v4t_thumb_arm:
3853 case arm_stub_long_branch_v4t_thumb_arm_pic:
3854 case arm_stub_long_branch_v4t_thumb_tls_pic:
3855 case arm_stub_long_branch_thumb_only_pic:
3856 case arm_stub_cmse_branch_thumb_only:
3857 return TRUE;
3858 case arm_stub_none:
3859 BFD_FAIL ();
3860 return FALSE;
3861 break;
3862 default:
3863 return FALSE;
3864 }
3865 }
3866
/* Determine the type of stub needed, if any, for a call.

   INPUT_BFD, INPUT_SEC and REL identify the branch/call relocation being
   resolved.  ST_TYPE is the target symbol's ELF type, HASH its hash table
   entry (NULL for local symbols), DESTINATION its address, SYM_SEC the
   section defining it, and NAME its printable name (used in diagnostics).
   On entry *ACTUAL_BRANCH_TYPE holds the recorded branch type of the
   destination; if a stub turns out to be needed, it is updated to the
   branch type the stub will actually perform.  Returns arm_stub_none when
   no veneer is required.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bfd_boolean thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  /* NOTE(review): ST_BRANCH_LONG destinations never get a stub from this
     function — presumably handled elsewhere; confirm against callers.  */
  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      /* IFUNC symbols use the IPLT; everything else uses the regular
	 PLT.  */
      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb.  We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub.  If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  /* NOTE(review): bitwise '|' on two flags; equivalent to
		     logical OR provided both operands are 0/1.  */
		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4211
/* Build a name for an entry in the stub hash table.  The name uniquely
   identifies a stub by input section id, target symbol, addend and stub
   type.  Returns a bfd_malloc'd string (caller frees) or NULL on
   allocation failure.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      /* Global symbol: "<section id>_<symbol>+<addend>_<stub type>".
	 The length budget is 8 hex digits per %08x/%x field, up to two
	 decimal digits for the stub type, separators and the NUL.  */
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }
  else
    {
      /* Local symbol:
	 "<section id>_<symbol section id>:<symbol index>+<addend>_<type>".
	 TLS call stubs share one trampoline, so the symbol index is
	 folded to zero for them.  */
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }

  return stub_name;
}
4252
4253 /* Look up an entry in the stub hash. Stub entries are cached because
4254 creating the stub name takes a bit of time. */
4255
4256 static struct elf32_arm_stub_hash_entry *
4257 elf32_arm_get_stub_entry (const asection *input_section,
4258 const asection *sym_sec,
4259 struct elf_link_hash_entry *hash,
4260 const Elf_Internal_Rela *rel,
4261 struct elf32_arm_link_hash_table *htab,
4262 enum elf32_arm_stub_type stub_type)
4263 {
4264 struct elf32_arm_stub_hash_entry *stub_entry;
4265 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4266 const asection *id_sec;
4267
4268 if ((input_section->flags & SEC_CODE) == 0)
4269 return NULL;
4270
4271 /* If this input section is part of a group of sections sharing one
4272 stub section, then use the id of the first section in the group.
4273 Stub names need to include a section id, as there may well be
4274 more than one stub used to reach say, printf, and we need to
4275 distinguish between them. */
4276 BFD_ASSERT (input_section->id <= htab->top_id);
4277 id_sec = htab->stub_group[input_section->id].link_sec;
4278
4279 if (h != NULL && h->stub_cache != NULL
4280 && h->stub_cache->h == h
4281 && h->stub_cache->id_sec == id_sec
4282 && h->stub_cache->stub_type == stub_type)
4283 {
4284 stub_entry = h->stub_cache;
4285 }
4286 else
4287 {
4288 char *stub_name;
4289
4290 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4291 if (stub_name == NULL)
4292 return NULL;
4293
4294 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4295 stub_name, FALSE, FALSE);
4296 if (h != NULL)
4297 h->stub_cache = stub_entry;
4298
4299 free (stub_name);
4300 }
4301
4302 return stub_entry;
4303 }
4304
4305 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4306 section. */
4307
4308 static bfd_boolean
4309 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4310 {
4311 if (stub_type >= max_stub_type)
4312 abort (); /* Should be unreachable. */
4313
4314 switch (stub_type)
4315 {
4316 case arm_stub_cmse_branch_thumb_only:
4317 return TRUE;
4318
4319 default:
4320 return FALSE;
4321 }
4322
4323 abort (); /* Should be unreachable. */
4324 }
4325
4326 /* Required alignment (as a power of 2) for the dedicated section holding
4327 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4328 with input sections. */
4329
4330 static int
4331 arm_dedicated_stub_output_section_required_alignment
4332 (enum elf32_arm_stub_type stub_type)
4333 {
4334 if (stub_type >= max_stub_type)
4335 abort (); /* Should be unreachable. */
4336
4337 switch (stub_type)
4338 {
4339 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4340 boundary. */
4341 case arm_stub_cmse_branch_thumb_only:
4342 return 5;
4343
4344 default:
4345 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4346 return 0;
4347 }
4348
4349 abort (); /* Should be unreachable. */
4350 }
4351
4352 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4353 NULL if veneers of this type are interspersed with input sections. */
4354
4355 static const char *
4356 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4357 {
4358 if (stub_type >= max_stub_type)
4359 abort (); /* Should be unreachable. */
4360
4361 switch (stub_type)
4362 {
4363 case arm_stub_cmse_branch_thumb_only:
4364 return ".gnu.sgstubs";
4365
4366 default:
4367 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4368 return NULL;
4369 }
4370
4371 abort (); /* Should be unreachable. */
4372 }
4373
4374 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4375 returns the address of the hash table field in HTAB holding a pointer to the
4376 corresponding input section. Otherwise, returns NULL. */
4377
4378 static asection **
4379 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4380 enum elf32_arm_stub_type stub_type)
4381 {
4382 if (stub_type >= max_stub_type)
4383 abort (); /* Should be unreachable. */
4384
4385 switch (stub_type)
4386 {
4387 case arm_stub_cmse_branch_thumb_only:
4388 return &htab->cmse_stub_sec;
4389
4390 default:
4391 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4392 return NULL;
4393 }
4394
4395 abort (); /* Should be unreachable. */
4396 }
4397
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  /* Alignment as a power of 2, passed to the add_stub_section hook.  */
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated veneers (eg. CMSE Secure Gateway veneers) go into their
	 own, pre-existing output section; it must already have been
	 placed by the linker script.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Interspersed stubs attach to the stub group of SECTION; fall back
	 to the group leader's stub section when SECTION has none yet.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      /* 16-byte bundles for NaCl, 8-byte alignment otherwise.  */
      align = htab->nacl_p ? 4 : 3;
    }

  /* Create the stub section on first use: "<prefix><STUB_SUFFIX>".  */
  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the stub section against SECTION itself too, so the next
     lookup for the same section hits directly.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4477
/* Add a new stub entry to the stub hash.  Not all fields of the new
   stub entry are initialised: the caller must fill in target_value,
   target_section, stub_type and friends.  Returns NULL on failure
   (stub section creation or hash insertion failed).  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_add_stub (const char *stub_name, asection *section,
		    struct elf32_arm_link_hash_table *htab,
		    enum elf32_arm_stub_type stub_type)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf32_arm_stub_hash_entry *stub_entry;

  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
						stub_type);
  if (stub_sec == NULL)
    return NULL;

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
				     TRUE, FALSE);
  if (stub_entry == NULL)
    {
      /* SECTION is NULL for stubs in a dedicated output section; report
	 the error against the stub section's owner instead.  */
      if (section == NULL)
	section = stub_sec;
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
			  section->owner, stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  /* -1 means "no slot assigned yet"; arm_build_one_stub assigns one.  */
  stub_entry->stub_offset = (bfd_vma) -1;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}
4513
4514 /* Store an Arm insn into an output section not processed by
4515 elf32_arm_write_section. */
4516
4517 static void
4518 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4519 bfd * output_bfd, bfd_vma val, void * ptr)
4520 {
4521 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4522 bfd_putl32 (val, ptr);
4523 else
4524 bfd_putb32 (val, ptr);
4525 }
4526
4527 /* Store a 16-bit Thumb insn into an output section not processed by
4528 elf32_arm_write_section. */
4529
4530 static void
4531 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4532 bfd * output_bfd, bfd_vma val, void * ptr)
4533 {
4534 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4535 bfd_putl16 (val, ptr);
4536 else
4537 bfd_putb16 (val, ptr);
4538 }
4539
4540 /* Store a Thumb2 insn into an output section not processed by
4541 elf32_arm_write_section. */
4542
4543 static void
4544 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4545 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4546 {
4547 /* T2 instructions are 16-bit streamed. */
4548 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4549 {
4550 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4551 bfd_putl16 ((val & 0xffff), ptr + 2);
4552 }
4553 else
4554 {
4555 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4556 bfd_putb16 ((val & 0xffff), ptr + 2);
4557 }
4558 }
4559
4560 /* If it's possible to change R_TYPE to a more efficient access
4561 model, return the new reloc type. */
4562
4563 static unsigned
4564 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4565 struct elf_link_hash_entry *h)
4566 {
4567 int is_local = (h == NULL);
4568
4569 if (bfd_link_pic (info)
4570 || (h && h->root.type == bfd_link_hash_undefweak))
4571 return r_type;
4572
4573 /* We do not support relaxations for Old TLS models. */
4574 switch (r_type)
4575 {
4576 case R_ARM_TLS_GOTDESC:
4577 case R_ARM_TLS_CALL:
4578 case R_ARM_THM_TLS_CALL:
4579 case R_ARM_TLS_DESCSEQ:
4580 case R_ARM_THM_TLS_DESCSEQ:
4581 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4582 }
4583
4584 return r_type;
4585 }
4586
4587 static bfd_reloc_status_type elf32_arm_final_link_relocate
4588 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4589 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4590 const char *, unsigned char, enum arm_st_branch_type,
4591 struct elf_link_hash_entry *, bfd_boolean *, char **);
4592
/* Return the byte alignment required by stubs of type STUB_TYPE: 2 for
   Cortex-A8 erratum veneers, 16 for NaCl stubs, 4 for everything else.
   Aborts on an unknown stub type.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
4631
4632 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4633 veneering (TRUE) or have their own symbol (FALSE). */
4634
4635 static bfd_boolean
4636 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4637 {
4638 if (stub_type >= max_stub_type)
4639 abort (); /* Should be unreachable. */
4640
4641 switch (stub_type)
4642 {
4643 case arm_stub_cmse_branch_thumb_only:
4644 return TRUE;
4645
4646 default:
4647 return FALSE;
4648 }
4649
4650 abort (); /* Should be unreachable. */
4651 }
4652
4653 /* Returns the padding needed for the dedicated section used stubs of type
4654 STUB_TYPE. */
4655
4656 static int
4657 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4658 {
4659 if (stub_type >= max_stub_type)
4660 abort (); /* Should be unreachable. */
4661
4662 switch (stub_type)
4663 {
4664 case arm_stub_cmse_branch_thumb_only:
4665 return 32;
4666
4667 default:
4668 return 0;
4669 }
4670
4671 abort (); /* Should be unreachable. */
4672 }
4673
4674 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4675 returns the address of the hash table field in HTAB holding the offset at
4676 which new veneers should be layed out in the stub section. */
4677
4678 static bfd_vma*
4679 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4680 enum elf32_arm_stub_type stub_type)
4681 {
4682 switch (stub_type)
4683 {
4684 case arm_stub_cmse_branch_thumb_only:
4685 return &htab->new_cmse_stub_offset;
4686
4687 default:
4688 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4689 return NULL;
4690 }
4691 }
4692
/* Traversal callback: materialise one stub.  GEN_ENTRY is the stub hash
   entry and IN_ARG the bfd_link_info.  Assigns the stub a slot in its
   stub section if it has none yet, emits the instruction template, and
   applies the template's relocations.  Returns FALSE only on hard
   failure (missing hash table or corrupt template).  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  bfd_boolean removed_sg_veneer;
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* NOTE(review): only two of MAXRELOCS (3) initialisers are written
     explicitly; C zero-fills the third element, so stub_reloc_idx[2]
     is 0, not -1.  Harmless as entries past nrelocs are never read,
     but worth confirming the intent.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;
  int just_allocated = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* The traversal runs twice: fix_cortex_a8 selects which pass builds
     the 2-byte-aligned Cortex-A8 veneers.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Assign a slot at the end of section if none assigned yet.  */
  if (stub_entry->stub_offset == (bfd_vma) -1)
    {
      stub_entry->stub_offset = stub_sec->size;
      just_allocated = 1;
    }
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit the template, recording which entries carry relocations.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  if (just_allocated)
    stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume non empty slots have at least one and at most MAXRELOCS entries
     to relocate in each stub.  */
  removed_sg_veneer =
    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));

  /* Apply each recorded template relocation against the stub bytes.  */
  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return TRUE;
#undef MAXRELOCS
}
4861
4862 /* Calculate the template, template size and instruction size for a stub.
4863 Return value is the instruction size. */
4864
4865 static unsigned int
4866 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4867 const insn_sequence **stub_template,
4868 int *stub_template_size)
4869 {
4870 const insn_sequence *template_sequence = NULL;
4871 int template_size = 0, i;
4872 unsigned int size;
4873
4874 template_sequence = stub_definitions[stub_type].template_sequence;
4875 if (stub_template)
4876 *stub_template = template_sequence;
4877
4878 template_size = stub_definitions[stub_type].template_size;
4879 if (stub_template_size)
4880 *stub_template_size = template_size;
4881
4882 size = 0;
4883 for (i = 0; i < template_size; i++)
4884 {
4885 switch (template_sequence[i].type)
4886 {
4887 case THUMB16_TYPE:
4888 size += 2;
4889 break;
4890
4891 case ARM_TYPE:
4892 case THUMB32_TYPE:
4893 case DATA_TYPE:
4894 size += 4;
4895 break;
4896
4897 default:
4898 BFD_FAIL ();
4899 return 0;
4900 }
4901 }
4902
4903 return size;
4904 }
4905
/* As above, but don't actually build the stub.  Just bump offset so
   we know stub section sizes.  Traversal callback; IN_ARG is unused.
   Always returns TRUE.  */

static bfd_boolean
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
		   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
				      &template_size);

  /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
  if (stub_entry->stub_template_size)
    {
      stub_entry->stub_size = size;
      stub_entry->stub_template = template_sequence;
      stub_entry->stub_template_size = template_size;
    }

  /* Already accounted for.  */
  if (stub_entry->stub_offset != (bfd_vma) -1)
    return TRUE;

  /* Round the stub's contribution up to an 8-byte boundary.  */
  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return TRUE;
}
4943
4944 /* External entry points for sizing and building linker stubs. */
4945
4946 /* Set up various things so that we can make a list of input sections
4947 for each output section included in the link. Returns -1 on error,
4948 0 when no stubs will be needed, and 1 on success. */
4949
4950 int
4951 elf32_arm_setup_section_lists (bfd *output_bfd,
4952 struct bfd_link_info *info)
4953 {
4954 bfd *input_bfd;
4955 unsigned int bfd_count;
4956 unsigned int top_id, top_index;
4957 asection *section;
4958 asection **input_list, **list;
4959 bfd_size_type amt;
4960 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4961
4962 if (htab == NULL)
4963 return 0;
4964 if (! is_elf_hash_table (htab))
4965 return 0;
4966
4967 /* Count the number of input BFDs and find the top input section id. */
4968 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4969 input_bfd != NULL;
4970 input_bfd = input_bfd->link.next)
4971 {
4972 bfd_count += 1;
4973 for (section = input_bfd->sections;
4974 section != NULL;
4975 section = section->next)
4976 {
4977 if (top_id < section->id)
4978 top_id = section->id;
4979 }
4980 }
4981 htab->bfd_count = bfd_count;
4982
4983 amt = sizeof (struct map_stub) * (top_id + 1);
4984 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4985 if (htab->stub_group == NULL)
4986 return -1;
4987 htab->top_id = top_id;
4988
4989 /* We can't use output_bfd->section_count here to find the top output
4990 section index as some sections may have been removed, and
4991 _bfd_strip_section_from_output doesn't renumber the indices. */
4992 for (section = output_bfd->sections, top_index = 0;
4993 section != NULL;
4994 section = section->next)
4995 {
4996 if (top_index < section->index)
4997 top_index = section->index;
4998 }
4999
5000 htab->top_index = top_index;
5001 amt = sizeof (asection *) * (top_index + 1);
5002 input_list = (asection **) bfd_malloc (amt);
5003 htab->input_list = input_list;
5004 if (input_list == NULL)
5005 return -1;
5006
5007 /* For sections we aren't interested in, mark their entries with a
5008 value we can check later. */
5009 list = input_list + top_index;
5010 do
5011 *list = bfd_abs_section_ptr;
5012 while (list-- != input_list);
5013
5014 for (section = output_bfd->sections;
5015 section != NULL;
5016 section = section->next)
5017 {
5018 if ((section->flags & SEC_CODE) != 0)
5019 input_list[section->index] = NULL;
5020 }
5021
5022 return 1;
5023 }
5024
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* Slots for non-code output sections were set to bfd_abs_section_ptr
	 by elf32_arm_setup_section_lists; only code sections get listed.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later in group_sections.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
5054
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* Non-code output sections were marked with the absolute-section
	 sentinel in elf32_arm_setup_section_lists; skip them.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      /* Each iteration of this loop forms one stub group out of a run
	 of consecutive input sections starting at HEAD.  */
      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group [HEAD, CURR] while the distance from the
	     group start to the end of the following section stays
	     below stub_group_size.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group: every member's stubs go after
		 CURR, the last section of the group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5155
5156 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5157 erratum fix. */
5158
5159 static int
5160 a8_reloc_compare (const void *a, const void *b)
5161 {
5162 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5163 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5164
5165 if (ra->from < rb->from)
5166 return -1;
5167 else if (ra->from > rb->from)
5168 return 1;
5169 else
5170 return 0;
5171 }
5172
5173 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5174 const char *, char **);
5175
5176 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5177 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5178 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5179 otherwise. */
5180
5181 static bfd_boolean
5182 cortex_a8_erratum_scan (bfd *input_bfd,
5183 struct bfd_link_info *info,
5184 struct a8_erratum_fix **a8_fixes_p,
5185 unsigned int *num_a8_fixes_p,
5186 unsigned int *a8_fix_table_size_p,
5187 struct a8_erratum_reloc *a8_relocs,
5188 unsigned int num_a8_relocs,
5189 unsigned prev_num_a8_fixes,
5190 bfd_boolean *stub_changed_p)
5191 {
5192 asection *section;
5193 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5194 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5195 unsigned int num_a8_fixes = *num_a8_fixes_p;
5196 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5197
5198 if (htab == NULL)
5199 return FALSE;
5200
5201 for (section = input_bfd->sections;
5202 section != NULL;
5203 section = section->next)
5204 {
5205 bfd_byte *contents = NULL;
5206 struct _arm_elf_section_data *sec_data;
5207 unsigned int span;
5208 bfd_vma base_vma;
5209
5210 if (elf_section_type (section) != SHT_PROGBITS
5211 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5212 || (section->flags & SEC_EXCLUDE) != 0
5213 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5214 || (section->output_section == bfd_abs_section_ptr))
5215 continue;
5216
5217 base_vma = section->output_section->vma + section->output_offset;
5218
5219 if (elf_section_data (section)->this_hdr.contents != NULL)
5220 contents = elf_section_data (section)->this_hdr.contents;
5221 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5222 return TRUE;
5223
5224 sec_data = elf32_arm_section_data (section);
5225
5226 for (span = 0; span < sec_data->mapcount; span++)
5227 {
5228 unsigned int span_start = sec_data->map[span].vma;
5229 unsigned int span_end = (span == sec_data->mapcount - 1)
5230 ? section->size : sec_data->map[span + 1].vma;
5231 unsigned int i;
5232 char span_type = sec_data->map[span].type;
5233 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5234
5235 if (span_type != 't')
5236 continue;
5237
5238 /* Span is entirely within a single 4KB region: skip scanning. */
5239 if (((base_vma + span_start) & ~0xfff)
5240 == ((base_vma + span_end) & ~0xfff))
5241 continue;
5242
5243 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5244
5245 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5246 * The branch target is in the same 4KB region as the
5247 first half of the branch.
5248 * The instruction before the branch is a 32-bit
5249 length non-branch instruction. */
5250 for (i = span_start; i < span_end;)
5251 {
5252 unsigned int insn = bfd_getl16 (&contents[i]);
5253 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5254 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5255
5256 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5257 insn_32bit = TRUE;
5258
5259 if (insn_32bit)
5260 {
5261 /* Load the rest of the insn (in manual-friendly order). */
5262 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5263
5264 /* Encoding T4: B<c>.W. */
5265 is_b = (insn & 0xf800d000) == 0xf0009000;
5266 /* Encoding T1: BL<c>.W. */
5267 is_bl = (insn & 0xf800d000) == 0xf000d000;
5268 /* Encoding T2: BLX<c>.W. */
5269 is_blx = (insn & 0xf800d000) == 0xf000c000;
5270 /* Encoding T3: B<c>.W (not permitted in IT block). */
5271 is_bcc = (insn & 0xf800d000) == 0xf0008000
5272 && (insn & 0x07f00000) != 0x03800000;
5273 }
5274
5275 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5276
5277 if (((base_vma + i) & 0xfff) == 0xffe
5278 && insn_32bit
5279 && is_32bit_branch
5280 && last_was_32bit
5281 && ! last_was_branch)
5282 {
5283 bfd_signed_vma offset = 0;
5284 bfd_boolean force_target_arm = FALSE;
5285 bfd_boolean force_target_thumb = FALSE;
5286 bfd_vma target;
5287 enum elf32_arm_stub_type stub_type = arm_stub_none;
5288 struct a8_erratum_reloc key, *found;
5289 bfd_boolean use_plt = FALSE;
5290
5291 key.from = base_vma + i;
5292 found = (struct a8_erratum_reloc *)
5293 bsearch (&key, a8_relocs, num_a8_relocs,
5294 sizeof (struct a8_erratum_reloc),
5295 &a8_reloc_compare);
5296
5297 if (found)
5298 {
5299 char *error_message = NULL;
5300 struct elf_link_hash_entry *entry;
5301
5302 /* We don't care about the error returned from this
5303 function, only if there is glue or not. */
5304 entry = find_thumb_glue (info, found->sym_name,
5305 &error_message);
5306
5307 if (entry)
5308 found->non_a8_stub = TRUE;
5309
5310 /* Keep a simpler condition, for the sake of clarity. */
5311 if (htab->root.splt != NULL && found->hash != NULL
5312 && found->hash->root.plt.offset != (bfd_vma) -1)
5313 use_plt = TRUE;
5314
5315 if (found->r_type == R_ARM_THM_CALL)
5316 {
5317 if (found->branch_type == ST_BRANCH_TO_ARM
5318 || use_plt)
5319 force_target_arm = TRUE;
5320 else
5321 force_target_thumb = TRUE;
5322 }
5323 }
5324
5325 /* Check if we have an offending branch instruction. */
5326
5327 if (found && found->non_a8_stub)
5328 /* We've already made a stub for this instruction, e.g.
5329 it's a long branch or a Thumb->ARM stub. Assume that
5330 stub will suffice to work around the A8 erratum (see
5331 setting of always_after_branch above). */
5332 ;
5333 else if (is_bcc)
5334 {
5335 offset = (insn & 0x7ff) << 1;
5336 offset |= (insn & 0x3f0000) >> 4;
5337 offset |= (insn & 0x2000) ? 0x40000 : 0;
5338 offset |= (insn & 0x800) ? 0x80000 : 0;
5339 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5340 if (offset & 0x100000)
5341 offset |= ~ ((bfd_signed_vma) 0xfffff);
5342 stub_type = arm_stub_a8_veneer_b_cond;
5343 }
5344 else if (is_b || is_bl || is_blx)
5345 {
5346 int s = (insn & 0x4000000) != 0;
5347 int j1 = (insn & 0x2000) != 0;
5348 int j2 = (insn & 0x800) != 0;
5349 int i1 = !(j1 ^ s);
5350 int i2 = !(j2 ^ s);
5351
5352 offset = (insn & 0x7ff) << 1;
5353 offset |= (insn & 0x3ff0000) >> 4;
5354 offset |= i2 << 22;
5355 offset |= i1 << 23;
5356 offset |= s << 24;
5357 if (offset & 0x1000000)
5358 offset |= ~ ((bfd_signed_vma) 0xffffff);
5359
5360 if (is_blx)
5361 offset &= ~ ((bfd_signed_vma) 3);
5362
5363 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5364 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5365 }
5366
5367 if (stub_type != arm_stub_none)
5368 {
5369 bfd_vma pc_for_insn = base_vma + i + 4;
5370
5371 /* The original instruction is a BL, but the target is
5372 an ARM instruction. If we were not making a stub,
5373 the BL would have been converted to a BLX. Use the
5374 BLX stub instead in that case. */
5375 if (htab->use_blx && force_target_arm
5376 && stub_type == arm_stub_a8_veneer_bl)
5377 {
5378 stub_type = arm_stub_a8_veneer_blx;
5379 is_blx = TRUE;
5380 is_bl = FALSE;
5381 }
5382 /* Conversely, if the original instruction was
5383 BLX but the target is Thumb mode, use the BL
5384 stub. */
5385 else if (force_target_thumb
5386 && stub_type == arm_stub_a8_veneer_blx)
5387 {
5388 stub_type = arm_stub_a8_veneer_bl;
5389 is_blx = FALSE;
5390 is_bl = TRUE;
5391 }
5392
5393 if (is_blx)
5394 pc_for_insn &= ~ ((bfd_vma) 3);
5395
5396 /* If we found a relocation, use the proper destination,
5397 not the offset in the (unrelocated) instruction.
5398 Note this is always done if we switched the stub type
5399 above. */
5400 if (found)
5401 offset =
5402 (bfd_signed_vma) (found->destination - pc_for_insn);
5403
5404 /* If the stub will use a Thumb-mode branch to a
5405 PLT target, redirect it to the preceding Thumb
5406 entry point. */
5407 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5408 offset -= PLT_THUMB_STUB_SIZE;
5409
5410 target = pc_for_insn + offset;
5411
5412 /* The BLX stub is ARM-mode code. Adjust the offset to
5413 take the different PC value (+8 instead of +4) into
5414 account. */
5415 if (stub_type == arm_stub_a8_veneer_blx)
5416 offset += 4;
5417
5418 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5419 {
5420 char *stub_name = NULL;
5421
5422 if (num_a8_fixes == a8_fix_table_size)
5423 {
5424 a8_fix_table_size *= 2;
5425 a8_fixes = (struct a8_erratum_fix *)
5426 bfd_realloc (a8_fixes,
5427 sizeof (struct a8_erratum_fix)
5428 * a8_fix_table_size);
5429 }
5430
5431 if (num_a8_fixes < prev_num_a8_fixes)
5432 {
5433 /* If we're doing a subsequent scan,
5434 check if we've found the same fix as
5435 before, and try and reuse the stub
5436 name. */
5437 stub_name = a8_fixes[num_a8_fixes].stub_name;
5438 if ((a8_fixes[num_a8_fixes].section != section)
5439 || (a8_fixes[num_a8_fixes].offset != i))
5440 {
5441 free (stub_name);
5442 stub_name = NULL;
5443 *stub_changed_p = TRUE;
5444 }
5445 }
5446
5447 if (!stub_name)
5448 {
5449 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5450 if (stub_name != NULL)
5451 sprintf (stub_name, "%x:%x", section->id, i);
5452 }
5453
5454 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5455 a8_fixes[num_a8_fixes].section = section;
5456 a8_fixes[num_a8_fixes].offset = i;
5457 a8_fixes[num_a8_fixes].target_offset =
5458 target - base_vma;
5459 a8_fixes[num_a8_fixes].orig_insn = insn;
5460 a8_fixes[num_a8_fixes].stub_name = stub_name;
5461 a8_fixes[num_a8_fixes].stub_type = stub_type;
5462 a8_fixes[num_a8_fixes].branch_type =
5463 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5464
5465 num_a8_fixes++;
5466 }
5467 }
5468 }
5469
5470 i += insn_32bit ? 4 : 2;
5471 last_was_32bit = insn_32bit;
5472 last_was_branch = is_32bit_branch;
5473 }
5474 }
5475
5476 if (elf_section_data (section)->this_hdr.contents == NULL)
5477 free (contents);
5478 }
5479
5480 *a8_fixes_p = a8_fixes;
5481 *num_a8_fixes_p = num_a8_fixes;
5482 *a8_fix_table_size_p = a8_fix_table_size;
5483
5484 return FALSE;
5485 }
5486
/* Create or update a stub entry depending on whether the stub can already be
   found in HTAB.  The stub is identified by:
   - its type STUB_TYPE
   - its source branch (note that several can share the same stub) whose
     section and relocation (if any) are given by SECTION and IRELA
     respectively
   - its target symbol whose input section, hash, name, value and branch type
     are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
     respectively

   If found, the value of the stub's target symbol is updated from SYM_VALUE
   and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
   TRUE and the stub entry is initialized.

   Returns the stub that was created or updated, or NULL if an error
   occurred.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  if (sym_claimed)
    /* Symbol-claiming stub types key the hash table on the symbol name
       itself; SYM_NAME is not freed on the paths below in that case.  */
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);
      BFD_ASSERT (section->id <= htab->top_id);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return NULL;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
				     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return stub_entry;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return NULL;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* NOTE(review): buffer is sized from THUMB2ARM_GLUE_ENTRY_NAME;
	 presumably the other format strings used below expand to no more
	 characters -- confirm against their definitions.  */
      stub_entry->output_name = (char *)
	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
				   + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return NULL;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
		|| r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = TRUE;
  return stub_entry;
}
5598
/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
   gateway veneer to transition from non secure to secure state and create them
   accordingly.

   "ARMv8-M Security Extensions: Requirements on Development Tools" document
   defines the conditions that govern Secure Gateway veneer creation for a
   given symbol <SYM> as follows:
   - it has function type
   - it has non local binding
   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
     same type, binding and value as <SYM> (called normal symbol).
   An entry function can handle secure state transition itself in which case
   its special symbol would have a different value from the normal symbol.

   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
   entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
   created.

   The return value gives whether a stub failed to be allocated.  */

static bfd_boolean
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
	   int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  /* Symbols with index below sh_info have local binding.  */
  ext_start = symtab_hdr->sh_info;
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	    && out_attr[Tag_CPU_arch_profile].i == 'M');

  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
				       symtab_hdr->sh_info, 0, NULL, NULL,
				       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return FALSE;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = FALSE;

      if (i < ext_start)
	{
	  cmse_sym = &local_syms[i];
	  /* Not a special symbol.  */
	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
	    continue;
	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
						      symtab_hdr->sh_link,
						      cmse_sym->st_name);
	  /* Special symbol with local binding.  */
	  cmse_invalid = TRUE;
	}
      else
	{
	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
	  sym_name = (char *) cmse_hash->root.root.root.string;

	  /* Not a special symbol.  */
	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
	    continue;

	  /* Special symbol has incorrect binding or type.  */
	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
	      || cmse_hash->root.type != STT_FUNC)
	    cmse_invalid = TRUE;
	}

      if (!is_v8m)
	{
	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
				"ARMv8-M architecture or later"),
			      input_bfd, sym_name);
	  is_v8m = TRUE; /* Avoid multiple warning.  */
	  ret = FALSE;
	}

      if (cmse_invalid)
	{
	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
				" a global or weak function symbol"),
			      input_bfd, sym_name);
	  ret = FALSE;
	  /* A local special symbol has no hash entry to examine further.  */
	  if (i < ext_start)
	    continue;
	}

      /* Skip the special-symbol prefix to obtain the normal symbol name.  */
      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  if (hash || j < ext_start)
	    {
	      _bfd_error_handler
		(_("%pB: invalid standard symbol `%s'; it must be "
		   "a global or weak function symbol"),
		 input_bfd, sym_name);
	    }
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = FALSE;
	  if (!hash)
	    continue;
	}

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = FALSE;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = FALSE;
	}

      /* Once any error has been diagnosed, keep scanning for further
	 diagnostics but create no more veneers.  */
      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = FALSE;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }

  /* local_syms was read by bfd_elf_get_elf_syms above iff the cached
     contents were absent; free it in that case only.  */
  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
5797
5798 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
5799 code entry function, ie can be called from non secure code without using a
5800 veneer. */
5801
5802 static bfd_boolean
5803 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
5804 {
5805 bfd_byte contents[4];
5806 uint32_t first_insn;
5807 asection *section;
5808 file_ptr offset;
5809 bfd *abfd;
5810
5811 /* Defined symbol of function type. */
5812 if (hash->root.root.type != bfd_link_hash_defined
5813 && hash->root.root.type != bfd_link_hash_defweak)
5814 return FALSE;
5815 if (hash->root.type != STT_FUNC)
5816 return FALSE;
5817
5818 /* Read first instruction. */
5819 section = hash->root.root.u.def.section;
5820 abfd = section->owner;
5821 offset = hash->root.root.u.def.value - section->vma;
5822 if (!bfd_get_section_contents (abfd, section, contents, offset,
5823 sizeof (contents)))
5824 return FALSE;
5825
5826 first_insn = bfd_get_32 (abfd, contents);
5827
5828 /* Starts by SG instruction. */
5829 return first_insn == 0xe97fe97f;
5830 }
5831
5832 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
5833 secure gateway veneers (ie. the veneers was not in the input import library)
5834 and there is no output import library (GEN_INFO->out_implib_bfd is NULL. */
5835
5836 static bfd_boolean
5837 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
5838 {
5839 struct elf32_arm_stub_hash_entry *stub_entry;
5840 struct bfd_link_info *info;
5841
5842 /* Massage our args to the form they really have. */
5843 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5844 info = (struct bfd_link_info *) gen_info;
5845
5846 if (info->out_implib_bfd)
5847 return TRUE;
5848
5849 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
5850 return TRUE;
5851
5852 if (stub_entry->stub_offset == (bfd_vma) -1)
5853 _bfd_error_handler (" %s", stub_entry->output_name);
5854
5855 return TRUE;
5856 }
5857
5858 /* Set offset of each secure gateway veneers so that its address remain
5859 identical to the one in the input import library referred by
5860 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
5861 (present in input import library but absent from the executable being
5862 linked) or if new veneers appeared and there is no output import library
5863 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
5864 number of secure gateway veneers found in the input import library.
5865
5866 The function returns whether an error occurred. If no error occurred,
5867 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
5868 and this function and HTAB->new_cmse_stub_offset is set to the biggest
5869 veneer observed set for new veneers to be layed out after. */
5870
5871 static bfd_boolean
5872 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
5873 struct elf32_arm_link_hash_table *htab,
5874 int *cmse_stub_created)
5875 {
5876 long symsize;
5877 char *sym_name;
5878 flagword flags;
5879 long i, symcount;
5880 bfd *in_implib_bfd;
5881 asection *stub_out_sec;
5882 bfd_boolean ret = TRUE;
5883 Elf_Internal_Sym *intsym;
5884 const char *out_sec_name;
5885 bfd_size_type cmse_stub_size;
5886 asymbol **sympp = NULL, *sym;
5887 struct elf32_arm_link_hash_entry *hash;
5888 const insn_sequence *cmse_stub_template;
5889 struct elf32_arm_stub_hash_entry *stub_entry;
5890 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
5891 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
5892 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
5893
5894 /* No input secure gateway import library. */
5895 if (!htab->in_implib_bfd)
5896 return TRUE;
5897
5898 in_implib_bfd = htab->in_implib_bfd;
5899 if (!htab->cmse_implib)
5900 {
5901 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
5902 "Gateway import libraries"), in_implib_bfd);
5903 return FALSE;
5904 }
5905
5906 /* Get symbol table size. */
5907 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
5908 if (symsize < 0)
5909 return FALSE;
5910
5911 /* Read in the input secure gateway import library's symbol table. */
5912 sympp = (asymbol **) xmalloc (symsize);
5913 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
5914 if (symcount < 0)
5915 {
5916 ret = FALSE;
5917 goto free_sym_buf;
5918 }
5919
5920 htab->new_cmse_stub_offset = 0;
5921 cmse_stub_size =
5922 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
5923 &cmse_stub_template,
5924 &cmse_stub_template_size);
5925 out_sec_name =
5926 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
5927 stub_out_sec =
5928 bfd_get_section_by_name (htab->obfd, out_sec_name);
5929 if (stub_out_sec != NULL)
5930 cmse_stub_sec_vma = stub_out_sec->vma;
5931
5932 /* Set addresses of veneers mentionned in input secure gateway import
5933 library's symbol table. */
5934 for (i = 0; i < symcount; i++)
5935 {
5936 sym = sympp[i];
5937 flags = sym->flags;
5938 sym_name = (char *) bfd_asymbol_name (sym);
5939 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
5940
5941 if (sym->section != bfd_abs_section_ptr
5942 || !(flags & (BSF_GLOBAL | BSF_WEAK))
5943 || (flags & BSF_FUNCTION) != BSF_FUNCTION
5944 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
5945 != ST_BRANCH_TO_THUMB))
5946 {
5947 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
5948 "symbol should be absolute, global and "
5949 "refer to Thumb functions"),
5950 in_implib_bfd, sym_name);
5951 ret = FALSE;
5952 continue;
5953 }
5954
5955 veneer_value = bfd_asymbol_value (sym);
5956 stub_offset = veneer_value - cmse_stub_sec_vma;
5957 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
5958 FALSE, FALSE);
5959 hash = (struct elf32_arm_link_hash_entry *)
5960 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
5961
5962 /* Stub entry should have been created by cmse_scan or the symbol be of
5963 a secure function callable from non secure code. */
5964 if (!stub_entry && !hash)
5965 {
5966 bfd_boolean new_stub;
5967
5968 _bfd_error_handler
5969 (_("entry function `%s' disappeared from secure code"), sym_name);
5970 hash = (struct elf32_arm_link_hash_entry *)
5971 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
5972 stub_entry
5973 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
5974 NULL, NULL, bfd_abs_section_ptr, hash,
5975 sym_name, veneer_value,
5976 ST_BRANCH_TO_THUMB, &new_stub);
5977 if (stub_entry == NULL)
5978 ret = FALSE;
5979 else
5980 {
5981 BFD_ASSERT (new_stub);
5982 new_cmse_stubs_created++;
5983 (*cmse_stub_created)++;
5984 }
5985 stub_entry->stub_template_size = stub_entry->stub_size = 0;
5986 stub_entry->stub_offset = stub_offset;
5987 }
5988 /* Symbol found is not callable from non secure code. */
5989 else if (!stub_entry)
5990 {
5991 if (!cmse_entry_fct_p (hash))
5992 {
5993 _bfd_error_handler (_("`%s' refers to a non entry function"),
5994 sym_name);
5995 ret = FALSE;
5996 }
5997 continue;
5998 }
5999 else
6000 {
6001 /* Only stubs for SG veneers should have been created. */
6002 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6003
6004 /* Check visibility hasn't changed. */
6005 if (!!(flags & BSF_GLOBAL)
6006 != (hash->root.root.type == bfd_link_hash_defined))
6007 _bfd_error_handler
6008 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6009 sym_name);
6010
6011 stub_entry->stub_offset = stub_offset;
6012 }
6013
6014 /* Size should match that of a SG veneer. */
6015 if (intsym->st_size != cmse_stub_size)
6016 {
6017 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6018 in_implib_bfd, sym_name);
6019 ret = FALSE;
6020 }
6021
6022 /* Previous veneer address is before current SG veneer section. */
6023 if (veneer_value < cmse_stub_sec_vma)
6024 {
6025 /* Avoid offset underflow. */
6026 if (stub_entry)
6027 stub_entry->stub_offset = 0;
6028 stub_offset = 0;
6029 ret = FALSE;
6030 }
6031
6032 /* Complain if stub offset not a multiple of stub size. */
6033 if (stub_offset % cmse_stub_size)
6034 {
6035 _bfd_error_handler
6036 (_("offset of veneer for entry function `%s' not a multiple of "
6037 "its size"), sym_name);
6038 ret = FALSE;
6039 }
6040
6041 if (!ret)
6042 continue;
6043
6044 new_cmse_stubs_created--;
6045 if (veneer_value < cmse_stub_array_start)
6046 cmse_stub_array_start = veneer_value;
6047 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6048 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6049 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6050 }
6051
6052 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6053 {
6054 BFD_ASSERT (new_cmse_stubs_created > 0);
6055 _bfd_error_handler
6056 (_("new entry function(s) introduced but no output import library "
6057 "specified:"));
6058 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6059 }
6060
6061 if (cmse_stub_array_start != cmse_stub_sec_vma)
6062 {
6063 _bfd_error_handler
6064 (_("start address of `%s' is different from previous link"),
6065 out_sec_name);
6066 ret = FALSE;
6067 }
6068
6069 free_sym_buf:
6070 free (sympp);
6071 return ret;
6072 }
6073
6074 /* Determine and set the size of the stub section for a final link.
6075
6076 The basic idea here is to examine all the relocations looking for
6077 PC-relative calls to a target that is unreachable with a "bl"
6078 instruction. */
6079
bfd_boolean
elf32_arm_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *, asection *,
						      asection *,
						      unsigned int),
		      void (*layout_sections_again) (void))
{
  bfd_boolean ret = TRUE;
  obj_attribute *out_attr;
  int cmse_stub_created = 0;
  bfd_size_type stub_group_size;
  bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = NULL;
  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
  struct a8_erratum_reloc *a8_relocs = NULL;
  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;

  if (htab == NULL)
    return FALSE;

  /* Grow-on-demand tables used only when the Cortex-A8 erratum
     workaround is enabled.
     NOTE(review): these are not freed on the error paths below
     (error_ret_free_local returns directly) — possible leak; confirm.  */
  if (htab->fix_cortex_a8)
    {
      a8_fixes = (struct a8_erratum_fix *)
	bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
      a8_relocs = (struct a8_erratum_reloc *)
	bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
    }

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  stubs_always_after_branch = group_size < 0;

  out_attr = elf_known_obj_attributes_proc (output_bfd);
  m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';

  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
     as the first half of a 32-bit branch straddling two 4K pages.  This is a
     crude way of enforcing that.  */
  if (htab->fix_cortex_a8)
    stubs_always_after_branch = 1;

  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* Thumb branch range is +-4MB has to be used as the default
	 maximum size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
    }

  group_sections (htab, stub_group_size, stubs_always_after_branch);

  /* If we're applying the cortex A8 fix, we need to determine the
     program header size now, because we cannot change it later --
     that could alter section placements.  Notice the A8 erratum fix
     ends up requiring the section addresses to remain unchanged
     modulo the page size.  That's something we cannot represent
     inside BFD, and we don't want to force the section alignment to
     be the page size.  */
  if (htab->fix_cortex_a8)
    (*htab->layout_sections_again) ();

  /* Relaxation loop: keep rescanning relocations and resizing stub
     sections until no new stubs (and no new A8 fixes) appear.  */
  while (1)
    {
      bfd *input_bfd;
      unsigned int bfd_indx;
      asection *stub_sec;
      enum elf32_arm_stub_type stub_type;
      bfd_boolean stub_changed = FALSE;
      unsigned prev_num_a8_fixes = num_a8_fixes;

      num_a8_fixes = 0;
      for (input_bfd = info->input_bfds, bfd_indx = 0;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next, bfd_indx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *section;
	  Elf_Internal_Sym *local_syms = NULL;

	  if (!is_arm_elf (input_bfd))
	    continue;

	  num_a8_relocs = 0;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Limit scan of symbols to object file whose profile is
	     Microcontroller to not hinder performance in the general case.  */
	  if (m_profile && first_veneer_scan)
	    {
	      struct elf_link_hash_entry **sym_hashes;

	      sym_hashes = elf_sym_hashes (input_bfd);
	      if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
			      &cmse_stub_created))
		goto error_ret_free_local;

	      if (cmse_stub_created != 0)
		stub_changed = TRUE;
	    }

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0
		  || (section->flags & SEC_CODE) == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Get the relocs.  */
	      internal_relocs
		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					     NULL, info->keep_memory);
	      if (internal_relocs == NULL)
		goto error_ret_free_local;

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf32_arm_link_hash_entry *hash;
		  const char *sym_name;
		  unsigned char st_type;
		  enum arm_st_branch_type branch_type;
		  bfd_boolean created_stub = FALSE;

		  r_type = ELF32_R_TYPE (irela->r_info);
		  r_indx = ELF32_R_SYM (irela->r_info);

		  if (r_type >= (unsigned int) R_ARM_max)
		    {
		      bfd_set_error (bfd_error_bad_value);
		      /* Shared error exits: first free the relocs (if we
			 own them), then the local symbols.  Reached by
			 goto from several failure points below.  */
		    error_ret_free_internal:
		      if (elf_section_data (section)->relocs == NULL)
			free (internal_relocs);
		      /* Fall through.  */
		    error_ret_free_local:
		      if (local_syms != NULL
			  && (symtab_hdr->contents
			      != (unsigned char *) local_syms))
			free (local_syms);
		      return FALSE;
		    }

		  hash = NULL;
		  if (r_indx >= symtab_hdr->sh_info)
		    hash = elf32_arm_hash_entry
		      (elf_sym_hashes (input_bfd)
		       [r_indx - symtab_hdr->sh_info]);

		  /* Only look for stubs on branch instructions, or
		     non-relaxed TLSCALL  */
		  if ((r_type != (unsigned int) R_ARM_CALL)
		      && (r_type != (unsigned int) R_ARM_THM_CALL)
		      && (r_type != (unsigned int) R_ARM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_PLT32)
		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
			   && r_type == elf32_arm_tls_transition
			   (info, r_type, &hash->root)
			   && ((hash ? hash->tls_type
				: (elf32_arm_local_got_tls_type
				   (input_bfd)[r_indx]))
			       & GOT_TLS_GDESC) != 0))
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  sym_name = NULL;

		  if (r_type == (unsigned int) R_ARM_TLS_CALL
		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
		    {
		      /* A non-relaxed TLS call.  The target is the
			 plt-resident trampoline and nothing to do
			 with the symbol.  */
		      BFD_ASSERT (htab->tls_trampoline > 0);
		      sym_sec = htab->root.splt;
		      sym_value = htab->tls_trampoline;
		      hash = 0;
		      st_type = STT_FUNC;
		      branch_type = ST_BRANCH_TO_ARM;
		    }
		  else if (!hash)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;

		      /* Lazily read (or reuse cached) local symbols.  */
		      if (local_syms == NULL)
			{
			  local_syms
			    = (Elf_Internal_Sym *) symtab_hdr->contents;
			  if (local_syms == NULL)
			    local_syms
			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						      symtab_hdr->sh_info, 0,
						      NULL, NULL, NULL);
			  if (local_syms == NULL)
			    goto error_ret_free_internal;
			}

		      sym = local_syms + r_indx;
		      if (sym->st_shndx == SHN_UNDEF)
			sym_sec = bfd_und_section_ptr;
		      else if (sym->st_shndx == SHN_ABS)
			sym_sec = bfd_abs_section_ptr;
		      else if (sym->st_shndx == SHN_COMMON)
			sym_sec = bfd_com_section_ptr;
		      else
			sym_sec =
			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);

		      if (!sym_sec)
			/* This is an undefined symbol.  It can never
			   be resolved.  */
			continue;

		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		      st_type = ELF_ST_TYPE (sym->st_info);
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
		      sym_name
			= bfd_elf_string_from_elf_section (input_bfd,
							   symtab_hdr->sh_link,
							   sym->st_name);
		    }
		  else
		    {
		      /* It's an external symbol.  */
		      while (hash->root.root.type == bfd_link_hash_indirect
			     || hash->root.root.type == bfd_link_hash_warning)
			hash = ((struct elf32_arm_link_hash_entry *)
				hash->root.root.u.i.link);

		      if (hash->root.root.type == bfd_link_hash_defined
			  || hash->root.root.type == bfd_link_hash_defweak)
			{
			  sym_sec = hash->root.root.u.def.section;
			  sym_value = hash->root.root.u.def.value;

			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  /* For a destination in a shared library,
			     use the PLT stub as target address to
			     decide whether a branch stub is
			     needed.  */
			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if ((hash->root.root.type == bfd_link_hash_undefined)
			       || (hash->root.root.type == bfd_link_hash_undefweak))
			{
			  /* For a shared library, use the PLT stub as
			     target address to decide whether a long
			     branch stub is needed.
			     For absolute code, they cannot be handled.  */
			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		      st_type = hash->root.type;
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
		      sym_name = hash->root.root.root.string;
		    }

		  /* do { } while (0) so that `break' can abandon stub
		     creation for this reloc while still falling through
		     to the Cortex-A8 candidate check below.  */
		  do
		    {
		      bfd_boolean new_stub;
		      struct elf32_arm_stub_hash_entry *stub_entry;

		      /* Determine what (if any) linker stub is needed.  */
		      stub_type = arm_type_of_stub (info, section, irela,
						    st_type, &branch_type,
						    hash, destination, sym_sec,
						    input_bfd, sym_name);
		      if (stub_type == arm_stub_none)
			break;

		      /* We've either created a stub for this reloc already,
			 or we are about to.  */
		      stub_entry =
			elf32_arm_create_stub (htab, stub_type, section, irela,
					       sym_sec, hash,
					       (char *) sym_name, sym_value,
					       branch_type, &new_stub);

		      created_stub = stub_entry != NULL;
		      if (!created_stub)
			goto error_ret_free_internal;
		      else if (!new_stub)
			break;
		      else
			stub_changed = TRUE;
		    }
		  while (0);

		  /* Look for relocations which might trigger Cortex-A8
		     erratum.  */
		  if (htab->fix_cortex_a8
		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
			  || r_type == (unsigned int) R_ARM_THM_JUMP19
			  || r_type == (unsigned int) R_ARM_THM_CALL
			  || r_type == (unsigned int) R_ARM_THM_XPC22))
		    {
		      bfd_vma from = section->output_section->vma
			+ section->output_offset
			+ irela->r_offset;

		      if ((from & 0xfff) == 0xffe)
			{
			  /* Found a candidate.  Note we haven't checked the
			     destination is within 4K here: if we do so (and
			     don't create an entry in a8_relocs) we can't tell
			     that a branch should have been relocated when
			     scanning later.  */
			  if (num_a8_relocs == a8_reloc_table_size)
			    {
			      a8_reloc_table_size *= 2;
			      a8_relocs = (struct a8_erratum_reloc *)
				bfd_realloc (a8_relocs,
					     sizeof (struct a8_erratum_reloc)
					     * a8_reloc_table_size);
			    }

			  a8_relocs[num_a8_relocs].from = from;
			  a8_relocs[num_a8_relocs].destination = destination;
			  a8_relocs[num_a8_relocs].r_type = r_type;
			  a8_relocs[num_a8_relocs].branch_type = branch_type;
			  a8_relocs[num_a8_relocs].sym_name = sym_name;
			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
			  a8_relocs[num_a8_relocs].hash = hash;

			  num_a8_relocs++;
			}
		    }
		}

	      /* We're done with the internal relocs, free them.  */
	      if (elf_section_data (section)->relocs == NULL)
		free (internal_relocs);
	    }

	  if (htab->fix_cortex_a8)
	    {
	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
	      qsort (a8_relocs, num_a8_relocs,
		     sizeof (struct a8_erratum_reloc),
		     &a8_reloc_compare);

	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
					  &num_a8_fixes, &a8_fix_table_size,
					  a8_relocs, num_a8_relocs,
					  prev_num_a8_fixes, &stub_changed)
		  != 0)
		goto error_ret_free_local;
	    }

	  /* Either free the local symbols or cache them in the symtab
	     header, depending on the keep_memory policy.  */
	  if (local_syms != NULL
	      && symtab_hdr->contents != (unsigned char *) local_syms)
	    {
	      if (!info->keep_memory)
		free (local_syms);
	      else
		symtab_hdr->contents = (unsigned char *) local_syms;
	    }
	}

      /* On the first pass only, anchor SG veneers at the addresses
	 recorded in the input import library (if any).  */
      if (first_veneer_scan
	  && !set_cmse_veneer_addr_from_implib (info, htab,
						&cmse_stub_created))
	ret = FALSE;

      if (prev_num_a8_fixes != num_a8_fixes)
	stub_changed = TRUE;

      if (!stub_changed)
	break;

      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  stub_sec->size = 0;
	}

      /* Add new SG veneers after those already in the input import
	 library.  */
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  bfd_vma *start_offset_p;
	  asection **stub_sec_p;

	  start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  if (start_offset_p == NULL)
	    continue;

	  BFD_ASSERT (stub_sec_p != NULL);
	  if (*stub_sec_p != NULL)
	    (*stub_sec_p)->size = *start_offset_p;
	}

      /* Compute stub section size, considering padding.  */
      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  int size, padding;
	  asection **stub_sec_p;

	  padding = arm_dedicated_stub_section_padding (stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  /* Skip if no stub input section or no stub section padding
	     required.  */
	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
	    continue;
	  /* Stub section padding required but no dedicated section.  */
	  BFD_ASSERT (stub_sec_p);

	  /* Round the section size up to the padding boundary
	     (padding is assumed to be a power of two here).  */
	  size = (*stub_sec_p)->size;
	  size = (size + padding - 1) & ~(padding - 1);
	  (*stub_sec_p)->size = size;
	}

      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
      if (htab->fix_cortex_a8)
	for (i = 0; i < num_a8_fixes; i++)
	  {
	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);

	    if (stub_sec == NULL)
	      return FALSE;

	    stub_sec->size
	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
					      NULL);
	  }


      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
      first_veneer_scan = FALSE;
    }

  /* Add stubs for Cortex-A8 erratum fixes now.  */
  if (htab->fix_cortex_a8)
    {
      for (i = 0; i < num_a8_fixes; i++)
	{
	  struct elf32_arm_stub_hash_entry *stub_entry;
	  char *stub_name = a8_fixes[i].stub_name;
	  asection *section = a8_fixes[i].section;
	  unsigned int section_id = a8_fixes[i].section->id;
	  asection *link_sec = htab->stub_group[section_id].link_sec;
	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
	  const insn_sequence *template_sequence;
	  int template_size, size = 0;

	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					     TRUE, FALSE);
	  if (stub_entry == NULL)
	    {
	      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
				  section->owner, stub_name);
	      return FALSE;
	    }

	  stub_entry->stub_sec = stub_sec;
	  stub_entry->stub_offset = (bfd_vma) -1;
	  stub_entry->id_sec = link_sec;
	  stub_entry->stub_type = a8_fixes[i].stub_type;
	  stub_entry->source_value = a8_fixes[i].offset;
	  stub_entry->target_section = a8_fixes[i].section;
	  stub_entry->target_value = a8_fixes[i].target_offset;
	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
	  stub_entry->branch_type = a8_fixes[i].branch_type;

	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
					      &template_sequence,
					      &template_size);

	  stub_entry->stub_size = size;
	  stub_entry->stub_template = template_sequence;
	  stub_entry->stub_template_size = template_size;
	}

      /* Stash the Cortex-A8 erratum fix array for use later in
	 elf32_arm_write_section().  */
      htab->a8_erratum_fixes = a8_fixes;
      htab->num_a8_erratum_fixes = num_a8_fixes;
    }
  else
    {
      htab->a8_erratum_fixes = NULL;
      htab->num_a8_erratum_fixes = 0;
    }
  return ret;
}
6674
6675 /* Build all the stubs associated with the current output file. The
6676 stubs are kept in a hash table attached to the main linker hash
6677 table. We also set up the .plt entries for statically linked PIC
6678 functions here. This function is called via arm_elf_finish in the
6679 linker. */
6680
6681 bfd_boolean
6682 elf32_arm_build_stubs (struct bfd_link_info *info)
6683 {
6684 asection *stub_sec;
6685 struct bfd_hash_table *table;
6686 enum elf32_arm_stub_type stub_type;
6687 struct elf32_arm_link_hash_table *htab;
6688
6689 htab = elf32_arm_hash_table (info);
6690 if (htab == NULL)
6691 return FALSE;
6692
6693 for (stub_sec = htab->stub_bfd->sections;
6694 stub_sec != NULL;
6695 stub_sec = stub_sec->next)
6696 {
6697 bfd_size_type size;
6698
6699 /* Ignore non-stub sections. */
6700 if (!strstr (stub_sec->name, STUB_SUFFIX))
6701 continue;
6702
6703 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
6704 must at least be done for stub section requiring padding and for SG
6705 veneers to ensure that a non secure code branching to a removed SG
6706 veneer causes an error. */
6707 size = stub_sec->size;
6708 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
6709 if (stub_sec->contents == NULL && size != 0)
6710 return FALSE;
6711
6712 stub_sec->size = 0;
6713 }
6714
6715 /* Add new SG veneers after those already in the input import library. */
6716 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6717 {
6718 bfd_vma *start_offset_p;
6719 asection **stub_sec_p;
6720
6721 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6722 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6723 if (start_offset_p == NULL)
6724 continue;
6725
6726 BFD_ASSERT (stub_sec_p != NULL);
6727 if (*stub_sec_p != NULL)
6728 (*stub_sec_p)->size = *start_offset_p;
6729 }
6730
6731 /* Build the stubs as directed by the stub hash table. */
6732 table = &htab->stub_hash_table;
6733 bfd_hash_traverse (table, arm_build_one_stub, info);
6734 if (htab->fix_cortex_a8)
6735 {
6736 /* Place the cortex a8 stubs last. */
6737 htab->fix_cortex_a8 = -1;
6738 bfd_hash_traverse (table, arm_build_one_stub, info);
6739 }
6740
6741 return TRUE;
6742 }
6743
6744 /* Locate the Thumb encoded calling stub for NAME. */
6745
6746 static struct elf_link_hash_entry *
6747 find_thumb_glue (struct bfd_link_info *link_info,
6748 const char *name,
6749 char **error_message)
6750 {
6751 char *tmp_name;
6752 struct elf_link_hash_entry *hash;
6753 struct elf32_arm_link_hash_table *hash_table;
6754
6755 /* We need a pointer to the armelf specific hash table. */
6756 hash_table = elf32_arm_hash_table (link_info);
6757 if (hash_table == NULL)
6758 return NULL;
6759
6760 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6761 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
6762
6763 BFD_ASSERT (tmp_name);
6764
6765 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
6766
6767 hash = elf_link_hash_lookup
6768 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6769
6770 if (hash == NULL
6771 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
6772 "Thumb", tmp_name, name) == -1)
6773 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6774
6775 free (tmp_name);
6776
6777 return hash;
6778 }
6779
6780 /* Locate the ARM encoded calling stub for NAME. */
6781
6782 static struct elf_link_hash_entry *
6783 find_arm_glue (struct bfd_link_info *link_info,
6784 const char *name,
6785 char **error_message)
6786 {
6787 char *tmp_name;
6788 struct elf_link_hash_entry *myh;
6789 struct elf32_arm_link_hash_table *hash_table;
6790
6791 /* We need a pointer to the elfarm specific hash table. */
6792 hash_table = elf32_arm_hash_table (link_info);
6793 if (hash_table == NULL)
6794 return NULL;
6795
6796 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6797 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6798
6799 BFD_ASSERT (tmp_name);
6800
6801 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6802
6803 myh = elf_link_hash_lookup
6804 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6805
6806 if (myh == NULL
6807 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
6808 "ARM", tmp_name, name) == -1)
6809 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6810
6811 free (tmp_name);
6812
6813 return myh;
6814 }
6815
6816 /* ARM->Thumb glue (static images):
6817
6818 .arm
6819 __func_from_arm:
6820 ldr r12, __func_addr
6821 bx r12
6822 __func_addr:
6823 .word func @ behave as if you saw a ARM_32 reloc.
6824
6825 (v5t static images)
6826 .arm
6827 __func_from_arm:
6828 ldr pc, __func_addr
6829 __func_addr:
6830 .word func @ behave as if you saw a ARM_32 reloc.
6831
6832 (relocatable images)
6833 .arm
6834 __func_from_arm:
6835 ldr r12, __func_offset
6836 add r12, r12, pc
6837 bx r12
6838 __func_offset:
6839 .word func - . */
6840
/* Instruction encodings for the ARM->Thumb static glue sketched above:
   three words (ldr/bx/literal).  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, __func_addr  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12  */
static const insn32 a2t3_func_addr_insn = 0x00000001;	/* .word func (ARM_32-style reloc)  */

/* v5T variant: loading directly into pc makes the bx unnecessary.  */
#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, __func_addr  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* .word func  */

/* Relocatable-image variant: pc-relative offset instead of an absolute
   address (see the "(relocatable images)" sketch above).  */
#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, __func_offset  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12  */

/* Thumb->ARM:                 Thumb->(non-interworking aware) ARM

     .thumb                    .thumb
     .align 2                  .align 2
  __func_from_thumb:	    __func_from_thumb:
     bx pc                     push  {r6, lr}
     nop                       ldr   r6, __func_addr
     .arm                      mov   lr, pc
     b func                    bx    r6
			      .arm
			   ;; back_to_thumb
			      ldmia r13! {r6, lr}
			      bx    lr
			   __func_addr:
			      .word func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;		/* bx pc  */
static const insn16 t2a2_noop_insn = 0x46c0;		/* nop  */
static const insn32 t2a3_b_insn = 0xea000000;		/* b func  */

/* Sizes of the erratum veneers emitted elsewhere in this file.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARM BX veneer: tst/moveq/bx, per the sizes below.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst rN, #1  */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN (rN merged in later — confirm)  */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN  */
6884
6885 #ifndef ELFARM_NABI_C_INCLUDED
6886 static void
6887 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
6888 {
6889 asection * s;
6890 bfd_byte * contents;
6891
6892 if (size == 0)
6893 {
6894 /* Do not include empty glue sections in the output. */
6895 if (abfd != NULL)
6896 {
6897 s = bfd_get_linker_section (abfd, name);
6898 if (s != NULL)
6899 s->flags |= SEC_EXCLUDE;
6900 }
6901 return;
6902 }
6903
6904 BFD_ASSERT (abfd != NULL);
6905
6906 s = bfd_get_linker_section (abfd, name);
6907 BFD_ASSERT (s != NULL);
6908
6909 contents = (bfd_byte *) bfd_alloc (abfd, size);
6910
6911 BFD_ASSERT (s->size == size);
6912 s->contents = contents;
6913 }
6914
6915 bfd_boolean
6916 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
6917 {
6918 struct elf32_arm_link_hash_table * globals;
6919
6920 globals = elf32_arm_hash_table (info);
6921 BFD_ASSERT (globals != NULL);
6922
6923 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6924 globals->arm_glue_size,
6925 ARM2THUMB_GLUE_SECTION_NAME);
6926
6927 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6928 globals->thumb_glue_size,
6929 THUMB2ARM_GLUE_SECTION_NAME);
6930
6931 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6932 globals->vfp11_erratum_glue_size,
6933 VFP11_ERRATUM_VENEER_SECTION_NAME);
6934
6935 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6936 globals->stm32l4xx_erratum_glue_size,
6937 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6938
6939 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6940 globals->bx_glue_size,
6941 ARM_BX_GLUE_SECTION_NAME);
6942
6943 return TRUE;
6944 }
6945
6946 /* Allocate space and symbols for calling a Thumb function from Arm mode.
6947 returns the symbol identifying the stub. */
6948
static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Derive the veneer's symbol name from the target symbol's name
     via the ARM2THUMB_GLUE_ENTRY_NAME format string.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Keep the veneer symbol local to the output so it cannot be
     preempted or exported.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Pick the veneer flavour: a PIC veneer for shared/PIE links (or on
     explicit request), otherwise a v5 static veneer when BLX is usable,
     falling back to the plain static veneer.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for the veneer in the glue section's running totals.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
7018
7019 /* Allocate space for ARMv4 BX veneers. */
7020
7021 static void
7022 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7023 {
7024 asection * s;
7025 struct elf32_arm_link_hash_table *globals;
7026 char *tmp_name;
7027 struct elf_link_hash_entry *myh;
7028 struct bfd_link_hash_entry *bh;
7029 bfd_vma val;
7030
7031 /* BX PC does not need a veneer. */
7032 if (reg == 15)
7033 return;
7034
7035 globals = elf32_arm_hash_table (link_info);
7036 BFD_ASSERT (globals != NULL);
7037 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7038
7039 /* Check if this veneer has already been allocated. */
7040 if (globals->bx_glue_offset[reg])
7041 return;
7042
7043 s = bfd_get_linker_section
7044 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7045
7046 BFD_ASSERT (s != NULL);
7047
7048 /* Add symbol for veneer. */
7049 tmp_name = (char *)
7050 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7051
7052 BFD_ASSERT (tmp_name);
7053
7054 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7055
7056 myh = elf_link_hash_lookup
7057 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
7058
7059 BFD_ASSERT (myh == NULL);
7060
7061 bh = NULL;
7062 val = globals->bx_glue_size;
7063 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7064 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7065 NULL, TRUE, FALSE, &bh);
7066
7067 myh = (struct elf_link_hash_entry *) bh;
7068 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7069 myh->forced_local = 1;
7070
7071 s->size += ARM_BX_VENEER_SIZE;
7072 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7073 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7074 }
7075
7076
7077 /* Add an entry to the code/data map for section SEC. */
7078
7079 static void
7080 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7081 {
7082 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7083 unsigned int newidx;
7084
7085 if (sec_data->map == NULL)
7086 {
7087 sec_data->map = (elf32_arm_section_map *)
7088 bfd_malloc (sizeof (elf32_arm_section_map));
7089 sec_data->mapcount = 0;
7090 sec_data->mapsize = 1;
7091 }
7092
7093 newidx = sec_data->mapcount++;
7094
7095 if (sec_data->mapcount > sec_data->mapsize)
7096 {
7097 sec_data->mapsize *= 2;
7098 sec_data->map = (elf32_arm_section_map *)
7099 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7100 * sizeof (elf32_arm_section_map));
7101 }
7102
7103 if (sec_data->map)
7104 {
7105 sec_data->map[newidx].vma = vma;
7106 sec_data->map[newidx].type = type;
7107 }
7108 }
7109
7110
7111 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7112 veneers are handled for now. */
7113
7114 static bfd_vma
7115 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7116 elf32_vfp11_erratum_list *branch,
7117 bfd *branch_bfd,
7118 asection *branch_sec,
7119 unsigned int offset)
7120 {
7121 asection *s;
7122 struct elf32_arm_link_hash_table *hash_table;
7123 char *tmp_name;
7124 struct elf_link_hash_entry *myh;
7125 struct bfd_link_hash_entry *bh;
7126 bfd_vma val;
7127 struct _arm_elf_section_data *sec_data;
7128 elf32_vfp11_erratum_list *newerr;
7129
7130 hash_table = elf32_arm_hash_table (link_info);
7131 BFD_ASSERT (hash_table != NULL);
7132 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7133
7134 s = bfd_get_linker_section
7135 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7136
7137 sec_data = elf32_arm_section_data (s);
7138
7139 BFD_ASSERT (s != NULL);
7140
7141 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7142 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7143
7144 BFD_ASSERT (tmp_name);
7145
7146 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7147 hash_table->num_vfp11_fixes);
7148
7149 myh = elf_link_hash_lookup
7150 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7151
7152 BFD_ASSERT (myh == NULL);
7153
7154 bh = NULL;
7155 val = hash_table->vfp11_erratum_glue_size;
7156 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7157 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7158 NULL, TRUE, FALSE, &bh);
7159
7160 myh = (struct elf_link_hash_entry *) bh;
7161 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7162 myh->forced_local = 1;
7163
7164 /* Link veneer back to calling location. */
7165 sec_data->erratumcount += 1;
7166 newerr = (elf32_vfp11_erratum_list *)
7167 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7168
7169 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7170 newerr->vma = -1;
7171 newerr->u.v.branch = branch;
7172 newerr->u.v.id = hash_table->num_vfp11_fixes;
7173 branch->u.b.veneer = newerr;
7174
7175 newerr->next = sec_data->erratumlist;
7176 sec_data->erratumlist = newerr;
7177
7178 /* A symbol for the return from the veneer. */
7179 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7180 hash_table->num_vfp11_fixes);
7181
7182 myh = elf_link_hash_lookup
7183 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7184
7185 if (myh != NULL)
7186 abort ();
7187
7188 bh = NULL;
7189 val = offset + 4;
7190 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7191 branch_sec, val, NULL, TRUE, FALSE, &bh);
7192
7193 myh = (struct elf_link_hash_entry *) bh;
7194 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7195 myh->forced_local = 1;
7196
7197 free (tmp_name);
7198
7199 /* Generate a mapping symbol for the veneer section, and explicitly add an
7200 entry for that symbol to the code/data map for the section. */
7201 if (hash_table->vfp11_erratum_glue_size == 0)
7202 {
7203 bh = NULL;
7204 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7205 ever requires this erratum fix. */
7206 _bfd_generic_link_add_one_symbol (link_info,
7207 hash_table->bfd_of_glue_owner, "$a",
7208 BSF_LOCAL, s, 0, NULL,
7209 TRUE, FALSE, &bh);
7210
7211 myh = (struct elf_link_hash_entry *) bh;
7212 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7213 myh->forced_local = 1;
7214
7215 /* The elf32_arm_init_maps function only cares about symbols from input
7216 BFDs. We must make a note of this generated mapping symbol
7217 ourselves so that code byteswapping works properly in
7218 elf32_arm_write_section. */
7219 elf32_arm_section_map_add (s, 'a', 0);
7220 }
7221
7222 s->size += VFP11_ERRATUM_VENEER_SIZE;
7223 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7224 hash_table->num_vfp11_fixes++;
7225
7226 /* The offset of the veneer. */
7227 return val;
7228 }
7229
7230 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7231 veneers need to be handled because used only in Cortex-M. */
7232
static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* Name the veneer after the running fix counter (+10 leaves room for
     the printed counter and NUL).  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer's entry symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Keep the veneer symbol local to the output.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  /* Push the new record onto the section's erratum list.  */
  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  /* The return point is the instruction following the one being
     replaced, hence OFFSET + 4.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  /* Unlike the VFP11 case, the veneer size varies (LDM vs VLDM), so it
     is passed in by the caller.  */
  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  return val;
}
7348
7349 #define ARM_GLUE_SECTION_FLAGS \
7350 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7351 | SEC_READONLY | SEC_LINKER_CREATED)
7352
7353 /* Create a fake section for use by the ARM backend of the linker. */
7354
7355 static bfd_boolean
7356 arm_make_glue_section (bfd * abfd, const char * name)
7357 {
7358 asection * sec;
7359
7360 sec = bfd_get_linker_section (abfd, name);
7361 if (sec != NULL)
7362 /* Already made. */
7363 return TRUE;
7364
7365 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7366
7367 if (sec == NULL
7368 || !bfd_set_section_alignment (abfd, sec, 2))
7369 return FALSE;
7370
7371 /* Set the gc mark to prevent the section from being removed by garbage
7372 collection, despite the fact that no relocs refer to this section. */
7373 sec->gc_mark = 1;
7374
7375 return TRUE;
7376 }
7377
7378 /* Set size of .plt entries. This function is called from the
7379 linker scripts in ld/emultempl/{armelf}.em. */
7380
void
bfd_elf32_arm_use_long_plt (void)
{
  /* Latch the request for long .plt entries in a file-scope flag that
     is consumed elsewhere in this file when PLT entries are built.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
7386
7387 /* Add the glue sections to ABFD. This function is called from the
7388 linker scripts in ld/emultempl/{armelf}.em. */
7389
7390 bfd_boolean
7391 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7392 struct bfd_link_info *info)
7393 {
7394 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7395 bfd_boolean dostm32l4xx = globals
7396 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7397 bfd_boolean addglue;
7398
7399 /* If we are only performing a partial
7400 link do not bother adding the glue. */
7401 if (bfd_link_relocatable (info))
7402 return TRUE;
7403
7404 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7405 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7406 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7407 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7408
7409 if (!dostm32l4xx)
7410 return addglue;
7411
7412 return addglue
7413 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7414 }
7415
7416 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7417 ensures they are not marked for deletion by
7418 strip_excluded_output_sections () when veneers are going to be created
7419 later. Not doing so would trigger assert on empty section size in
7420 lang_size_sections_1 (). */
7421
7422 void
7423 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7424 {
7425 enum elf32_arm_stub_type stub_type;
7426
7427 /* If we are only performing a partial
7428 link do not bother adding the glue. */
7429 if (bfd_link_relocatable (info))
7430 return;
7431
7432 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7433 {
7434 asection *out_sec;
7435 const char *out_sec_name;
7436
7437 if (!arm_dedicated_stub_output_section_required (stub_type))
7438 continue;
7439
7440 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7441 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7442 if (out_sec != NULL)
7443 out_sec->flags |= SEC_KEEP;
7444 }
7445 }
7446
7447 /* Select a BFD to be used to hold the sections used by the glue code.
7448 This function is called from the linker scripts in ld/emultempl/
7449 {armelf/pe}.em. */
7450
7451 bfd_boolean
7452 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7453 {
7454 struct elf32_arm_link_hash_table *globals;
7455
7456 /* If we are only performing a partial link
7457 do not bother getting a bfd to hold the glue. */
7458 if (bfd_link_relocatable (info))
7459 return TRUE;
7460
7461 /* Make sure we don't attach the glue sections to a dynamic object. */
7462 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7463
7464 globals = elf32_arm_hash_table (info);
7465 BFD_ASSERT (globals != NULL);
7466
7467 if (globals->bfd_of_glue_owner != NULL)
7468 return TRUE;
7469
7470 /* Save the bfd for later use. */
7471 globals->bfd_of_glue_owner = abfd;
7472
7473 return TRUE;
7474 }
7475
7476 static void
7477 check_use_blx (struct elf32_arm_link_hash_table *globals)
7478 {
7479 int cpu_arch;
7480
7481 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7482 Tag_CPU_arch);
7483
7484 if (globals->fix_arm1176)
7485 {
7486 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7487 globals->use_blx = 1;
7488 }
7489 else
7490 {
7491 if (cpu_arch > TAG_CPU_ARCH_V4T)
7492 globals->use_blx = 1;
7493 }
7494 }
7495
/* Scan the relocations of ABFD before section sizes are fixed, and
   reserve whatever glue they require: an ARM->Thumb veneer for each
   R_ARM_PC24 branch to a Thumb-target symbol, and a BX veneer for each
   R_ARM_V4BX (when fix_v4bx requests veneered fixing).  Returns FALSE
   only on a hard error (BE8 endianness mismatch or failure to read
   relocs/contents).  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.
	     Needed only for R_ARM_V4BX, to read the BX operand
	     register out of the instruction.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX target register lives in the low nibble.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Free per-section buffers, but never the BFD's cached copies.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  /* Same cleanup as above, for whichever buffers were live.  */
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
7656 #endif
7657
7658
7659 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7660
7661 void
7662 bfd_elf32_arm_init_maps (bfd *abfd)
7663 {
7664 Elf_Internal_Sym *isymbuf;
7665 Elf_Internal_Shdr *hdr;
7666 unsigned int i, localsyms;
7667
7668 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7669 if (! is_arm_elf (abfd))
7670 return;
7671
7672 if ((abfd->flags & DYNAMIC) != 0)
7673 return;
7674
7675 hdr = & elf_symtab_hdr (abfd);
7676 localsyms = hdr->sh_info;
7677
7678 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
7679 should contain the number of local symbols, which should come before any
7680 global symbols. Mapping symbols are always local. */
7681 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
7682 NULL);
7683
7684 /* No internal symbols read? Skip this BFD. */
7685 if (isymbuf == NULL)
7686 return;
7687
7688 for (i = 0; i < localsyms; i++)
7689 {
7690 Elf_Internal_Sym *isym = &isymbuf[i];
7691 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
7692 const char *name;
7693
7694 if (sec != NULL
7695 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
7696 {
7697 name = bfd_elf_string_from_elf_section (abfd,
7698 hdr->sh_link, isym->st_name);
7699
7700 if (bfd_is_arm_special_symbol_name (name,
7701 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
7702 elf32_arm_section_map_add (sec, name[1], isym->st_value);
7703 }
7704 }
7705 }
7706
7707
7708 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7709 say what they wanted. */
7710
7711 void
7712 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
7713 {
7714 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7715 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7716
7717 if (globals == NULL)
7718 return;
7719
7720 if (globals->fix_cortex_a8 == -1)
7721 {
7722 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
7723 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
7724 && (out_attr[Tag_CPU_arch_profile].i == 'A'
7725 || out_attr[Tag_CPU_arch_profile].i == 0))
7726 globals->fix_cortex_a8 = 1;
7727 else
7728 globals->fix_cortex_a8 = 0;
7729 }
7730 }
7731
7732
7733 void
7734 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
7735 {
7736 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7737 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7738
7739 if (globals == NULL)
7740 return;
7741 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
7742 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
7743 {
7744 switch (globals->vfp11_fix)
7745 {
7746 case BFD_ARM_VFP11_FIX_DEFAULT:
7747 case BFD_ARM_VFP11_FIX_NONE:
7748 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
7749 break;
7750
7751 default:
7752 /* Give a warning, but do as the user requests anyway. */
7753 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
7754 "workaround is not necessary for target architecture"), obfd);
7755 }
7756 }
7757 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
7758 /* For earlier architectures, we might need the workaround, but do not
7759 enable it by default. If users is running with broken hardware, they
7760 must enable the erratum fix explicitly. */
7761 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
7762 }
7763
7764 void
7765 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
7766 {
7767 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7768 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7769
7770 if (globals == NULL)
7771 return;
7772
7773 /* We assume only Cortex-M4 may require the fix. */
7774 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
7775 || out_attr[Tag_CPU_arch_profile].i != 'M')
7776 {
7777 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
7778 /* Give a warning, but do as the user requests anyway. */
7779 _bfd_error_handler
7780 (_("%pB: warning: selected STM32L4XX erratum "
7781 "workaround is not necessary for target architecture"), obfd);
7782 }
7783 }
7784
/* Which VFP11 pipeline executes a given instruction, as classified by
   bfd_arm_vfp11_insn_decode () below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline (also plain arithmetic).  */
  VFP11_LS,	/* Load/store and register-transfer pipeline.  */
  VFP11_DS,	/* Divide/sqrt pipeline.  */
  VFP11_BAD	/* Unrecognised / not a VFP instruction.  */
};
7792
7793 /* Return a VFP register number. This is encoded as RX:X for single-precision
7794 registers, or X:RX for double-precision registers, where RX is the group of
7795 four bits in the instruction encoding and X is the single extension bit.
7796 RX and X fields are specified using their lowest (starting) bit. The return
7797 value is:
7798
7799 0...31: single-precision registers s0...s31
7800 32...63: double-precision registers d0...d31.
7801
7802 Although X should be zero for VFP11 (encoding d0...d15 only), we might
7803 encounter VFP3 instructions, so we allow the full range for DP registers. */
7804
7805 static unsigned int
7806 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
7807 unsigned int x)
7808 {
7809 if (is_double)
7810 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
7811 else
7812 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
7813 }
7814
7815 /* Set bits in *WMASK according to a register number REG as encoded by
7816 bfd_arm_vfp11_regno(). Ignore d16-d31. */
7817
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno ().  Bits 0-31 track s0-s31; a double-precision
   register d0-d15 marks both of its overlapping single-precision
   halves.  d16-d31 are ignored (not present on VFP11).  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* Use an unsigned constant: "1 << 31" (for s31) would left-shift
       into the sign bit of a signed int, which is undefined behaviour.  */
    *wmask |= 1u << reg;
  else if (reg < 48)
    /* Likewise "3 << 30" (for d15) would overflow a signed int.  */
    *wmask |= 3u << ((reg - 32) * 2);
}
7826
7827 /* Return TRUE if WMASK overwrites anything in REGS. */
7828
7829 static bfd_boolean
7830 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
7831 {
7832 int i;
7833
7834 for (i = 0; i < numregs; i++)
7835 {
7836 unsigned int reg = regs[i];
7837
7838 if (reg < 32 && (wmask & (1 << reg)) != 0)
7839 return TRUE;
7840
7841 reg -= 32;
7842
7843 if (reg >= 16)
7844 continue;
7845
7846 if ((wmask & (3 << (reg * 2))) != 0)
7847 return TRUE;
7848 }
7849
7850 return FALSE;
7851 }
7852
7853 /* In this function, we're interested in two things: finding input registers
7854 for VFP data-processing instructions, and finding the set of registers which
7855 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
7856 hold the written set, so FLDM etc. are easy to deal with (we're only
7857 interested in 32 SP registers or 16 dp registers, due to the VFP version
7858 implemented by the chip in question). DP registers are marked by setting
7859 both SP registers in the write mask). */
7860
static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 8-11 == 0xb select the double-precision variants.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Assemble the p:q:r:s opcode bits into a single selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Three-operand multiply-accumulate: Fd is both read and
	     written, so it counts as an input too.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Plain two-input binop: Fd written, Fn and Fm read.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means the transfer writes the FP registers.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* Single-precision pair: two consecutive registers.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* P, U and W addressing-mode bits distinguish the variants.  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* The immediate gives the transfer length; for doubles it
	       counts words, hence the halving.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
8044
8045
8046 static int elf32_arm_compare_mapping (const void * a, const void * b);
8047
8048
8049 /* Look for potentially-troublesome code sequences which might trigger the
8050 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8051 (available from ARM) for details of the erratum. A short version is
8052 described in ld.texinfo. */
8053
8054 bfd_boolean
8055 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8056 {
8057 asection *sec;
8058 bfd_byte *contents = NULL;
8059 int state = 0;
8060 int regs[3], numregs = 0;
8061 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8062 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8063
8064 if (globals == NULL)
8065 return FALSE;
8066
8067 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8068 The states transition as follows:
8069
8070 0 -> 1 (vector) or 0 -> 2 (scalar)
8071 A VFP FMAC-pipeline instruction has been seen. Fill
8072 regs[0]..regs[numregs-1] with its input operands. Remember this
8073 instruction in 'first_fmac'.
8074
8075 1 -> 2
8076 Any instruction, except for a VFP instruction which overwrites
8077 regs[*].
8078
8079 1 -> 3 [ -> 0 ] or
8080 2 -> 3 [ -> 0 ]
8081 A VFP instruction has been seen which overwrites any of regs[*].
8082 We must make a veneer! Reset state to 0 before examining next
8083 instruction.
8084
8085 2 -> 0
8086 If we fail to match anything in state 2, reset to state 0 and reset
8087 the instruction pointer to the instruction after 'first_fmac'.
8088
8089 If the VFP11 vector mode is in use, there must be at least two unrelated
8090 instructions between anti-dependent VFP11 instructions to properly avoid
8091 triggering the erratum, hence the use of the extra state 1. */
8092
8093 /* If we are only performing a partial link do not bother
8094 to construct any glue. */
8095 if (bfd_link_relocatable (link_info))
8096 return TRUE;
8097
8098 /* Skip if this bfd does not correspond to an ELF image. */
8099 if (! is_arm_elf (abfd))
8100 return TRUE;
8101
8102 /* We should have chosen a fix type by the time we get here. */
8103 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8104
8105 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8106 return TRUE;
8107
8108 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8109 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8110 return TRUE;
8111
8112 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8113 {
8114 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8115 struct _arm_elf_section_data *sec_data;
8116
8117 /* If we don't have executable progbits, we're not interested in this
8118 section. Also skip if section is to be excluded. */
8119 if (elf_section_type (sec) != SHT_PROGBITS
8120 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8121 || (sec->flags & SEC_EXCLUDE) != 0
8122 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8123 || sec->output_section == bfd_abs_section_ptr
8124 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8125 continue;
8126
8127 sec_data = elf32_arm_section_data (sec);
8128
8129 if (sec_data->mapcount == 0)
8130 continue;
8131
8132 if (elf_section_data (sec)->this_hdr.contents != NULL)
8133 contents = elf_section_data (sec)->this_hdr.contents;
8134 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8135 goto error_return;
8136
8137 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8138 elf32_arm_compare_mapping);
8139
8140 for (span = 0; span < sec_data->mapcount; span++)
8141 {
8142 unsigned int span_start = sec_data->map[span].vma;
8143 unsigned int span_end = (span == sec_data->mapcount - 1)
8144 ? sec->size : sec_data->map[span + 1].vma;
8145 char span_type = sec_data->map[span].type;
8146
8147 /* FIXME: Only ARM mode is supported at present. We may need to
8148 support Thumb-2 mode also at some point. */
8149 if (span_type != 'a')
8150 continue;
8151
8152 for (i = span_start; i < span_end;)
8153 {
8154 unsigned int next_i = i + 4;
8155 unsigned int insn = bfd_big_endian (abfd)
8156 ? (contents[i] << 24)
8157 | (contents[i + 1] << 16)
8158 | (contents[i + 2] << 8)
8159 | contents[i + 3]
8160 : (contents[i + 3] << 24)
8161 | (contents[i + 2] << 16)
8162 | (contents[i + 1] << 8)
8163 | contents[i];
8164 unsigned int writemask = 0;
8165 enum bfd_arm_vfp11_pipe vpipe;
8166
8167 switch (state)
8168 {
8169 case 0:
8170 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8171 &numregs);
8172 /* I'm assuming the VFP11 erratum can trigger with denorm
8173 operands on either the FMAC or the DS pipeline. This might
8174 lead to slightly overenthusiastic veneer insertion. */
8175 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8176 {
8177 state = use_vector ? 1 : 2;
8178 first_fmac = i;
8179 veneer_of_insn = insn;
8180 }
8181 break;
8182
8183 case 1:
8184 {
8185 int other_regs[3], other_numregs;
8186 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8187 other_regs,
8188 &other_numregs);
8189 if (vpipe != VFP11_BAD
8190 && bfd_arm_vfp11_antidependency (writemask, regs,
8191 numregs))
8192 state = 3;
8193 else
8194 state = 2;
8195 }
8196 break;
8197
8198 case 2:
8199 {
8200 int other_regs[3], other_numregs;
8201 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8202 other_regs,
8203 &other_numregs);
8204 if (vpipe != VFP11_BAD
8205 && bfd_arm_vfp11_antidependency (writemask, regs,
8206 numregs))
8207 state = 3;
8208 else
8209 {
8210 state = 0;
8211 next_i = first_fmac + 4;
8212 }
8213 }
8214 break;
8215
8216 case 3:
8217 abort (); /* Should be unreachable. */
8218 }
8219
8220 if (state == 3)
8221 {
8222 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8223 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8224
8225 elf32_arm_section_data (sec)->erratumcount += 1;
8226
8227 newerr->u.b.vfp_insn = veneer_of_insn;
8228
8229 switch (span_type)
8230 {
8231 case 'a':
8232 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8233 break;
8234
8235 default:
8236 abort ();
8237 }
8238
8239 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8240 first_fmac);
8241
8242 newerr->vma = -1;
8243
8244 newerr->next = sec_data->erratumlist;
8245 sec_data->erratumlist = newerr;
8246
8247 state = 0;
8248 }
8249
8250 i = next_i;
8251 }
8252 }
8253
8254 if (contents != NULL
8255 && elf_section_data (sec)->this_hdr.contents != contents)
8256 free (contents);
8257 contents = NULL;
8258 }
8259
8260 return TRUE;
8261
8262 error_return:
8263 if (contents != NULL
8264 && elf_section_data (sec)->this_hdr.contents != contents)
8265 free (contents);
8266
8267 return FALSE;
8268 }
8269
8270 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8271 after sections have been laid out, using specially-named symbols. */
8272
8273 void
8274 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8275 struct bfd_link_info *link_info)
8276 {
8277 asection *sec;
8278 struct elf32_arm_link_hash_table *globals;
8279 char *tmp_name;
8280
8281 if (bfd_link_relocatable (link_info))
8282 return;
8283
8284 /* Skip if this bfd does not correspond to an ELF image. */
8285 if (! is_arm_elf (abfd))
8286 return;
8287
8288 globals = elf32_arm_hash_table (link_info);
8289 if (globals == NULL)
8290 return;
8291
8292 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8293 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8294
8295 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8296 {
8297 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8298 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8299
8300 for (; errnode != NULL; errnode = errnode->next)
8301 {
8302 struct elf_link_hash_entry *myh;
8303 bfd_vma vma;
8304
8305 switch (errnode->type)
8306 {
8307 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8308 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8309 /* Find veneer symbol. */
8310 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8311 errnode->u.b.veneer->u.v.id);
8312
8313 myh = elf_link_hash_lookup
8314 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8315
8316 if (myh == NULL)
8317 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8318 abfd, "VFP11", tmp_name);
8319
8320 vma = myh->root.u.def.section->output_section->vma
8321 + myh->root.u.def.section->output_offset
8322 + myh->root.u.def.value;
8323
8324 errnode->u.b.veneer->vma = vma;
8325 break;
8326
8327 case VFP11_ERRATUM_ARM_VENEER:
8328 case VFP11_ERRATUM_THUMB_VENEER:
8329 /* Find return location. */
8330 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8331 errnode->u.v.id);
8332
8333 myh = elf_link_hash_lookup
8334 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8335
8336 if (myh == NULL)
8337 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8338 abfd, "VFP11", tmp_name);
8339
8340 vma = myh->root.u.def.section->output_section->vma
8341 + myh->root.u.def.section->output_offset
8342 + myh->root.u.def.value;
8343
8344 errnode->u.v.branch->vma = vma;
8345 break;
8346
8347 default:
8348 abort ();
8349 }
8350 }
8351 }
8352
8353 free (tmp_name);
8354 }
8355
8356 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8357 return locations after sections have been laid out, using
8358 specially-named symbols. */
8359
8360 void
8361 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8362 struct bfd_link_info *link_info)
8363 {
8364 asection *sec;
8365 struct elf32_arm_link_hash_table *globals;
8366 char *tmp_name;
8367
8368 if (bfd_link_relocatable (link_info))
8369 return;
8370
8371 /* Skip if this bfd does not correspond to an ELF image. */
8372 if (! is_arm_elf (abfd))
8373 return;
8374
8375 globals = elf32_arm_hash_table (link_info);
8376 if (globals == NULL)
8377 return;
8378
8379 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8380 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8381
8382 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8383 {
8384 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8385 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8386
8387 for (; errnode != NULL; errnode = errnode->next)
8388 {
8389 struct elf_link_hash_entry *myh;
8390 bfd_vma vma;
8391
8392 switch (errnode->type)
8393 {
8394 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8395 /* Find veneer symbol. */
8396 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8397 errnode->u.b.veneer->u.v.id);
8398
8399 myh = elf_link_hash_lookup
8400 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8401
8402 if (myh == NULL)
8403 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8404 abfd, "STM32L4XX", tmp_name);
8405
8406 vma = myh->root.u.def.section->output_section->vma
8407 + myh->root.u.def.section->output_offset
8408 + myh->root.u.def.value;
8409
8410 errnode->u.b.veneer->vma = vma;
8411 break;
8412
8413 case STM32L4XX_ERRATUM_VENEER:
8414 /* Find return location. */
8415 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8416 errnode->u.v.id);
8417
8418 myh = elf_link_hash_lookup
8419 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8420
8421 if (myh == NULL)
8422 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8423 abfd, "STM32L4XX", tmp_name);
8424
8425 vma = myh->root.u.def.section->output_section->vma
8426 + myh->root.u.def.section->output_offset
8427 + myh->root.u.def.value;
8428
8429 errnode->u.v.branch->vma = vma;
8430 break;
8431
8432 default:
8433 abort ();
8434 }
8435 }
8436 }
8437
8438 free (tmp_name);
8439 }
8440
8441 static inline bfd_boolean
8442 is_thumb2_ldmia (const insn32 insn)
8443 {
8444 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8445 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8446 return (insn & 0xffd02000) == 0xe8900000;
8447 }
8448
8449 static inline bfd_boolean
8450 is_thumb2_ldmdb (const insn32 insn)
8451 {
8452 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8453 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8454 return (insn & 0xffd02000) == 0xe9100000;
8455 }
8456
8457 static inline bfd_boolean
8458 is_thumb2_vldm (const insn32 insn)
8459 {
8460 /* A6.5 Extension register load or store instruction
8461 A7.7.229
8462 We look for SP 32-bit and DP 64-bit registers.
8463 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8464 <list> is consecutive 64-bit registers
8465 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8466 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8467 <list> is consecutive 32-bit registers
8468 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8469 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8470 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8471 return
8472 (((insn & 0xfe100f00) == 0xec100b00) ||
8473 ((insn & 0xfe100f00) == 0xec100a00))
8474 && /* (IA without !). */
8475 (((((insn << 7) >> 28) & 0xd) == 0x4)
8476 /* (IA with !), includes VPOP (when reg number is SP). */
8477 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8478 /* (DB with !). */
8479 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8480 }
8481
8482 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8483 VLDM opcode and:
8484 - computes the number and the mode of memory accesses
8485 - decides if the replacement should be done:
8486 . replaces only if > 8-word accesses
8487 . or (testing purposes only) replaces all accesses. */
8488
8489 static bfd_boolean
8490 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8491 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8492 {
8493 int nb_words = 0;
8494
8495 /* The field encoding the register list is the same for both LDMIA
8496 and LDMDB encodings. */
8497 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8498 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8499 else if (is_thumb2_vldm (insn))
8500 nb_words = (insn & 0xff);
8501
8502 /* DEFAULT mode accounts for the real bug condition situation,
8503 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8504 return
8505 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8506 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8507 }
8508
8509 /* Look for potentially-troublesome code sequences which might trigger
8510 the STM STM32L4XX erratum. */
8511
8512 bfd_boolean
8513 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8514 struct bfd_link_info *link_info)
8515 {
8516 asection *sec;
8517 bfd_byte *contents = NULL;
8518 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8519
8520 if (globals == NULL)
8521 return FALSE;
8522
8523 /* If we are only performing a partial link do not bother
8524 to construct any glue. */
8525 if (bfd_link_relocatable (link_info))
8526 return TRUE;
8527
8528 /* Skip if this bfd does not correspond to an ELF image. */
8529 if (! is_arm_elf (abfd))
8530 return TRUE;
8531
8532 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8533 return TRUE;
8534
8535 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8536 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8537 return TRUE;
8538
8539 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8540 {
8541 unsigned int i, span;
8542 struct _arm_elf_section_data *sec_data;
8543
8544 /* If we don't have executable progbits, we're not interested in this
8545 section. Also skip if section is to be excluded. */
8546 if (elf_section_type (sec) != SHT_PROGBITS
8547 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8548 || (sec->flags & SEC_EXCLUDE) != 0
8549 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8550 || sec->output_section == bfd_abs_section_ptr
8551 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8552 continue;
8553
8554 sec_data = elf32_arm_section_data (sec);
8555
8556 if (sec_data->mapcount == 0)
8557 continue;
8558
8559 if (elf_section_data (sec)->this_hdr.contents != NULL)
8560 contents = elf_section_data (sec)->this_hdr.contents;
8561 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8562 goto error_return;
8563
8564 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8565 elf32_arm_compare_mapping);
8566
8567 for (span = 0; span < sec_data->mapcount; span++)
8568 {
8569 unsigned int span_start = sec_data->map[span].vma;
8570 unsigned int span_end = (span == sec_data->mapcount - 1)
8571 ? sec->size : sec_data->map[span + 1].vma;
8572 char span_type = sec_data->map[span].type;
8573 int itblock_current_pos = 0;
8574
8575 /* Only Thumb2 mode need be supported with this CM4 specific
8576 code, we should not encounter any arm mode eg span_type
8577 != 'a'. */
8578 if (span_type != 't')
8579 continue;
8580
8581 for (i = span_start; i < span_end;)
8582 {
8583 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8584 bfd_boolean insn_32bit = FALSE;
8585 bfd_boolean is_ldm = FALSE;
8586 bfd_boolean is_vldm = FALSE;
8587 bfd_boolean is_not_last_in_it_block = FALSE;
8588
8589 /* The first 16-bits of all 32-bit thumb2 instructions start
8590 with opcode[15..13]=0b111 and the encoded op1 can be anything
8591 except opcode[12..11]!=0b00.
8592 See 32-bit Thumb instruction encoding. */
8593 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8594 insn_32bit = TRUE;
8595
8596 /* Compute the predicate that tells if the instruction
8597 is concerned by the IT block
8598 - Creates an error if there is a ldm that is not
8599 last in the IT block thus cannot be replaced
8600 - Otherwise we can create a branch at the end of the
8601 IT block, it will be controlled naturally by IT
8602 with the proper pseudo-predicate
8603 - So the only interesting predicate is the one that
8604 tells that we are not on the last item of an IT
8605 block. */
8606 if (itblock_current_pos != 0)
8607 is_not_last_in_it_block = !!--itblock_current_pos;
8608
8609 if (insn_32bit)
8610 {
8611 /* Load the rest of the insn (in manual-friendly order). */
8612 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8613 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8614 is_vldm = is_thumb2_vldm (insn);
8615
8616 /* Veneers are created for (v)ldm depending on
8617 option flags and memory accesses conditions; but
8618 if the instruction is not the last instruction of
8619 an IT block, we cannot create a jump there, so we
8620 bail out. */
8621 if ((is_ldm || is_vldm)
8622 && stm32l4xx_need_create_replacing_stub
8623 (insn, globals->stm32l4xx_fix))
8624 {
8625 if (is_not_last_in_it_block)
8626 {
8627 _bfd_error_handler
8628 /* xgettext:c-format */
8629 (_("%pB(%pA+%#x): error: multiple load detected"
8630 " in non-last IT block instruction:"
8631 " STM32L4XX veneer cannot be generated; "
8632 "use gcc option -mrestrict-it to generate"
8633 " only one instruction per IT block"),
8634 abfd, sec, i);
8635 }
8636 else
8637 {
8638 elf32_stm32l4xx_erratum_list *newerr =
8639 (elf32_stm32l4xx_erratum_list *)
8640 bfd_zmalloc
8641 (sizeof (elf32_stm32l4xx_erratum_list));
8642
8643 elf32_arm_section_data (sec)
8644 ->stm32l4xx_erratumcount += 1;
8645 newerr->u.b.insn = insn;
8646 /* We create only thumb branches. */
8647 newerr->type =
8648 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8649 record_stm32l4xx_erratum_veneer
8650 (link_info, newerr, abfd, sec,
8651 i,
8652 is_ldm ?
8653 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8654 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8655 newerr->vma = -1;
8656 newerr->next = sec_data->stm32l4xx_erratumlist;
8657 sec_data->stm32l4xx_erratumlist = newerr;
8658 }
8659 }
8660 }
8661 else
8662 {
8663 /* A7.7.37 IT p208
8664 IT blocks are only encoded in T1
8665 Encoding T1: IT{x{y{z}}} <firstcond>
8666 1 0 1 1 - 1 1 1 1 - firstcond - mask
8667 if mask = '0000' then see 'related encodings'
8668 We don't deal with UNPREDICTABLE, just ignore these.
8669 There can be no nested IT blocks so an IT block
8670 is naturally a new one for which it is worth
8671 computing its size. */
8672 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
8673 && ((insn & 0x000f) != 0x0000);
8674 /* If we have a new IT block we compute its size. */
8675 if (is_newitblock)
8676 {
8677 /* Compute the number of instructions controlled
8678 by the IT block, it will be used to decide
8679 whether we are inside an IT block or not. */
8680 unsigned int mask = insn & 0x000f;
8681 itblock_current_pos = 4 - ctz (mask);
8682 }
8683 }
8684
8685 i += insn_32bit ? 4 : 2;
8686 }
8687 }
8688
8689 if (contents != NULL
8690 && elf_section_data (sec)->this_hdr.contents != contents)
8691 free (contents);
8692 contents = NULL;
8693 }
8694
8695 return TRUE;
8696
8697 error_return:
8698 if (contents != NULL
8699 && elf_section_data (sec)->this_hdr.contents != contents)
8700 free (contents);
8701
8702 return FALSE;
8703 }
8704
8705 /* Set target relocation values needed during linking. */
8706
8707 void
8708 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
8709 struct bfd_link_info *link_info,
8710 struct elf32_arm_params *params)
8711 {
8712 struct elf32_arm_link_hash_table *globals;
8713
8714 globals = elf32_arm_hash_table (link_info);
8715 if (globals == NULL)
8716 return;
8717
8718 globals->target1_is_rel = params->target1_is_rel;
8719 if (strcmp (params->target2_type, "rel") == 0)
8720 globals->target2_reloc = R_ARM_REL32;
8721 else if (strcmp (params->target2_type, "abs") == 0)
8722 globals->target2_reloc = R_ARM_ABS32;
8723 else if (strcmp (params->target2_type, "got-rel") == 0)
8724 globals->target2_reloc = R_ARM_GOT_PREL;
8725 else
8726 {
8727 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
8728 params->target2_type);
8729 }
8730 globals->fix_v4bx = params->fix_v4bx;
8731 globals->use_blx |= params->use_blx;
8732 globals->vfp11_fix = params->vfp11_denorm_fix;
8733 globals->stm32l4xx_fix = params->stm32l4xx_fix;
8734 globals->pic_veneer = params->pic_veneer;
8735 globals->fix_cortex_a8 = params->fix_cortex_a8;
8736 globals->fix_arm1176 = params->fix_arm1176;
8737 globals->cmse_implib = params->cmse_implib;
8738 globals->in_implib_bfd = params->in_implib_bfd;
8739
8740 BFD_ASSERT (is_arm_elf (output_bfd));
8741 elf_arm_tdata (output_bfd)->no_enum_size_warning
8742 = params->no_enum_size_warning;
8743 elf_arm_tdata (output_bfd)->no_wchar_size_warning
8744 = params->no_wchar_size_warning;
8745 }
8746
/* Replace the target offset of a Thumb bl or b.w instruction.  ABFD is
   the bfd whose byte order governs the 16-bit reads/writes, OFFSET is
   the new (even, byte) branch displacement, and INSN points at the
   first halfword of the 32-bit instruction to patch in place.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  /* Thumb branch targets are halfword-aligned; bit 0 must be clear.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* Upper halfword: keep the opcode bits, insert offset bits 21..12
     and the sign bit S (bit 10 of the halfword).  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Lower halfword: insert the J1/J2 bits (I1 = NOT(offset<23> XOR S),
     I2 = NOT(offset<22> XOR S)) and offset bits 11..1.
     NOTE(review): the mask ~0x2fff also clears bit 12 of the lower
     halfword — confirm this is intended for both the BL and B.W forms.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
8771
/* Thumb code calling an ARM function.  Populate (on first use) the
   Thumb->ARM interworking stub for symbol NAME in the glue section and
   redirect the original Thumb BL at HIT_DATA to branch to the stub.
   Returns TRUE on success, FALSE if the glue symbol cannot be found or
   interworking is not enabled for the target object.  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char *	       name,
			 bfd *		       input_bfd,
			 bfd *		       output_bfd,
			 asection *	       input_section,
			 bfd_byte *	       hit_data,
			 asection *	       sym_sec,
			 bfd_vma	       offset,
			 bfd_signed_vma	       addend,
			 bfd_vma	       val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 of the glue symbol's value flags a stub that has not yet been
     populated; it is cleared below once the stub contents are written.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub body: "bx pc; nop" (Thumb) followed by an ARM branch.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
     /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
8872
/* Populate an Arm to Thumb stub.  Returns the stub symbol.  The stub for
   NAME is written into glue section S at the offset recorded in the glue
   symbol (on first use only; bit 0 of the symbol value flags an
   unpopulated stub).  VAL is the Thumb destination address.  Three stub
   flavours are emitted: PIC (pc-relative), BLX-capable (v5+), and the
   plain v4t ldr/bx sequence.  Returns NULL if the glue symbol cannot be
   found (ERROR_MESSAGE is set by find_arm_glue in that case).  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char *	   name,
			     bfd *		   input_bfd,
			     bfd *		   output_bfd,
			     asection *		   sym_sec,
			     bfd_vma		   val,
			     asection *		   s,
			     char **		   error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Bit 0 set means the stub has not been written yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
8967
/* Arm code calling a Thumb function.  Create (or reuse) the ARM->Thumb
   interworking stub for NAME and rewrite the ARM branch at HIT_DATA so
   it targets the stub instead of the Thumb destination VAL.  Returns
   TRUE on success, FALSE if the stub could not be created.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char *	       name,
			 bfd *		       input_bfd,
			 bfd *		       output_bfd,
			 asection *	       input_section,
			 bfd_byte *	       hit_data,
			 asection *	       sym_sec,
			 bfd_vma	       offset,
			 bfd_signed_vma	       addend,
			 bfd_vma	       val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep only the condition/opcode byte of the original branch; the
     24-bit offset field is recomputed below.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
9024
/* Populate Arm stub for an exported Thumb function.  Traversal callback
   over hash-table symbols: INF is really the bfd_link_info.  Only acts
   on symbols that had export glue allocated (eh->export_glue non-NULL);
   computes the glue target address and writes the stub via
   elf32_arm_create_thumb_stub.  Always returns TRUE so the traversal
   continues.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Output address of the export-glue symbol.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
9068
/* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
9070
9071 static bfd_vma
9072 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9073 {
9074 bfd_byte *p;
9075 bfd_vma glue_addr;
9076 asection *s;
9077 struct elf32_arm_link_hash_table *globals;
9078
9079 globals = elf32_arm_hash_table (info);
9080 BFD_ASSERT (globals != NULL);
9081 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9082
9083 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9084 ARM_BX_GLUE_SECTION_NAME);
9085 BFD_ASSERT (s != NULL);
9086 BFD_ASSERT (s->contents != NULL);
9087 BFD_ASSERT (s->output_section != NULL);
9088
9089 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9090
9091 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9092
9093 if ((globals->bx_glue_offset[reg] & 1) == 0)
9094 {
9095 p = s->contents + glue_addr;
9096 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9097 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9098 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9099 globals->bx_glue_offset[reg] |= 1;
9100 }
9101
9102 return glue_addr + s->output_section->vma + s->output_offset;
9103 }
9104
9105 /* Generate Arm stubs for exported Thumb symbols. */
9106 static void
9107 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9108 struct bfd_link_info *link_info)
9109 {
9110 struct elf32_arm_link_hash_table * globals;
9111
9112 if (link_info == NULL)
9113 /* Ignore this if we are not called by the ELF backend linker. */
9114 return;
9115
9116 globals = elf32_arm_hash_table (link_info);
9117 if (globals == NULL)
9118 return;
9119
9120 /* If blx is available then exported Thumb symbols are OK and there is
9121 nothing to do. */
9122 if (globals->use_blx)
9123 return;
9124
9125 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9126 link_info);
9127 }
9128
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */
9131
9132 static void
9133 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9134 bfd_size_type count)
9135 {
9136 struct elf32_arm_link_hash_table *htab;
9137
9138 htab = elf32_arm_hash_table (info);
9139 BFD_ASSERT (htab->root.dynamic_sections_created);
9140 if (sreloc == NULL)
9141 abort ();
9142 sreloc->size += RELOC_SIZE (htab) * count;
9143 }
9144
9145 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9146 dynamic, the relocations should go in SRELOC, otherwise they should
9147 go in the special .rel.iplt section. */
9148
9149 static void
9150 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9151 bfd_size_type count)
9152 {
9153 struct elf32_arm_link_hash_table *htab;
9154
9155 htab = elf32_arm_hash_table (info);
9156 if (!htab->root.dynamic_sections_created)
9157 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9158 else
9159 {
9160 BFD_ASSERT (sreloc != NULL);
9161 sreloc->size += RELOC_SIZE (htab) * count;
9162 }
9163 }
9164
9165 /* Add relocation REL to the end of relocation section SRELOC. */
9166
9167 static void
9168 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9169 asection *sreloc, Elf_Internal_Rela *rel)
9170 {
9171 bfd_byte *loc;
9172 struct elf32_arm_link_hash_table *htab;
9173
9174 htab = elf32_arm_hash_table (info);
9175 if (!htab->root.dynamic_sections_created
9176 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9177 sreloc = htab->root.irelplt;
9178 if (sreloc == NULL)
9179 abort ();
9180 loc = sreloc->contents;
9181 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9182 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9183 abort ();
9184 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9185 }
9186
9187 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9188 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9189 to .plt. */
9190
static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.
     The stub, when needed, sits immediately before the entry, so the
     recorded offset points at the ARM entry proper.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.
	 NOTE(review): for non-iplt entries the offset is biased by
	 8 * num_tls_desc — presumably the space occupied by TLS
	 descriptor slots; confirm against the GOT layout code.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      sgotplt->size += 4;
    }
}
9248
9249 static bfd_vma
9250 arm_movw_immediate (bfd_vma value)
9251 {
9252 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9253 }
9254
9255 static bfd_vma
9256 arm_movt_immediate (bfd_vma value)
9257 {
9258 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9259 }
9260
9261 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9262 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9263 Otherwise, DYNINDX is the index of the symbol in the dynamic
9264 symbol table and SYM_VALUE is undefined.
9265
9266 ROOT_PLT points to the offset of the PLT entry from the start of its
9267 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9268 bookkeeping information.
9269
9270 Returns FALSE if there was a problem. */
9271
static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      /* Symbian PLT entries are a fixed two-word sequence with an
	 inline data word relocated by R_ARM_GLOB_DAT; there is no
	 separate GOT slot.  */
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  Bit 0 is masked off here;
	 NOTE(review): it appears to be used as a flag elsewhere —
	 confirm against the GOT bookkeeping code.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared-library PLT entry: words 2 and 5 are data
	     (GOT offset and .rel.plt offset), the rest are
	     instructions.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT entry: word 2 is the absolute GOT
	     address, word 4 gets a PC-relative branch displacement,
	     word 5 is the .rel.plt offset.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The branch displacement must fit in the 24-bit immediate
	     field of the final instruction.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  The bit-scattering below packs
	     the displacement into the Thumb-2 MOVW/MOVT immediate fields.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  /* The optional Thumb stub occupies the bytes immediately
	     before the ARM entry (see elf32_arm_allocate_plt_entry).  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* The short entry can only reach GOT entries within a
		 28-bit displacement of the PLT.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  /* Emit the relocation: IRELATIVE entries are appended via the
     bookkeeping helper, dynamic entries go at their fixed index.  */
  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}
9592
9593 /* Some relocations map to different relocations depending on the
9594 target. Return the real relocation. */
9595
9596 static int
9597 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9598 int r_type)
9599 {
9600 switch (r_type)
9601 {
9602 case R_ARM_TARGET1:
9603 if (globals->target1_is_rel)
9604 return R_ARM_REL32;
9605 else
9606 return R_ARM_ABS32;
9607
9608 case R_ARM_TARGET2:
9609 return globals->target2_reloc;
9610
9611 default:
9612 return r_type;
9613 }
9614 }
9615
9616 /* Return the base VMA address which should be subtracted from real addresses
9617 when resolving @dtpoff relocation.
9618 This is PT_TLS segment p_vaddr. */
9619
9620 static bfd_vma
9621 dtpoff_base (struct bfd_link_info *info)
9622 {
9623 /* If tls_sec is NULL, we should have signalled an error already. */
9624 if (elf_hash_table (info)->tls_sec == NULL)
9625 return 0;
9626 return elf_hash_table (info)->tls_sec->vma;
9627 }
9628
9629 /* Return the relocation value for @tpoff relocation
9630 if STT_TLS virtual address is ADDRESS. */
9631
9632 static bfd_vma
9633 tpoff (struct bfd_link_info *info, bfd_vma address)
9634 {
9635 struct elf_link_hash_table *htab = elf_hash_table (info);
9636 bfd_vma base;
9637
9638 /* If tls_sec is NULL, we should have signalled an error already. */
9639 if (htab->tls_sec == NULL)
9640 return 0;
9641 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
9642 return address - htab->tls_sec->vma + base;
9643 }
9644
9645 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
9646 VALUE is the relocation value. */
9647
9648 static bfd_reloc_status_type
9649 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
9650 {
9651 if (value > 0xfff)
9652 return bfd_reloc_overflow;
9653
9654 value |= bfd_get_32 (abfd, data) & 0xfffff000;
9655 bfd_put_32 (abfd, value, data);
9656 return bfd_reloc_ok;
9657 }
9658
9659 /* Handle TLS relaxations. Relaxing is possible for symbols that use
9660 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
9661 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
9662
9663 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
9664 is to then call final_link_relocate. Return other values in the
9665 case of error.
9666
   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
9668 the pre-relaxed code. It would be nice if the relocs were updated
9669 to match the optimization. */
9670
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* For a local symbol the stored value is simply zeroed;
	 otherwise adjust the in-place value.  Bit 0 of the value
	 distinguishes Thumb (subtract 5) from ARM (subtract 8) —
	 NOTE(review): presumably folding in the PC bias of the
	 referencing sequence; confirm against the relocation docs.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* Caller still needs to apply the relocation proper.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Rewrite the recognised TLS descriptor sequence
	 in place; any other instruction at this offset is an error.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	/* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)	/* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)	/* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same rewriting as above, for the ARM encoding of
	 the descriptor sequence.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000)	/* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004)	/* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30)	/* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]'  */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  Replace the 32-bit call with two 16-bit
	 instructions (or two nops for a local symbol).  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0]  */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
9809
9810 /* For a given value of n, calculate the value of G_n as required to
9811 deal with group relocations. We return it in the form of an
9812 encoded constant-and-rotation, together with the final residual. If n is
9813 specified as less than zero, then final_residual is filled with the
9814 input value and no further action is performed. */
9815
9816 static bfd_vma
9817 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
9818 {
9819 int current_n;
9820 bfd_vma g_n;
9821 bfd_vma encoded_g_n = 0;
9822 bfd_vma residual = value; /* Also known as Y_n. */
9823
9824 for (current_n = 0; current_n <= n; current_n++)
9825 {
9826 int shift;
9827
9828 /* Calculate which part of the value to mask. */
9829 if (residual == 0)
9830 shift = 0;
9831 else
9832 {
9833 int msb;
9834
9835 /* Determine the most significant bit in the residual and
9836 align the resulting value to a 2-bit boundary. */
9837 for (msb = 30; msb >= 0; msb -= 2)
9838 if (residual & (3 << msb))
9839 break;
9840
9841 /* The desired shift is now (msb - 6), or zero, whichever
9842 is the greater. */
9843 shift = msb - 6;
9844 if (shift < 0)
9845 shift = 0;
9846 }
9847
9848 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
9849 g_n = residual & (0xff << shift);
9850 encoded_g_n = (g_n >> shift)
9851 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
9852
9853 /* Calculate the residual for the next time around. */
9854 residual &= ~g_n;
9855 }
9856
9857 *final_residual = residual;
9858
9859 return encoded_g_n;
9860 }
9861
9862 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9863 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
9864
9865 static int
9866 identify_add_or_sub (bfd_vma insn)
9867 {
9868 int opcode = insn & 0x1e00000;
9869
9870 if (opcode == 1 << 23) /* ADD */
9871 return 1;
9872
9873 if (opcode == 1 << 22) /* SUB */
9874 return -1;
9875
9876 return 0;
9877 }
9878
9879 /* Perform a relocation as part of a final link. */
9880
9881 static bfd_reloc_status_type
9882 elf32_arm_final_link_relocate (reloc_howto_type * howto,
9883 bfd * input_bfd,
9884 bfd * output_bfd,
9885 asection * input_section,
9886 bfd_byte * contents,
9887 Elf_Internal_Rela * rel,
9888 bfd_vma value,
9889 struct bfd_link_info * info,
9890 asection * sym_sec,
9891 const char * sym_name,
9892 unsigned char st_type,
9893 enum arm_st_branch_type branch_type,
9894 struct elf_link_hash_entry * h,
9895 bfd_boolean * unresolved_reloc_p,
9896 char ** error_message)
9897 {
9898 unsigned long r_type = howto->type;
9899 unsigned long r_symndx;
9900 bfd_byte * hit_data = contents + rel->r_offset;
9901 bfd_vma * local_got_offsets;
9902 bfd_vma * local_tlsdesc_gotents;
9903 asection * sgot;
9904 asection * splt;
9905 asection * sreloc = NULL;
9906 asection * srelgot;
9907 bfd_vma addend;
9908 bfd_signed_vma signed_addend;
9909 unsigned char dynreloc_st_type;
9910 bfd_vma dynreloc_value;
9911 struct elf32_arm_link_hash_table * globals;
9912 struct elf32_arm_link_hash_entry *eh;
9913 union gotplt_union *root_plt;
9914 struct arm_plt_info *arm_plt;
9915 bfd_vma plt_offset;
9916 bfd_vma gotplt_offset;
9917 bfd_boolean has_iplt_entry;
9918 bfd_boolean resolved_to_zero;
9919
9920 globals = elf32_arm_hash_table (info);
9921 if (globals == NULL)
9922 return bfd_reloc_notsupported;
9923
9924 BFD_ASSERT (is_arm_elf (input_bfd));
9925 BFD_ASSERT (howto != NULL);
9926
9927 /* Some relocation types map to different relocations depending on the
9928 target. We pick the right one here. */
9929 r_type = arm_real_reloc_type (globals, r_type);
9930
9931 /* It is possible to have linker relaxations on some TLS access
9932 models. Update our information here. */
9933 r_type = elf32_arm_tls_transition (info, r_type, h);
9934
9935 if (r_type != howto->type)
9936 howto = elf32_arm_howto_from_type (r_type);
9937
9938 eh = (struct elf32_arm_link_hash_entry *) h;
9939 sgot = globals->root.sgot;
9940 local_got_offsets = elf_local_got_offsets (input_bfd);
9941 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
9942
9943 if (globals->root.dynamic_sections_created)
9944 srelgot = globals->root.srelgot;
9945 else
9946 srelgot = NULL;
9947
9948 r_symndx = ELF32_R_SYM (rel->r_info);
9949
9950 if (globals->use_rel)
9951 {
9952 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
9953
9954 if (addend & ((howto->src_mask + 1) >> 1))
9955 {
9956 signed_addend = -1;
9957 signed_addend &= ~ howto->src_mask;
9958 signed_addend |= addend;
9959 }
9960 else
9961 signed_addend = addend;
9962 }
9963 else
9964 addend = signed_addend = rel->r_addend;
9965
9966 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9967 are resolving a function call relocation. */
9968 if (using_thumb_only (globals)
9969 && (r_type == R_ARM_THM_CALL
9970 || r_type == R_ARM_THM_JUMP24)
9971 && branch_type == ST_BRANCH_TO_ARM)
9972 branch_type = ST_BRANCH_TO_THUMB;
9973
9974 /* Record the symbol information that should be used in dynamic
9975 relocations. */
9976 dynreloc_st_type = st_type;
9977 dynreloc_value = value;
9978 if (branch_type == ST_BRANCH_TO_THUMB)
9979 dynreloc_value |= 1;
9980
9981 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
9982 VALUE appropriately for relocations that we resolve at link time. */
9983 has_iplt_entry = FALSE;
9984 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
9985 &arm_plt)
9986 && root_plt->offset != (bfd_vma) -1)
9987 {
9988 plt_offset = root_plt->offset;
9989 gotplt_offset = arm_plt->got_offset;
9990
9991 if (h == NULL || eh->is_iplt)
9992 {
9993 has_iplt_entry = TRUE;
9994 splt = globals->root.iplt;
9995
9996 /* Populate .iplt entries here, because not all of them will
9997 be seen by finish_dynamic_symbol. The lower bit is set if
9998 we have already populated the entry. */
9999 if (plt_offset & 1)
10000 plt_offset--;
10001 else
10002 {
10003 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10004 -1, dynreloc_value))
10005 root_plt->offset |= 1;
10006 else
10007 return bfd_reloc_notsupported;
10008 }
10009
10010 /* Static relocations always resolve to the .iplt entry. */
10011 st_type = STT_FUNC;
10012 value = (splt->output_section->vma
10013 + splt->output_offset
10014 + plt_offset);
10015 branch_type = ST_BRANCH_TO_ARM;
10016
10017 /* If there are non-call relocations that resolve to the .iplt
10018 entry, then all dynamic ones must too. */
10019 if (arm_plt->noncall_refcount != 0)
10020 {
10021 dynreloc_st_type = st_type;
10022 dynreloc_value = value;
10023 }
10024 }
10025 else
10026 /* We populate the .plt entry in finish_dynamic_symbol. */
10027 splt = globals->root.splt;
10028 }
10029 else
10030 {
10031 splt = NULL;
10032 plt_offset = (bfd_vma) -1;
10033 gotplt_offset = (bfd_vma) -1;
10034 }
10035
10036 resolved_to_zero = (h != NULL
10037 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10038
10039 switch (r_type)
10040 {
10041 case R_ARM_NONE:
10042 /* We don't need to find a value for this symbol. It's just a
10043 marker. */
10044 *unresolved_reloc_p = FALSE;
10045 return bfd_reloc_ok;
10046
10047 case R_ARM_ABS12:
10048 if (!globals->vxworks_p)
10049 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10050 /* Fall through. */
10051
10052 case R_ARM_PC24:
10053 case R_ARM_ABS32:
10054 case R_ARM_ABS32_NOI:
10055 case R_ARM_REL32:
10056 case R_ARM_REL32_NOI:
10057 case R_ARM_CALL:
10058 case R_ARM_JUMP24:
10059 case R_ARM_XPC25:
10060 case R_ARM_PREL31:
10061 case R_ARM_PLT32:
10062 /* Handle relocations which should use the PLT entry. ABS32/REL32
10063 will use the symbol's value, which may point to a PLT entry, but we
10064 don't need to handle that here. If we created a PLT entry, all
10065 branches in this object should go to it, except if the PLT is too
10066 far away, in which case a long branch stub should be inserted. */
10067 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10068 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10069 && r_type != R_ARM_CALL
10070 && r_type != R_ARM_JUMP24
10071 && r_type != R_ARM_PLT32)
10072 && plt_offset != (bfd_vma) -1)
10073 {
10074 /* If we've created a .plt section, and assigned a PLT entry
10075 to this function, it must either be a STT_GNU_IFUNC reference
10076 or not be known to bind locally. In other cases, we should
10077 have cleared the PLT entry by now. */
10078 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10079
10080 value = (splt->output_section->vma
10081 + splt->output_offset
10082 + plt_offset);
10083 *unresolved_reloc_p = FALSE;
10084 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10085 contents, rel->r_offset, value,
10086 rel->r_addend);
10087 }
10088
10089 /* When generating a shared object or relocatable executable, these
10090 relocations are copied into the output file to be resolved at
10091 run time. */
10092 if ((bfd_link_pic (info)
10093 || globals->root.is_relocatable_executable)
10094 && (input_section->flags & SEC_ALLOC)
10095 && !(globals->vxworks_p
10096 && strcmp (input_section->output_section->name,
10097 ".tls_vars") == 0)
10098 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10099 || !SYMBOL_CALLS_LOCAL (info, h))
10100 && !(input_bfd == globals->stub_bfd
10101 && strstr (input_section->name, STUB_SUFFIX))
10102 && (h == NULL
10103 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10104 && !resolved_to_zero)
10105 || h->root.type != bfd_link_hash_undefweak)
10106 && r_type != R_ARM_PC24
10107 && r_type != R_ARM_CALL
10108 && r_type != R_ARM_JUMP24
10109 && r_type != R_ARM_PREL31
10110 && r_type != R_ARM_PLT32)
10111 {
10112 Elf_Internal_Rela outrel;
10113 bfd_boolean skip, relocate;
10114
10115 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10116 && !h->def_regular)
10117 {
10118 char *v = _("shared object");
10119
10120 if (bfd_link_executable (info))
10121 v = _("PIE executable");
10122
10123 _bfd_error_handler
10124 (_("%pB: relocation %s against external or undefined symbol `%s'"
10125 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10126 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10127 return bfd_reloc_notsupported;
10128 }
10129
10130 *unresolved_reloc_p = FALSE;
10131
10132 if (sreloc == NULL && globals->root.dynamic_sections_created)
10133 {
10134 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10135 ! globals->use_rel);
10136
10137 if (sreloc == NULL)
10138 return bfd_reloc_notsupported;
10139 }
10140
10141 skip = FALSE;
10142 relocate = FALSE;
10143
10144 outrel.r_addend = addend;
10145 outrel.r_offset =
10146 _bfd_elf_section_offset (output_bfd, info, input_section,
10147 rel->r_offset);
10148 if (outrel.r_offset == (bfd_vma) -1)
10149 skip = TRUE;
10150 else if (outrel.r_offset == (bfd_vma) -2)
10151 skip = TRUE, relocate = TRUE;
10152 outrel.r_offset += (input_section->output_section->vma
10153 + input_section->output_offset);
10154
10155 if (skip)
10156 memset (&outrel, 0, sizeof outrel);
10157 else if (h != NULL
10158 && h->dynindx != -1
10159 && (!bfd_link_pic (info)
10160 || !(bfd_link_pie (info)
10161 || SYMBOLIC_BIND (info, h))
10162 || !h->def_regular))
10163 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10164 else
10165 {
10166 int symbol;
10167
10168 /* This symbol is local, or marked to become local. */
10169 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
10170 if (globals->symbian_p)
10171 {
10172 asection *osec;
10173
10174 /* On Symbian OS, the data segment and text segement
10175 can be relocated independently. Therefore, we
10176 must indicate the segment to which this
10177 relocation is relative. The BPABI allows us to
10178 use any symbol in the right segment; we just use
10179 the section symbol as it is convenient. (We
10180 cannot use the symbol given by "h" directly as it
10181 will not appear in the dynamic symbol table.)
10182
10183 Note that the dynamic linker ignores the section
10184 symbol value, so we don't subtract osec->vma
10185 from the emitted reloc addend. */
10186 if (sym_sec)
10187 osec = sym_sec->output_section;
10188 else
10189 osec = input_section->output_section;
10190 symbol = elf_section_data (osec)->dynindx;
10191 if (symbol == 0)
10192 {
10193 struct elf_link_hash_table *htab = elf_hash_table (info);
10194
10195 if ((osec->flags & SEC_READONLY) == 0
10196 && htab->data_index_section != NULL)
10197 osec = htab->data_index_section;
10198 else
10199 osec = htab->text_index_section;
10200 symbol = elf_section_data (osec)->dynindx;
10201 }
10202 BFD_ASSERT (symbol != 0);
10203 }
10204 else
10205 /* On SVR4-ish systems, the dynamic loader cannot
10206 relocate the text and data segments independently,
10207 so the symbol does not matter. */
10208 symbol = 0;
10209 if (dynreloc_st_type == STT_GNU_IFUNC)
10210 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10211 to the .iplt entry. Instead, every non-call reference
10212 must use an R_ARM_IRELATIVE relocation to obtain the
10213 correct run-time address. */
10214 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10215 else
10216 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10217 if (globals->use_rel)
10218 relocate = TRUE;
10219 else
10220 outrel.r_addend += dynreloc_value;
10221 }
10222
10223 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10224
10225 /* If this reloc is against an external symbol, we do not want to
10226 fiddle with the addend. Otherwise, we need to include the symbol
10227 value so that it becomes an addend for the dynamic reloc. */
10228 if (! relocate)
10229 return bfd_reloc_ok;
10230
10231 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10232 contents, rel->r_offset,
10233 dynreloc_value, (bfd_vma) 0);
10234 }
10235 else switch (r_type)
10236 {
10237 case R_ARM_ABS12:
10238 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10239
10240 case R_ARM_XPC25: /* Arm BLX instruction. */
10241 case R_ARM_CALL:
10242 case R_ARM_JUMP24:
10243 case R_ARM_PC24: /* Arm B/BL instruction. */
10244 case R_ARM_PLT32:
10245 {
10246 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10247
10248 if (r_type == R_ARM_XPC25)
10249 {
10250 /* Check for Arm calling Arm function. */
10251 /* FIXME: Should we translate the instruction into a BL
10252 instruction instead ? */
10253 if (branch_type != ST_BRANCH_TO_THUMB)
10254 _bfd_error_handler
10255 (_("\%pB: warning: %s BLX instruction targets"
10256 " %s function '%s'"),
10257 input_bfd, "ARM",
10258 "ARM", h ? h->root.root.string : "(local)");
10259 }
10260 else if (r_type == R_ARM_PC24)
10261 {
10262 /* Check for Arm calling Thumb function. */
10263 if (branch_type == ST_BRANCH_TO_THUMB)
10264 {
10265 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10266 output_bfd, input_section,
10267 hit_data, sym_sec, rel->r_offset,
10268 signed_addend, value,
10269 error_message))
10270 return bfd_reloc_ok;
10271 else
10272 return bfd_reloc_dangerous;
10273 }
10274 }
10275
10276 /* Check if a stub has to be inserted because the
10277 destination is too far or we are changing mode. */
10278 if ( r_type == R_ARM_CALL
10279 || r_type == R_ARM_JUMP24
10280 || r_type == R_ARM_PLT32)
10281 {
10282 enum elf32_arm_stub_type stub_type = arm_stub_none;
10283 struct elf32_arm_link_hash_entry *hash;
10284
10285 hash = (struct elf32_arm_link_hash_entry *) h;
10286 stub_type = arm_type_of_stub (info, input_section, rel,
10287 st_type, &branch_type,
10288 hash, value, sym_sec,
10289 input_bfd, sym_name);
10290
10291 if (stub_type != arm_stub_none)
10292 {
10293 /* The target is out of reach, so redirect the
10294 branch to the local stub for this function. */
10295 stub_entry = elf32_arm_get_stub_entry (input_section,
10296 sym_sec, h,
10297 rel, globals,
10298 stub_type);
10299 {
10300 if (stub_entry != NULL)
10301 value = (stub_entry->stub_offset
10302 + stub_entry->stub_sec->output_offset
10303 + stub_entry->stub_sec->output_section->vma);
10304
10305 if (plt_offset != (bfd_vma) -1)
10306 *unresolved_reloc_p = FALSE;
10307 }
10308 }
10309 else
10310 {
10311 /* If the call goes through a PLT entry, make sure to
10312 check distance to the right destination address. */
10313 if (plt_offset != (bfd_vma) -1)
10314 {
10315 value = (splt->output_section->vma
10316 + splt->output_offset
10317 + plt_offset);
10318 *unresolved_reloc_p = FALSE;
10319 /* The PLT entry is in ARM mode, regardless of the
10320 target function. */
10321 branch_type = ST_BRANCH_TO_ARM;
10322 }
10323 }
10324 }
10325
10326 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10327 where:
10328 S is the address of the symbol in the relocation.
10329 P is address of the instruction being relocated.
10330 A is the addend (extracted from the instruction) in bytes.
10331
10332 S is held in 'value'.
10333 P is the base address of the section containing the
10334 instruction plus the offset of the reloc into that
10335 section, ie:
10336 (input_section->output_section->vma +
10337 input_section->output_offset +
10338 rel->r_offset).
10339 A is the addend, converted into bytes, ie:
10340 (signed_addend * 4)
10341
10342 Note: None of these operations have knowledge of the pipeline
10343 size of the processor, thus it is up to the assembler to
10344 encode this information into the addend. */
10345 value -= (input_section->output_section->vma
10346 + input_section->output_offset);
10347 value -= rel->r_offset;
10348 if (globals->use_rel)
10349 value += (signed_addend << howto->size);
10350 else
10351 /* RELA addends do not have to be adjusted by howto->size. */
10352 value += signed_addend;
10353
10354 signed_addend = value;
10355 signed_addend >>= howto->rightshift;
10356
10357 /* A branch to an undefined weak symbol is turned into a jump to
10358 the next instruction unless a PLT entry will be created.
10359 Do the same for local undefined symbols (but not for STN_UNDEF).
10360 The jump to the next instruction is optimized as a NOP depending
10361 on the architecture. */
10362 if (h ? (h->root.type == bfd_link_hash_undefweak
10363 && plt_offset == (bfd_vma) -1)
10364 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10365 {
10366 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10367
10368 if (arch_has_arm_nop (globals))
10369 value |= 0x0320f000;
10370 else
10371 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10372 }
10373 else
10374 {
10375 /* Perform a signed range check. */
10376 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10377 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10378 return bfd_reloc_overflow;
10379
10380 addend = (value & 2);
10381
10382 value = (signed_addend & howto->dst_mask)
10383 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10384
10385 if (r_type == R_ARM_CALL)
10386 {
10387 /* Set the H bit in the BLX instruction. */
10388 if (branch_type == ST_BRANCH_TO_THUMB)
10389 {
10390 if (addend)
10391 value |= (1 << 24);
10392 else
10393 value &= ~(bfd_vma)(1 << 24);
10394 }
10395
10396 /* Select the correct instruction (BL or BLX). */
10397 /* Only if we are not handling a BL to a stub. In this
10398 case, mode switching is performed by the stub. */
10399 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10400 value |= (1 << 28);
10401 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10402 {
10403 value &= ~(bfd_vma)(1 << 28);
10404 value |= (1 << 24);
10405 }
10406 }
10407 }
10408 }
10409 break;
10410
10411 case R_ARM_ABS32:
10412 value += addend;
10413 if (branch_type == ST_BRANCH_TO_THUMB)
10414 value |= 1;
10415 break;
10416
10417 case R_ARM_ABS32_NOI:
10418 value += addend;
10419 break;
10420
10421 case R_ARM_REL32:
10422 value += addend;
10423 if (branch_type == ST_BRANCH_TO_THUMB)
10424 value |= 1;
10425 value -= (input_section->output_section->vma
10426 + input_section->output_offset + rel->r_offset);
10427 break;
10428
10429 case R_ARM_REL32_NOI:
10430 value += addend;
10431 value -= (input_section->output_section->vma
10432 + input_section->output_offset + rel->r_offset);
10433 break;
10434
10435 case R_ARM_PREL31:
10436 value -= (input_section->output_section->vma
10437 + input_section->output_offset + rel->r_offset);
10438 value += signed_addend;
10439 if (! h || h->root.type != bfd_link_hash_undefweak)
10440 {
10441 /* Check for overflow. */
10442 if ((value ^ (value >> 1)) & (1 << 30))
10443 return bfd_reloc_overflow;
10444 }
10445 value &= 0x7fffffff;
10446 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10447 if (branch_type == ST_BRANCH_TO_THUMB)
10448 value |= 1;
10449 break;
10450 }
10451
10452 bfd_put_32 (input_bfd, value, hit_data);
10453 return bfd_reloc_ok;
10454
    case R_ARM_ABS8:
      /* PR 16202: Refetch the addend using the correct size.  */
      if (globals->use_rel)
        addend = bfd_get_8 (input_bfd, hit_data);
      value += addend;

      /* There is no way to tell whether the user intended to use a signed or
         unsigned addend.  When checking for overflow we accept either,
         as specified by the AAELF.  */
      if ((long) value > 0xff || (long) value < -0x80)
        return bfd_reloc_overflow;

      bfd_put_8 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_ABS16:
      /* PR 16202: Refetch the addend using the correct size.  */
      if (globals->use_rel)
        addend = bfd_get_16 (input_bfd, hit_data);
      value += addend;

      /* See comment for R_ARM_ABS8.  */
      if ((long) value > 0xffff || (long) value < -0x8000)
        return bfd_reloc_overflow;

      bfd_put_16 (input_bfd, value, hit_data);
      return bfd_reloc_ok;
10482
    case R_ARM_THM_ABS5:
      /* Support ldr and str instructions for the thumb.  */
      /* NOTE(review): the field masks come from howto->src_mask/dst_mask
         in the howto table (defined earlier in this file) -- presumably
         the 5-bit immediate of the Thumb LDR/STR encoding; confirm
         against the table entry.  */
      if (globals->use_rel)
        {
          /* Need to refetch addend.  */
          addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
          /* ??? Need to determine shift amount from operand size.  */
          addend >>= howto->rightshift;
        }
      value += addend;

      /* ??? Isn't value unsigned?  */
      if ((long) value > 0x1f || (long) value < -0x10)
        return bfd_reloc_overflow;

      /* ??? Value needs to be properly shifted into place first.  */
      value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
      bfd_put_16 (input_bfd, value, hit_data);
      return bfd_reloc_ok;
10502
    case R_ARM_THM_ALU_PREL_11_0:
      /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
      {
        bfd_vma insn;
        bfd_signed_vma relocation;

        /* The 32-bit Thumb-2 instruction is stored as two halfwords.  */
        insn = (bfd_get_16 (input_bfd, hit_data) << 16)
          | bfd_get_16 (input_bfd, hit_data + 2);

        if (globals->use_rel)
          {
            /* Reassemble the 12-bit immediate from the imm8 (bits 0-7),
               imm3 (bits 12-14) and i (bit 26) fields of the encoding.  */
            signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
              | ((insn & (1 << 26)) >> 15);
            /* A nonzero op field means the insn is a SUBW, i.e. the
               encoded offset is negative.  */
            if (insn & 0xf00000)
              signed_addend = -signed_addend;
          }

        relocation = value + signed_addend;
        relocation -= Pa (input_section->output_section->vma
                          + input_section->output_offset
                          + rel->r_offset);

        /* PR 21523: Use an absolute value.  The user of this reloc will
           have already selected an ADD or SUB insn appropriately.  */
        value = labs (relocation);

        if (value >= 0x1000)
          return bfd_reloc_overflow;

        /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
        if (branch_type == ST_BRANCH_TO_THUMB)
          value |= 1;

        /* Splice the immediate back into the imm8/imm3/i fields.  */
        insn = (insn & 0xfb0f8f00) | (value & 0xff)
          | ((value & 0x700) << 4)
          | ((value & 0x800) << 15);
        /* Negative offset: turn the instruction into a SUBW.  */
        if (relocation < 0)
          insn |= 0xa00000;

        bfd_put_16 (input_bfd, insn >> 16, hit_data);
        bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

        return bfd_reloc_ok;
      }
10547
    case R_ARM_THM_PC8:
      /* PR 10073: This reloc is not generated by the GNU toolchain,
         but it is supported for compatibility with third party libraries
         generated by other compilers, specifically the ARM/IAR.  */
      {
        bfd_vma insn;
        bfd_signed_vma relocation;

        insn = bfd_get_16 (input_bfd, hit_data);

        /* Decode the 8-bit word offset; the +4/& 0x3ff/-4 sequence
           reproduces the IAR addend convention mentioned above --
           TODO confirm against IAR-produced objects.  */
        if (globals->use_rel)
          addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;

        relocation = value + addend;
        /* Pa: PC value aligned down to a word boundary, per the
           Thumb PC-relative load semantics (macro defined earlier
           in this file).  */
        relocation -= Pa (input_section->output_section->vma
                          + input_section->output_offset
                          + rel->r_offset);

        value = relocation;

        /* We do not check for overflow of this reloc.  Although strictly
           speaking this is incorrect, it appears to be necessary in order
           to work with IAR generated relocs.  Since GCC and GAS do not
           generate R_ARM_THM_PC8 relocs, the lack of a check should not be
           a problem for them.  */
        value &= 0x3fc;

        insn = (insn & 0xff00) | (value >> 2);

        bfd_put_16 (input_bfd, insn, hit_data);

        return bfd_reloc_ok;
      }
10581
    case R_ARM_THM_PC12:
      /* Corresponds to: ldr.w reg, [pc, #offset].  */
      {
        bfd_vma insn;
        bfd_signed_vma relocation;

        insn = (bfd_get_16 (input_bfd, hit_data) << 16)
          | bfd_get_16 (input_bfd, hit_data + 2);

        if (globals->use_rel)
          {
            /* imm12 field; bit 23 is the U (add/subtract) flag.  */
            signed_addend = insn & 0xfff;
            if (!(insn & (1 << 23)))
              signed_addend = -signed_addend;
          }

        relocation = value + signed_addend;
        relocation -= Pa (input_section->output_section->vma
                          + input_section->output_offset
                          + rel->r_offset);

        value = relocation;

        /* NOTE(review): a negative RELOCATION wraps when assigned to the
           unsigned VALUE and therefore always trips this check, so
           backward references are rejected as overflow -- verify this is
           intended (compare the labs() handling in
           R_ARM_THM_ALU_PREL_11_0 above).  */
        if (value >= 0x1000)
          return bfd_reloc_overflow;

        /* Re-insert imm12 and set the U bit for a non-negative offset.  */
        insn = (insn & 0xff7ff000) | value;
        if (relocation >= 0)
          insn |= (1 << 23);

        bfd_put_16 (input_bfd, insn >> 16, hit_data);
        bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

        return bfd_reloc_ok;
      }
10617
10618 case R_ARM_THM_XPC22:
10619 case R_ARM_THM_CALL:
10620 case R_ARM_THM_JUMP24:
10621 /* Thumb BL (branch long instruction). */
10622 {
10623 bfd_vma relocation;
10624 bfd_vma reloc_sign;
10625 bfd_boolean overflow = FALSE;
10626 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
10627 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
10628 bfd_signed_vma reloc_signed_max;
10629 bfd_signed_vma reloc_signed_min;
10630 bfd_vma check;
10631 bfd_signed_vma signed_check;
10632 int bitsize;
10633 const int thumb2 = using_thumb2 (globals);
10634 const int thumb2_bl = using_thumb2_bl (globals);
10635
10636 /* A branch to an undefined weak symbol is turned into a jump to
10637 the next instruction unless a PLT entry will be created.
10638 The jump to the next instruction is optimized as a NOP.W for
10639 Thumb-2 enabled architectures. */
10640 if (h && h->root.type == bfd_link_hash_undefweak
10641 && plt_offset == (bfd_vma) -1)
10642 {
10643 if (thumb2)
10644 {
10645 bfd_put_16 (input_bfd, 0xf3af, hit_data);
10646 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
10647 }
10648 else
10649 {
10650 bfd_put_16 (input_bfd, 0xe000, hit_data);
10651 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
10652 }
10653 return bfd_reloc_ok;
10654 }
10655
10656 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
10657 with Thumb-1) involving the J1 and J2 bits. */
10658 if (globals->use_rel)
10659 {
10660 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
10661 bfd_vma upper = upper_insn & 0x3ff;
10662 bfd_vma lower = lower_insn & 0x7ff;
10663 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
10664 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
10665 bfd_vma i1 = j1 ^ s ? 0 : 1;
10666 bfd_vma i2 = j2 ^ s ? 0 : 1;
10667
10668 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
10669 /* Sign extend. */
10670 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
10671
10672 signed_addend = addend;
10673 }
10674
10675 if (r_type == R_ARM_THM_XPC22)
10676 {
10677 /* Check for Thumb to Thumb call. */
10678 /* FIXME: Should we translate the instruction into a BL
10679 instruction instead ? */
10680 if (branch_type == ST_BRANCH_TO_THUMB)
10681 _bfd_error_handler
10682 (_("%pB: warning: %s BLX instruction targets"
10683 " %s function '%s'"),
10684 input_bfd, "Thumb",
10685 "Thumb", h ? h->root.root.string : "(local)");
10686 }
10687 else
10688 {
10689 /* If it is not a call to Thumb, assume call to Arm.
10690 If it is a call relative to a section name, then it is not a
10691 function call at all, but rather a long jump. Calls through
10692 the PLT do not require stubs. */
10693 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
10694 {
10695 if (globals->use_blx && r_type == R_ARM_THM_CALL)
10696 {
10697 /* Convert BL to BLX. */
10698 lower_insn = (lower_insn & ~0x1000) | 0x0800;
10699 }
10700 else if (( r_type != R_ARM_THM_CALL)
10701 && (r_type != R_ARM_THM_JUMP24))
10702 {
10703 if (elf32_thumb_to_arm_stub
10704 (info, sym_name, input_bfd, output_bfd, input_section,
10705 hit_data, sym_sec, rel->r_offset, signed_addend, value,
10706 error_message))
10707 return bfd_reloc_ok;
10708 else
10709 return bfd_reloc_dangerous;
10710 }
10711 }
10712 else if (branch_type == ST_BRANCH_TO_THUMB
10713 && globals->use_blx
10714 && r_type == R_ARM_THM_CALL)
10715 {
10716 /* Make sure this is a BL. */
10717 lower_insn |= 0x1800;
10718 }
10719 }
10720
10721 enum elf32_arm_stub_type stub_type = arm_stub_none;
10722 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
10723 {
10724 /* Check if a stub has to be inserted because the destination
10725 is too far. */
10726 struct elf32_arm_stub_hash_entry *stub_entry;
10727 struct elf32_arm_link_hash_entry *hash;
10728
10729 hash = (struct elf32_arm_link_hash_entry *) h;
10730
10731 stub_type = arm_type_of_stub (info, input_section, rel,
10732 st_type, &branch_type,
10733 hash, value, sym_sec,
10734 input_bfd, sym_name);
10735
10736 if (stub_type != arm_stub_none)
10737 {
10738 /* The target is out of reach or we are changing modes, so
10739 redirect the branch to the local stub for this
10740 function. */
10741 stub_entry = elf32_arm_get_stub_entry (input_section,
10742 sym_sec, h,
10743 rel, globals,
10744 stub_type);
10745 if (stub_entry != NULL)
10746 {
10747 value = (stub_entry->stub_offset
10748 + stub_entry->stub_sec->output_offset
10749 + stub_entry->stub_sec->output_section->vma);
10750
10751 if (plt_offset != (bfd_vma) -1)
10752 *unresolved_reloc_p = FALSE;
10753 }
10754
10755 /* If this call becomes a call to Arm, force BLX. */
10756 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
10757 {
10758 if ((stub_entry
10759 && !arm_stub_is_thumb (stub_entry->stub_type))
10760 || branch_type != ST_BRANCH_TO_THUMB)
10761 lower_insn = (lower_insn & ~0x1000) | 0x0800;
10762 }
10763 }
10764 }
10765
10766 /* Handle calls via the PLT. */
10767 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
10768 {
10769 value = (splt->output_section->vma
10770 + splt->output_offset
10771 + plt_offset);
10772
10773 if (globals->use_blx
10774 && r_type == R_ARM_THM_CALL
10775 && ! using_thumb_only (globals))
10776 {
10777 /* If the Thumb BLX instruction is available, convert
10778 the BL to a BLX instruction to call the ARM-mode
10779 PLT entry. */
10780 lower_insn = (lower_insn & ~0x1000) | 0x0800;
10781 branch_type = ST_BRANCH_TO_ARM;
10782 }
10783 else
10784 {
10785 if (! using_thumb_only (globals))
10786 /* Target the Thumb stub before the ARM PLT entry. */
10787 value -= PLT_THUMB_STUB_SIZE;
10788 branch_type = ST_BRANCH_TO_THUMB;
10789 }
10790 *unresolved_reloc_p = FALSE;
10791 }
10792
10793 relocation = value + signed_addend;
10794
10795 relocation -= (input_section->output_section->vma
10796 + input_section->output_offset
10797 + rel->r_offset);
10798
10799 check = relocation >> howto->rightshift;
10800
10801 /* If this is a signed value, the rightshift just dropped
10802 leading 1 bits (assuming twos complement). */
10803 if ((bfd_signed_vma) relocation >= 0)
10804 signed_check = check;
10805 else
10806 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
10807
10808 /* Calculate the permissable maximum and minimum values for
10809 this relocation according to whether we're relocating for
10810 Thumb-2 or not. */
10811 bitsize = howto->bitsize;
10812 if (!thumb2_bl)
10813 bitsize -= 2;
10814 reloc_signed_max = (1 << (bitsize - 1)) - 1;
10815 reloc_signed_min = ~reloc_signed_max;
10816
10817 /* Assumes two's complement. */
10818 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10819 overflow = TRUE;
10820
10821 if ((lower_insn & 0x5000) == 0x4000)
10822 /* For a BLX instruction, make sure that the relocation is rounded up
10823 to a word boundary. This follows the semantics of the instruction
10824 which specifies that bit 1 of the target address will come from bit
10825 1 of the base address. */
10826 relocation = (relocation + 2) & ~ 3;
10827
10828 /* Put RELOCATION back into the insn. Assumes two's complement.
10829 We use the Thumb-2 encoding, which is safe even if dealing with
10830 a Thumb-1 instruction by virtue of our overflow check above. */
10831 reloc_sign = (signed_check < 0) ? 1 : 0;
10832 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
10833 | ((relocation >> 12) & 0x3ff)
10834 | (reloc_sign << 10);
10835 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
10836 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
10837 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
10838 | ((relocation >> 1) & 0x7ff);
10839
10840 /* Put the relocated value back in the object file: */
10841 bfd_put_16 (input_bfd, upper_insn, hit_data);
10842 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10843
10844 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10845 }
10846 break;
10847
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction.  */
      {
        bfd_vma relocation;
        bfd_boolean overflow = FALSE;
        bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
        bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
        /* B<cond>.W has a 20-bit signed offset (even values only).  */
        bfd_signed_vma reloc_signed_max = 0xffffe;
        bfd_signed_vma reloc_signed_min = -0x100000;
        bfd_signed_vma signed_check;
        enum elf32_arm_stub_type stub_type = arm_stub_none;
        struct elf32_arm_stub_hash_entry *stub_entry;
        struct elf32_arm_link_hash_entry *hash;

        /* Need to refetch the addend, reconstruct the top three bits,
           and squish the two 11 bit pieces together.  */
        if (globals->use_rel)
          {
            bfd_vma S = (upper_insn & 0x0400) >> 10;
            bfd_vma upper = (upper_insn & 0x003f);
            bfd_vma J1 = (lower_insn & 0x2000) >> 13;
            bfd_vma J2 = (lower_insn & 0x0800) >> 11;
            bfd_vma lower = (lower_insn & 0x07ff);

            upper |= J1 << 6;
            upper |= J2 << 7;
            upper |= (!S) << 8;
            upper -= 0x0100; /* Sign extend.  */

            addend = (upper << 12) | (lower << 1);
            signed_addend = addend;
          }

        /* Handle calls via the PLT.  */
        if (plt_offset != (bfd_vma) -1)
          {
            value = (splt->output_section->vma
                     + splt->output_offset
                     + plt_offset);
            /* Target the Thumb stub before the ARM PLT entry.  */
            value -= PLT_THUMB_STUB_SIZE;
            *unresolved_reloc_p = FALSE;
          }

        hash = (struct elf32_arm_link_hash_entry *)h;

        /* If the destination is unreachable (or needs a mode change),
           redirect the branch to a local stub.  */
        stub_type = arm_type_of_stub (info, input_section, rel,
                                      st_type, &branch_type,
                                      hash, value, sym_sec,
                                      input_bfd, sym_name);
        if (stub_type != arm_stub_none)
          {
            stub_entry = elf32_arm_get_stub_entry (input_section,
                                                   sym_sec, h,
                                                   rel, globals,
                                                   stub_type);
            if (stub_entry != NULL)
              {
                value = (stub_entry->stub_offset
                         + stub_entry->stub_sec->output_offset
                         + stub_entry->stub_sec->output_section->vma);
              }
          }

        relocation = value + signed_addend;
        relocation -= (input_section->output_section->vma
                       + input_section->output_offset
                       + rel->r_offset);
        signed_check = (bfd_signed_vma) relocation;

        if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
          overflow = TRUE;

        /* Put RELOCATION back into the insn.  */
        {
          bfd_vma S = (relocation & 0x00100000) >> 20;
          bfd_vma J2 = (relocation & 0x00080000) >> 19;
          bfd_vma J1 = (relocation & 0x00040000) >> 18;
          bfd_vma hi = (relocation & 0x0003f000) >> 12;
          bfd_vma lo = (relocation & 0x00000ffe) >> 1;

          upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
          lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
        }

        /* Put the relocated value back in the object file:  */
        bfd_put_16 (input_bfd, upper_insn, hit_data);
        bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

        return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
10939
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction).  */
      {
        bfd_signed_vma relocation;
        bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
        bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
        bfd_signed_vma signed_check;

        /* CZB cannot jump backward.  */
        if (r_type == R_ARM_THM_JUMP6)
          reloc_signed_min = 0;

        if (globals->use_rel)
          {
            /* Need to refetch addend.  */
            addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
            /* Sign-extend the refetched field if its top bit is set.  */
            if (addend & ((howto->src_mask + 1) >> 1))
              {
                signed_addend = -1;
                signed_addend &= ~ howto->src_mask;
                signed_addend |= addend;
              }
            else
              signed_addend = addend;
            /* The value in the insn has been right shifted.  We need to
               undo this, so that we can perform the address calculation
               in terms of bytes.  */
            signed_addend <<= howto->rightshift;
          }
        relocation = value + signed_addend;

        relocation -= (input_section->output_section->vma
                       + input_section->output_offset
                       + rel->r_offset);

        relocation >>= howto->rightshift;
        signed_check = relocation;

        /* CBZ/CBNZ splits the offset into i:imm5 fields.  */
        if (r_type == R_ARM_THM_JUMP6)
          relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
        else
          relocation &= howto->dst_mask;
        relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

        bfd_put_16 (input_bfd, relocation, hit_data);

        /* Assumes two's complement.  */
        if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
          return bfd_reloc_overflow;

        return bfd_reloc_ok;
      }
10994
10995 case R_ARM_ALU_PCREL7_0:
10996 case R_ARM_ALU_PCREL15_8:
10997 case R_ARM_ALU_PCREL23_15:
10998 {
10999 bfd_vma insn;
11000 bfd_vma relocation;
11001
11002 insn = bfd_get_32 (input_bfd, hit_data);
11003 if (globals->use_rel)
11004 {
11005 /* Extract the addend. */
11006 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11007 signed_addend = addend;
11008 }
11009 relocation = value + signed_addend;
11010
11011 relocation -= (input_section->output_section->vma
11012 + input_section->output_offset
11013 + rel->r_offset);
11014 insn = (insn & ~0xfff)
11015 | ((howto->bitpos << 7) & 0xf00)
11016 | ((relocation >> howto->bitpos) & 0xff);
11017 bfd_put_32 (input_bfd, value, hit_data);
11018 }
11019 return bfd_reloc_ok;
11020
11021 case R_ARM_GNU_VTINHERIT:
11022 case R_ARM_GNU_VTENTRY:
11023 return bfd_reloc_ok;
11024
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11048
    case R_ARM_GOTPC:
      /* Use global offset table (start of the .got output section)
	 as symbol value.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* The GOT address is known at final link time, so this reloc is
	 always fully resolved here.  */
      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11061
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  R_ARM_GOT32 resolves to the GOT-relative
	 offset of the entry; R_ARM_GOT_PREL additionally adds the GOT
	 section's VMA (see the r_type check at the bottom).  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  /* Global symbol: use (and possibly initialize) the GOT slot
	     recorded in the hash entry.  */
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol; the low bit marks the entry as done.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;

	      if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info)
			   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			       || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    /* No dynamic relocation needed.  */
		    outrel.r_info = 0;
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (outrel.r_info == 0 || globals->use_rel))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}
	      /* Mark this GOT entry as initialized.  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: the GOT slot comes from local_got_offsets.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL
		      && local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      if (globals->use_rel)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
		{
		  Elf_Internal_Rela outrel;

		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      /* R_ARM_GOT_PREL wants the absolute address of the GOT entry;
	 R_ARM_GOT32 wants it relative to the GOT base.  */
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11194
    case R_ARM_TLS_LDO32:
      /* Offset of the symbol within the TLS block (local-dynamic).  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11201
    case R_ARM_TLS_LDM32:
      /* Local-dynamic: resolve to the (shared) module-ID GOT entry,
	 PC-relative.  The entry is created lazily on first use.  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	/* Low bit set means the entry has already been emitted.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_pic (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Static link: the module is always 1, the executable.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	/* PC-relative address of the GOT entry.  */
	value = sgot->output_section->vma + sgot->output_offset + off
	  - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
11248
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic, initial-exec and TLS-descriptor accesses.
	 Initializes the required GOT/.got.plt entries (emitting dynamic
	 relocations when needed) and then resolves the instruction to
	 point at them, possibly via a TLS trampoline stub.  */
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    /* Global symbol: decide whether it needs a dynamic symbol
	       index and fetch its GOT/tlsdesc offsets.  */
	    bfd_boolean dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = FALSE;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    BFD_ASSERT (local_got_offsets != NULL);
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxation happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE(rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	/* Low bit of OFF marks the GOT entries as already written.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bfd_boolean need_relocs = FALSE;
	    Elf_Internal_Rela outrel;
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_pic (info) || indx != 0)
		&& (h == NULL
		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			&& !resolved_to_zero)
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = TRUE;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_pic (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		if (need_relocs)
		  {
		    /* Module-ID word, plus DTPOFF word for dynamic
		       symbols.  */
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		if (need_relocs)
		  {
		    /* TPOFF word, resolved by the dynamic linker.  */
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	/* Skip the GD pair when the reloc wants the IE entry; for a
	   descriptor the relevant offset is in .got.plt.  */
	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
	      {
		/* Patch an ARM BL/BLX to the trampoline or stub.  */
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations need special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    data = bfd_get_32 (input_bfd, hit_data);
	    /* Low bit of the stored word flags a Thumb consumer.  */
	    thumb = data & 1;
	    data &= ~1u;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl/blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "Thumb", insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "ARM", insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    /* PC-relative distance to the .got.plt descriptor slot.  */
	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  /* Plain GD32/IE32: PC-relative distance to the GOT entry.  */
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
11603
    case R_ARM_TLS_LE32:
      /* Local-exec: only valid when the module is not a shared
	 object, since the offset is fixed relative to the thread
	 pointer at static link time.  */
      if (bfd_link_dll (info))
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
	       "in shared object"),
	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
	  return bfd_reloc_notsupported;
	}
      else
	value = tpoff (info, value);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11620
    case R_ARM_V4BX:
      /* Rewrite BX Rm for ARMv4 targets lacking BX: either branch to
	 an interworking veneer (--fix-v4bx=2) or degrade to
	 MOV PC, Rm.  */
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;
	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      /* PC-relative branch offset; +8 for ARM pipeline.  */
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits).  Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;
11650
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      /* ARM MOVW/MOVT: patch the split 16-bit immediate
	 (imm4:imm12 in bits 19:16 and 11:0).  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit addend from the insn's split
	       immediate and sign-extend it.  */
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants want the high half.  */
	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11693
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      /* Thumb-2 MOVW/MOVT: immediate split as imm4:i:imm3:imm8 across
	 the two halfwords.  */
      {
	bfd_vma insn;

	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit addend from the scattered
	       immediate fields and sign-extend it.  */
	    addend = ((insn >> 4)  & 0xf000)
		     | ((insn >> 15) & 0x0800)
		     | ((insn >> 4)  & 0x0700)
		     | (insn	     & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants want the high half.  */
	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
11747
    case R_ARM_ALU_PC_G0_NC:
    case R_ARM_ALU_PC_G1_NC:
    case R_ARM_ALU_PC_G0:
    case R_ARM_ALU_PC_G1:
    case R_ARM_ALU_PC_G2:
    case R_ARM_ALU_SB_G0_NC:
    case R_ARM_ALU_SB_G1_NC:
    case R_ARM_ALU_SB_G0:
    case R_ARM_ALU_SB_G1:
    case R_ARM_ALU_SB_G2:
      /* AAELF group relocations for ADD/SUB: encode group N of the
	 PC- or SB-relative offset as a rotated 8-bit immediate.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_vma g_n;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which group of bits to select.  */
	switch (r_type)
	  {
	  case R_ARM_ALU_PC_G0_NC:
	  case R_ARM_ALU_PC_G0:
	  case R_ARM_ALU_SB_G0_NC:
	  case R_ARM_ALU_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_ALU_PC_G1_NC:
	  case R_ARM_ALU_PC_G1:
	  case R_ARM_ALU_SB_G1_NC:
	  case R_ARM_ALU_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_ALU_PC_G2:
	  case R_ARM_ALU_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative;
	    bfd_vma constant = insn & 0xff;
	    bfd_vma rotation = (insn & 0xf00) >> 8;

	    if (rotation == 0)
	      signed_addend = constant;
	    else
	      {
		/* Compensate for the fact that in the instruction, the
		   rotation is stored in multiples of 2 bits.  */
		rotation *= 2;

		/* Rotate "constant" right by "rotation" bits.  */
		signed_addend = (constant >> rotation) |
				(constant << (8 * sizeof (bfd_vma) - rotation));
	      }

	    /* Determine if the instruction is an ADD or a SUB.
	       (For REL, this determines the sign of the addend.)  */
	    negative = identify_add_or_sub (insn);
	    if (negative == 0)
	      {
		_bfd_error_handler
		  /* xgettext:c-format */
		  (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
		     "are allowed for ALU group relocations"),
		  input_bfd, input_section, (uint64_t) rel->r_offset);
		return bfd_reloc_overflow;
	      }

	    signed_addend *= negative;
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_ALU_PC_G0_NC
	    || r_type == R_ARM_ALU_PC_G1_NC
	    || r_type == R_ARM_ALU_PC_G0
	    || r_type == R_ARM_ALU_PC_G1
	    || r_type == R_ARM_ALU_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* If the target symbol is a Thumb function, then set the
	   Thumb bit in the address.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  signed_value |= 1;

	/* Calculate the value of the relevant G_n, in encoded
	   constant-with-rotation format.  */
	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
					  group, &residual);

	/* Check for overflow if required.  Only the non-_NC variants
	   require that the whole value be consumed.  */
	if ((r_type == R_ARM_ALU_PC_G0
	     || r_type == R_ARM_ALU_PC_G1
	     || r_type == R_ARM_ALU_PC_G2
	     || r_type == R_ARM_ALU_SB_G0
	     || r_type == R_ARM_ALU_SB_G1
	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and the ADD/SUB part of the opcode; take care
	   not to destroy the S bit.  */
	insn &= 0xff1ff000;

	/* Set the opcode according to whether the value to go in the
	   place is negative.  */
	if (signed_value < 0)
	  insn |= 1 << 22;
	else
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= g_n;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11889
    case R_ARM_LDR_PC_G0:
    case R_ARM_LDR_PC_G1:
    case R_ARM_LDR_PC_G2:
    case R_ARM_LDR_SB_G0:
    case R_ARM_LDR_SB_G1:
    case R_ARM_LDR_SB_G2:
      /* AAELF group relocations for LDR/STR: the residual after the
	 preceding groups must fit in the 12-bit offset field.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDR_PC_G0:
	  case R_ARM_LDR_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDR_PC_G1:
	  case R_ARM_LDR_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDR_PC_G2:
	  case R_ARM_LDR_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* The U bit (23) gives the sign of the 12-bit offset.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (insn & 0xfff);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDR_PC_G0
	    || r_type == R_ARM_LDR_PC_G1
	    || r_type == R_ARM_LDR_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x1000)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff000;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11977
    case R_ARM_LDRS_PC_G0:
    case R_ARM_LDRS_PC_G1:
    case R_ARM_LDRS_PC_G2:
    case R_ARM_LDRS_SB_G0:
    case R_ARM_LDRS_SB_G1:
    case R_ARM_LDRS_SB_G2:
      /* AAELF group relocations for LDRD/STRD/LDRH/STRH etc.: the
	 residual must fit the split 8-bit offset (imm4H:imm4L).  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDRS_PC_G0:
	  case R_ARM_LDRS_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDRS_PC_G1:
	  case R_ARM_LDRS_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDRS_PC_G2:
	  case R_ARM_LDRS_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* The U bit (23) gives the sign; the offset is split
	       across bits 11:8 and 3:0.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDRS_PC_G0
	    || r_type == R_ARM_LDRS_PC_G1
	    || r_type == R_ARM_LDRS_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x100)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff0f0;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
12065
    case R_ARM_LDC_PC_G0:
    case R_ARM_LDC_PC_G1:
    case R_ARM_LDC_PC_G2:
    case R_ARM_LDC_SB_G0:
    case R_ARM_LDC_SB_G1:
    case R_ARM_LDC_SB_G2:
      /* AAELF group relocations for LDC/STC: the residual must be a
	 multiple of four and fit the scaled 8-bit offset field.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDC_PC_G0:
	  case R_ARM_LDC_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDC_PC_G1:
	  case R_ARM_LDC_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDC_PC_G2:
	  case R_ARM_LDC_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* The U bit (23) gives the sign; the stored 8-bit offset
	       is in units of four bytes.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * ((insn & 0xff) << 2);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDC_PC_G0
	    || r_type == R_ARM_LDC_PC_G1
	    || r_type == R_ARM_LDC_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  (The absolute value to go in the place must be
	   divisible by four and, after having been divided by four, must
	   fit in eight bits.)  */
	if ((residual & 0x3) != 0 || residual >= 0x400)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7fff00;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual >> 2;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
12155
12156 case R_ARM_THM_ALU_ABS_G0_NC:
12157 case R_ARM_THM_ALU_ABS_G1_NC:
12158 case R_ARM_THM_ALU_ABS_G2_NC:
12159 case R_ARM_THM_ALU_ABS_G3_NC:
12160 {
12161 const int shift_array[4] = {0, 8, 16, 24};
12162 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12163 bfd_vma addr = value;
12164 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12165
12166 /* Compute address. */
12167 if (globals->use_rel)
12168 signed_addend = insn & 0xff;
12169 addr += signed_addend;
12170 if (branch_type == ST_BRANCH_TO_THUMB)
12171 addr |= 1;
12172 /* Clean imm8 insn. */
12173 insn &= 0xff00;
12174 /* And update with correct part of address. */
12175 insn |= (addr >> shift) & 0xff;
12176 /* Update insn. */
12177 bfd_put_16 (input_bfd, insn, hit_data);
12178 }
12179
12180 *unresolved_reloc_p = FALSE;
12181 return bfd_reloc_ok;
12182
12183 default:
12184 return bfd_reloc_notsupported;
12185 }
12186 }
12187
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  Used for REL
   relocations, where the addend lives in the section contents and so
   must be patched in place.  */
static void
arm_add_to_rel (bfd * abfd,
		bfd_byte * address,
		reloc_howto_type * howto,
		bfd_signed_vma increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      /* Thumb BL/B.W store the branch offset split across the two
	 16-bit halfwords of the instruction: 11 bits in each.  */
      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* Reassemble the byte offset (bit 0 is implicit), adjust it,
	 then scale it back down to halfwords.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      /* Split the adjusted offset back into the two instruction
	 halfwords, preserving the opcode bits.  */
      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  /* Top bit of the field is set: sign-extend it.  */
	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* Branch offsets are stored right-shifted; convert to bytes
	     before adding the increment, then drop the low bits again.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
12261
/* Nonzero if R_TYPE is any ARM TLS relocation, in either the generic
   dialect or the GNU descriptor dialect below.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
12280
/* Relocate an ARM ELF section.  Applies the relocations in RELOCS to
   CONTENTS of INPUT_SECTION, resolving local symbols via LOCAL_SYMS /
   LOCAL_SECTIONS and global symbols via the link hash table.  Returns
   FALSE on a fatal error, TRUE otherwise.  */

static bfd_boolean
elf32_arm_relocate_section (bfd * output_bfd,
			    struct bfd_link_info * info,
			    bfd * input_bfd,
			    asection * input_section,
			    bfd_byte * contents,
			    Elf_Internal_Rela * relocs,
			    Elf_Internal_Sym * local_syms,
			    asection ** local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);

      /* These relocation types require no processing here.  */
      if ( r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* A local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      /* For REL, the addend is stored in the section contents.
		 For SEC_MERGE sections it must be re-extracted here and
		 rebiased against the post-merge location of the data.  */
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  /* Extract the addend held in the instruction; the
		     MOVW/MOVT forms scatter it across the encoding.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Only simple contiguous fields with no shift can
			 be handled generically here.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#" PRIx64 "): "
			       "%s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (uint64_t) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* A global symbol: resolve it through the hash table.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Diagnose a TLS relocation used with a non-TLS symbol, and
	 vice versa, for defined symbols.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}
12626
12627 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
12628 adds the edit to the start of the list. (The list must be built in order of
12629 ascending TINDEX: the function's callers are primarily responsible for
12630 maintaining that condition). */
12631
12632 static void
12633 add_unwind_table_edit (arm_unwind_table_edit **head,
12634 arm_unwind_table_edit **tail,
12635 arm_unwind_edit_type type,
12636 asection *linked_section,
12637 unsigned int tindex)
12638 {
12639 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
12640 xmalloc (sizeof (arm_unwind_table_edit));
12641
12642 new_edit->type = type;
12643 new_edit->linked_section = linked_section;
12644 new_edit->index = tindex;
12645
12646 if (tindex > 0)
12647 {
12648 new_edit->next = NULL;
12649
12650 if (*tail)
12651 (*tail)->next = new_edit;
12652
12653 (*tail) = new_edit;
12654
12655 if (!*head)
12656 (*head) = new_edit;
12657 }
12658 else
12659 {
12660 new_edit->next = *head;
12661
12662 if (!*tail)
12663 *tail = new_edit;
12664
12665 *head = new_edit;
12666 }
12667 }
12668
12669 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
12670
12671 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
12672 static void
12673 adjust_exidx_size(asection *exidx_sec, int adjust)
12674 {
12675 asection *out_sec;
12676
12677 if (!exidx_sec->rawsize)
12678 exidx_sec->rawsize = exidx_sec->size;
12679
12680 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
12681 out_sec = exidx_sec->output_section;
12682 /* Adjust size of output section. */
12683 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
12684 }
12685
12686 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
12687 static void
12688 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
12689 {
12690 struct _arm_elf_section_data *exidx_arm_data;
12691
12692 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
12693 add_unwind_table_edit (
12694 &exidx_arm_data->u.exidx.unwind_edit_list,
12695 &exidx_arm_data->u.exidx.unwind_edit_tail,
12696 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
12697
12698 exidx_arm_data->additional_reloc_count++;
12699
12700 adjust_exidx_size(exidx_sec, 8);
12701 }
12702
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
	codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Each EXIDX entry is a pair of 32-bit words; the second word
	 selects the entry kind below.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
12887
12888 static bfd_boolean
12889 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
12890 bfd *ibfd, const char *name)
12891 {
12892 asection *sec, *osec;
12893
12894 sec = bfd_get_linker_section (ibfd, name);
12895 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
12896 return TRUE;
12897
12898 osec = sec->output_section;
12899 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
12900 return TRUE;
12901
12902 if (! bfd_set_section_contents (obfd, osec, sec->contents,
12903 sec->output_offset, sec->size))
12904 return FALSE;
12905
12906 return TRUE;
12907 }
12908
12909 static bfd_boolean
12910 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
12911 {
12912 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
12913 asection *sec, *osec;
12914
12915 if (globals == NULL)
12916 return FALSE;
12917
12918 /* Invoke the regular ELF backend linker to do all the work. */
12919 if (!bfd_elf_final_link (abfd, info))
12920 return FALSE;
12921
12922 /* Process stub sections (eg BE8 encoding, ...). */
12923 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
12924 unsigned int i;
12925 for (i=0; i<htab->top_id; i++)
12926 {
12927 sec = htab->stub_group[i].stub_sec;
12928 /* Only process it once, in its link_sec slot. */
12929 if (sec && i == htab->stub_group[i].link_sec->id)
12930 {
12931 osec = sec->output_section;
12932 elf32_arm_write_section (abfd, info, sec, sec->contents);
12933 if (! bfd_set_section_contents (abfd, osec, sec->contents,
12934 sec->output_offset, sec->size))
12935 return FALSE;
12936 }
12937 }
12938
12939 /* Write out any glue sections now that we have created all the
12940 stubs. */
12941 if (globals->bfd_of_glue_owner != NULL)
12942 {
12943 if (! elf32_arm_output_glue_section (info, abfd,
12944 globals->bfd_of_glue_owner,
12945 ARM2THUMB_GLUE_SECTION_NAME))
12946 return FALSE;
12947
12948 if (! elf32_arm_output_glue_section (info, abfd,
12949 globals->bfd_of_glue_owner,
12950 THUMB2ARM_GLUE_SECTION_NAME))
12951 return FALSE;
12952
12953 if (! elf32_arm_output_glue_section (info, abfd,
12954 globals->bfd_of_glue_owner,
12955 VFP11_ERRATUM_VENEER_SECTION_NAME))
12956 return FALSE;
12957
12958 if (! elf32_arm_output_glue_section (info, abfd,
12959 globals->bfd_of_glue_owner,
12960 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
12961 return FALSE;
12962
12963 if (! elf32_arm_output_glue_section (info, abfd,
12964 globals->bfd_of_glue_owner,
12965 ARM_BX_GLUE_SECTION_NAME))
12966 return FALSE;
12967 }
12968
12969 return TRUE;
12970 }
12971
12972 /* Return a best guess for the machine number based on the attributes. */
12973
12974 static unsigned int
12975 bfd_arm_get_mach_from_attributes (bfd * abfd)
12976 {
12977 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
12978
12979 switch (arch)
12980 {
12981 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
12982 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
12983 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
12984
12985 case TAG_CPU_ARCH_V5TE:
12986 {
12987 char * name;
12988
12989 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
12990 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
12991
12992 if (name)
12993 {
12994 if (strcmp (name, "IWMMXT2") == 0)
12995 return bfd_mach_arm_iWMMXt2;
12996
12997 if (strcmp (name, "IWMMXT") == 0)
12998 return bfd_mach_arm_iWMMXt;
12999
13000 if (strcmp (name, "XSCALE") == 0)
13001 {
13002 int wmmx;
13003
13004 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13005 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13006 switch (wmmx)
13007 {
13008 case 1: return bfd_mach_arm_iWMMXt;
13009 case 2: return bfd_mach_arm_iWMMXt2;
13010 default: return bfd_mach_arm_XScale;
13011 }
13012 }
13013 }
13014
13015 return bfd_mach_arm_5TE;
13016 }
13017
13018 default:
13019 return bfd_mach_arm_unknown;
13020 }
13021 }
13022
13023 /* Set the right machine number. */
13024
13025 static bfd_boolean
13026 elf32_arm_object_p (bfd *abfd)
13027 {
13028 unsigned int mach;
13029
13030 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13031
13032 if (mach == bfd_mach_arm_unknown)
13033 {
13034 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13035 mach = bfd_mach_arm_ep9312;
13036 else
13037 mach = bfd_arm_get_mach_from_attributes (abfd);
13038 }
13039
13040 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13041 return TRUE;
13042 }
13043
13044 /* Function to keep ARM specific flags in the ELF header. */
13045
13046 static bfd_boolean
13047 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13048 {
13049 if (elf_flags_init (abfd)
13050 && elf_elfheader (abfd)->e_flags != flags)
13051 {
13052 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13053 {
13054 if (flags & EF_ARM_INTERWORK)
13055 _bfd_error_handler
13056 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13057 abfd);
13058 else
13059 _bfd_error_handler
13060 (_("warning: clearing the interworking flag of %pB due to outside request"),
13061 abfd);
13062 }
13063 }
13064 else
13065 {
13066 elf_elfheader (abfd)->e_flags = flags;
13067 elf_flags_init (abfd) = TRUE;
13068 }
13069
13070 return TRUE;
13071 }
13072
13073 /* Copy backend specific data from one object module to another. */
13074
13075 static bfd_boolean
13076 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13077 {
13078 flagword in_flags;
13079 flagword out_flags;
13080
13081 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13082 return TRUE;
13083
13084 in_flags = elf_elfheader (ibfd)->e_flags;
13085 out_flags = elf_elfheader (obfd)->e_flags;
13086
13087 if (elf_flags_init (obfd)
13088 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13089 && in_flags != out_flags)
13090 {
13091 /* Cannot mix APCS26 and APCS32 code. */
13092 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13093 return FALSE;
13094
13095 /* Cannot mix float APCS and non-float APCS code. */
13096 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13097 return FALSE;
13098
13099 /* If the src and dest have different interworking flags
13100 then turn off the interworking bit. */
13101 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13102 {
13103 if (out_flags & EF_ARM_INTERWORK)
13104 _bfd_error_handler
13105 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13106 obfd, ibfd);
13107
13108 in_flags &= ~EF_ARM_INTERWORK;
13109 }
13110
13111 /* Likewise for PIC, though don't warn for this case. */
13112 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13113 in_flags &= ~EF_ARM_PIC;
13114 }
13115
13116 elf_elfheader (obfd)->e_flags = in_flags;
13117 elf_flags_init (obfd) = TRUE;
13118
13119 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13120 }
13121
/* Values for Tag_ABI_PCS_R9_use (see the ARM EABI addenda, IHI 0045).  */
enum
{
  AEABI_R9_V6,		/* R9 used as V6 (an ordinary variable register).  */
  AEABI_R9_SB,		/* R9 used as SB (the static base register).  */
  AEABI_R9_TLS,		/* R9 used as the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 not used at all by the code.  */
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* No RW data used.  */
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,		/* Enums not used / not permitted.  */
  AEABI_enum_short,		/* Enums packed into smallest container.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits wide.  */
  AEABI_enum_forced_wide	/* Enums are always 32 bits wide.  */
};
13148
13149 /* Determine whether an object attribute tag takes an integer, a
13150 string or both. */
13151
13152 static int
13153 elf32_arm_obj_attrs_arg_type (int tag)
13154 {
13155 if (tag == Tag_compatibility)
13156 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13157 else if (tag == Tag_nodefaults)
13158 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
13159 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
13160 return ATTR_TYPE_FLAG_STR_VAL;
13161 else if (tag < 32)
13162 return ATTR_TYPE_FLAG_INT_VAL;
13163 else
13164 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
13165 }
13166
13167 /* The ABI defines that Tag_conformance should be emitted first, and that
13168 Tag_nodefaults should be second (if either is defined). This sets those
13169 two positions, and bumps up the position of all the remaining tags to
13170 compensate. */
13171 static int
13172 elf32_arm_obj_attrs_order (int num)
13173 {
13174 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
13175 return Tag_conformance;
13176 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
13177 return Tag_nodefaults;
13178 if ((num - 2) < Tag_nodefaults)
13179 return num - 2;
13180 if ((num - 1) < Tag_conformance)
13181 return num - 1;
13182 return num;
13183 }
13184
13185 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
13186 static bfd_boolean
13187 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
13188 {
13189 if ((tag & 127) < 64)
13190 {
13191 _bfd_error_handler
13192 (_("%pB: unknown mandatory EABI object attribute %d"),
13193 abfd, tag);
13194 bfd_set_error (bfd_error_bad_value);
13195 return FALSE;
13196 }
13197 else
13198 {
13199 _bfd_error_handler
13200 (_("warning: %pB: unknown EABI object attribute %d"),
13201 abfd, tag);
13202 return TRUE;
13203 }
13204 }
13205
13206 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
13207 Returns -1 if no architecture could be read. */
13208
13209 static int
13210 get_secondary_compatible_arch (bfd *abfd)
13211 {
13212 obj_attribute *attr =
13213 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13214
13215 /* Note: the tag and its argument below are uleb128 values, though
13216 currently-defined values fit in one byte for each. */
13217 if (attr->s
13218 && attr->s[0] == Tag_CPU_arch
13219 && (attr->s[1] & 128) != 128
13220 && attr->s[2] == 0)
13221 return attr->s[1];
13222
13223 /* This tag is "safely ignorable", so don't complain if it looks funny. */
13224 return -1;
13225 }
13226
13227 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
13228 The tag is removed if ARCH is -1. */
13229
13230 static void
13231 set_secondary_compatible_arch (bfd *abfd, int arch)
13232 {
13233 obj_attribute *attr =
13234 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13235
13236 if (arch == -1)
13237 {
13238 attr->s = NULL;
13239 return;
13240 }
13241
13242 /* Note: the tag and its argument below are uleb128 values, though
13243 currently-defined values fit in one byte for each. */
13244 if (!attr->s)
13245 attr->s = (char *) bfd_alloc (abfd, 3);
13246 attr->s[0] = Tag_CPU_arch;
13247 attr->s[1] = arch;
13248 attr->s[2] = '\0';
13249 }
13250
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  OLDTAG / *SECONDARY_COMPAT_OUT describe the output BFD so
   far; NEWTAG / SECONDARY_COMPAT describe the input IBFD.  Returns the
   merged Tag_CPU_arch value and updates *SECONDARY_COMPAT_OUT, or returns
   -1 (after issuing a diagnostic) if the architectures conflict.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each of the tables below gives the result of merging the architecture
     the table is named after (the higher of the two tags) with an older
     architecture; the table is indexed by the lower tag value.  An entry
     of -1 marks an incompatible combination.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8r[] =
    {
      T(V8R),    /* PRE_V4.  */
      T(V8R),    /* V4.  */
      T(V8R),    /* V4T.  */
      T(V8R),    /* V5T.  */
      T(V8R),    /* V5TE.  */
      T(V8R),    /* V5TEJ.  */
      T(V8R),    /* V6.  */
      T(V8R),    /* V6KZ.  */
      T(V8R),    /* V6T2.  */
      T(V8R),    /* V6K.  */
      T(V8R),    /* V7.  */
      T(V8R),    /* V6_M.  */
      T(V8R),    /* V6S_M.  */
      T(V8R),    /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V8R),    /* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      -1,		/* V7.  */
      T(V8M_BASE),	/* V6_M.  */
      T(V8M_BASE),	/* V6S_M.  */
      -1,		/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE)	/* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8M_MAIN),	/* V7.  */
      T(V8M_MAIN),	/* V6_M.  */
      T(V8M_MAIN),	/* V6S_M.  */
      T(V8M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_MAIN),	/* V8-M BASELINE.  */
      T(V8M_MAIN)	/* V8-M MAINLINE.  */
    };
  /* Pseudo-architecture: compatible with both V4T and V6-M via the
     Tag_also_compatible_with mechanism.  */
  const int v4t_plus_v6_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V4T),		/* V4T.  */
      T(V5T),		/* V5T.  */
      T(V5TE),		/* V5TE.  */
      T(V5TEJ),		/* V5TEJ.  */
      T(V6),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6T2),		/* V6T2.  */
      T(V6K),		/* V6K.  */
      T(V7),		/* V7.  */
      T(V6_M),		/* V6_M.  */
      T(V6S_M),		/* V6S_M.  */
      T(V7E_M),		/* V7E_M.  */
      T(V8),		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE),	/* V8-M BASELINE.  */
      T(V8M_MAIN),	/* V8-M MAINLINE.  */
      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
    };
  /* Master table: comb[tagh - T(V6T2)] selects the per-architecture
     table for the higher tag (architectures below V6T2 never need a
     table — see the monotonic shortcut below).  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  /* Look up the merge in the higher tag's table, indexed by the lower
     tag.  A missing table or a -1 entry means incompatible.  */
  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
13513
13514 /* Query attributes object to see if integer divide instructions may be
13515 present in an object. */
13516 static bfd_boolean
13517 elf32_arm_attributes_accept_div (const obj_attribute *attr)
13518 {
13519 int arch = attr[Tag_CPU_arch].i;
13520 int profile = attr[Tag_CPU_arch_profile].i;
13521
13522 switch (attr[Tag_DIV_use].i)
13523 {
13524 case 0:
13525 /* Integer divide allowed if instruction contained in archetecture. */
13526 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
13527 return TRUE;
13528 else if (arch >= TAG_CPU_ARCH_V7E_M)
13529 return TRUE;
13530 else
13531 return FALSE;
13532
13533 case 1:
13534 /* Integer divide explicitly prohibited. */
13535 return FALSE;
13536
13537 default:
13538 /* Unrecognised case - treat as allowing divide everywhere. */
13539 case 2:
13540 /* Integer divide allowed in ARM state. */
13541 return TRUE;
13542 }
13543 }
13544
13545 /* Query attributes object to see if integer divide instructions are
13546 forbidden to be in the object. This is not the inverse of
13547 elf32_arm_attributes_accept_div. */
13548 static bfd_boolean
13549 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
13550 {
13551 return attr[Tag_DIV_use].i == 1;
13552 }
13553
/* Merge EABI object attributes from IBFD into OBFD (INFO->output_bfd).
   Raise an error if there are conflicting attributes.  Returns FALSE on
   a fatal mismatch (a diagnostic has been issued), TRUE otherwise.  */

static bfd_boolean
elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  obj_attribute *in_attr;
  obj_attribute *out_attr;
  /* Some tags have 0 = don't care, 1 = strong requirement,
     2 = weak requirement.  order_021 ranks those three values.  */
  static const int order_021[3] = {0, 2, 1};
  int i;
  bfd_boolean result = TRUE;
  const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;

  /* Skip the linker stubs file.  This preserves previous behavior
     of accepting unknown attributes in the first input file - but
     is that a bug?  */
  if (ibfd->flags & BFD_LINKER_CREATED)
    return TRUE;

  /* Skip any input that hasn't attribute section.
     This enables to link object files without attribute section with
     any others.  */
  if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
    return TRUE;

  if (!elf_known_obj_attributes_proc (obfd)[0].i)
    {
      /* This is the first object.  Copy the attributes.  */
      _bfd_elf_copy_obj_attributes (ibfd, obfd);

      out_attr = elf_known_obj_attributes_proc (obfd);

      /* Use the Tag_null value to indicate the attributes have been
	 initialized.  */
      out_attr[0].i = 1;

      /* We do not output objects with Tag_MPextension_use_legacy - we move
	 the attribute's value to Tag_MPextension_use.  */
      if (out_attr[Tag_MPextension_use_legacy].i != 0)
	{
	  if (out_attr[Tag_MPextension_use].i != 0
	      && out_attr[Tag_MPextension_use_legacy].i
		!= out_attr[Tag_MPextension_use].i)
	    {
	      _bfd_error_handler
		(_("Error: %pB has both the current and legacy "
		   "Tag_MPextension_use attributes"), ibfd);
	      result = FALSE;
	    }

	  out_attr[Tag_MPextension_use] =
	    out_attr[Tag_MPextension_use_legacy];
	  out_attr[Tag_MPextension_use_legacy].type = 0;
	  out_attr[Tag_MPextension_use_legacy].i = 0;
	}

      return result;
    }

  in_attr = elf_known_obj_attributes_proc (ibfd);
  out_attr = elf_known_obj_attributes_proc (obfd);
  /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
  if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
    {
      /* Ignore mismatches if the object doesn't use floating point or is
	 floating point ABI independent.  */
      if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
      else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
	{
	  _bfd_error_handler
	    (_("error: %pB uses VFP register arguments, %pB does not"),
	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
	  result = FALSE;
	}
    }

  for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
    {
      /* Merge this attribute with existing attributes.  */
      switch (i)
	{
	case Tag_CPU_raw_name:
	case Tag_CPU_name:
	  /* These are merged after Tag_CPU_arch.  */
	  break;

	case Tag_ABI_optimization_goals:
	case Tag_ABI_FP_optimization_goals:
	  /* Use the first value seen.  */
	  break;

	case Tag_CPU_arch:
	  {
	    int secondary_compat = -1, secondary_compat_out = -1;
	    unsigned int saved_out_attr = out_attr[i].i;
	    int arch_attr;
	    static const char *name_table[] =
	      {
		/* These aren't real CPU names, but we can't guess
		   that from the architecture version alone.  */
		"Pre v4",
		"ARM v4",
		"ARM v4T",
		"ARM v5T",
		"ARM v5TE",
		"ARM v5TEJ",
		"ARM v6",
		"ARM v6KZ",
		"ARM v6T2",
		"ARM v6K",
		"ARM v7",
		"ARM v6-M",
		"ARM v6S-M",
		"ARM v8",
		"",
		"ARM v8-M.baseline",
		"ARM v8-M.mainline",
	      };

	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
	    secondary_compat = get_secondary_compatible_arch (ibfd);
	    secondary_compat_out = get_secondary_compatible_arch (obfd);
	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
					      &secondary_compat_out,
					      in_attr[i].i,
					      secondary_compat);

	    /* Return with error if failed to merge.  */
	    if (arch_attr == -1)
	      return FALSE;

	    out_attr[i].i = arch_attr;

	    set_secondary_compatible_arch (obfd, secondary_compat_out);

	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
	    if (out_attr[i].i == saved_out_attr)
	      ; /* Leave the names alone.  */
	    else if (out_attr[i].i == in_attr[i].i)
	      {
		/* The output architecture has been changed to match the
		   input architecture.  Use the input names.  */
		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
		  : NULL;
		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
		  : NULL;
	      }
	    else
	      {
		out_attr[Tag_CPU_name].s = NULL;
		out_attr[Tag_CPU_raw_name].s = NULL;
	      }

	    /* If we still don't have a value for Tag_CPU_name,
	       make one up now.  Tag_CPU_raw_name remains blank.  */
	    if (out_attr[Tag_CPU_name].s == NULL
		&& out_attr[i].i < ARRAY_SIZE (name_table))
	      out_attr[Tag_CPU_name].s =
		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
	  }
	  break;

	case Tag_ARM_ISA_use:
	case Tag_THUMB_ISA_use:
	case Tag_WMMX_arch:
	case Tag_Advanced_SIMD_arch:
	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
	case Tag_ABI_FP_rounding:
	case Tag_ABI_FP_exceptions:
	case Tag_ABI_FP_user_exceptions:
	case Tag_ABI_FP_number_model:
	case Tag_FP_HP_extension:
	case Tag_CPU_unaligned_access:
	case Tag_T2EE_use:
	case Tag_MPextension_use:
	  /* Use the largest value specified.  */
	  if (in_attr[i].i > out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_preserved:
	case Tag_ABI_PCS_RO_data:
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_needed:
	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
	      && (in_attr[Tag_ABI_align_preserved].i == 0
		  || out_attr[Tag_ABI_align_preserved].i == 0))
	    {
	      /* This error message should be enabled once all non-conformant
		 binaries in the toolchain have had the attributes set
		 properly.
	      _bfd_error_handler
		(_("error: %pB: 8-byte data alignment conflicts with %pB"),
		 obfd, ibfd);
	      result = FALSE; */
	    }
	  /* Fall through.  */
	case Tag_ABI_FP_denormal:
	case Tag_ABI_PCS_GOT_use:
	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
	     value if greater than 2 (for future-proofing).  */
	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_Virtualization_use:
	  /* The virtualization tag effectively stores two bits of
	     information: the intended use of TrustZone (in bit 0), and the
	     intended use of Virtualization (in bit 1).  */
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0
		   && in_attr[i].i != out_attr[i].i)
	    {
	      /* Values 1..3 can be OR-merged bitwise (3 = both bits);
		 anything larger is unknown and cannot be merged.  */
	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
		out_attr[i].i = 3;
	      else
		{
		  _bfd_error_handler
		    (_("error: %pB: unable to merge virtualization attributes "
		       "with %pB"),
		     obfd, ibfd);
		  result = FALSE;
		}
	    }
	  break;

	case Tag_CPU_arch_profile:
	  if (out_attr[i].i != in_attr[i].i)
	    {
	      /* 0 will merge with anything.
		 'A' and 'S' merge to 'A'.
		 'R' and 'S' merge to 'R'.
		 'M' and 'A|R|S' is an error.  */
	      if (out_attr[i].i == 0
		  || (out_attr[i].i == 'S'
		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
		out_attr[i].i = in_attr[i].i;
	      else if (in_attr[i].i == 0
		       || (in_attr[i].i == 'S'
			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
		; /* Do nothing.  */
	      else
		{
		  _bfd_error_handler
		    (_("error: %pB: conflicting architecture profiles %c/%c"),
		     ibfd,
		     in_attr[i].i ? in_attr[i].i : '0',
		     out_attr[i].i ? out_attr[i].i : '0');
		  result = FALSE;
		}
	    }
	  break;

	case Tag_DSP_extension:
	  /* No need to change output value if any of:
	     - pre (<=) ARMv5T input architecture (do not have DSP)
	     - M input profile not ARMv7E-M and do not have DSP.  */
	  if (in_attr[Tag_CPU_arch].i <= 3
	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
		  && in_attr[Tag_CPU_arch].i != 13
		  && in_attr[i].i == 0))
	    ; /* Do nothing.  */
	  /* Output value should be 0 if DSP part of architecture, ie.
	     - post (>=) ARMv5te architecture output
	     - A, R or S profile output or ARMv7E-M output architecture.  */
	  else if (out_attr[Tag_CPU_arch].i >= 4
		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
		       || out_attr[Tag_CPU_arch_profile].i == 'R'
		       || out_attr[Tag_CPU_arch_profile].i == 'S'
		       || out_attr[Tag_CPU_arch].i == 13))
	    out_attr[i].i = 0;
	  /* Otherwise, DSP instructions are added and not part of output
	     architecture.  */
	  else
	    out_attr[i].i = 1;
	  break;

	case Tag_FP_arch:
	    {
	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
		 when it's 0.  It might mean absence of FP hardware if
		 Tag_FP_arch is zero.  */

	    /* Maps each defined Tag_FP_arch value to its VFP ISA version
	       and register-bank size, so that supersets can be computed.  */
#define VFP_VERSION_COUNT 9
	      static const struct
	      {
		  int ver;
		  int regs;
	      } vfp_versions[VFP_VERSION_COUNT] =
		{
		  {0, 0},
		  {1, 16},
		  {2, 16},
		  {3, 32},
		  {3, 16},
		  {4, 32},
		  {4, 16},
		  {8, 32},
		  {8, 16}
		};
	      int ver;
	      int regs;
	      int newval;

	      /* If the output has no requirement about FP hardware,
		 follow the requirement of the input.  */
	      if (out_attr[i].i == 0)
		{
		  /* This assert is still reasonable, we shouldn't
		     produce the suspicious build attribute
		     combination (See below for in_attr).  */
		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
		  out_attr[i].i = in_attr[i].i;
		  out_attr[Tag_ABI_HardFP_use].i
		    = in_attr[Tag_ABI_HardFP_use].i;
		  break;
		}
	      /* If the input has no requirement about FP hardware, do
		 nothing.  */
	      else if (in_attr[i].i == 0)
		{
		  /* We used to assert that Tag_ABI_HardFP_use was
		     zero here, but we should never assert when
		     consuming an object file that has suspicious
		     build attributes.  The single precision variant
		     of 'no FP architecture' is still 'no FP
		     architecture', so we just ignore the tag in this
		     case.  */
		  break;
		}

	      /* Both the input and the output have nonzero Tag_FP_arch.
		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */

	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
		 do nothing.  */
	      if (in_attr[Tag_ABI_HardFP_use].i == 0
		  && out_attr[Tag_ABI_HardFP_use].i == 0)
		;
	      /* If the input and the output have different Tag_ABI_HardFP_use,
		 the combination of them is 0 (implied by Tag_FP_arch).  */
	      else if (in_attr[Tag_ABI_HardFP_use].i
		       != out_attr[Tag_ABI_HardFP_use].i)
		out_attr[Tag_ABI_HardFP_use].i = 0;

	      /* Now we can handle Tag_FP_arch.  */

	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
		 pick the biggest.  */
	      if (in_attr[i].i >= VFP_VERSION_COUNT
		  && in_attr[i].i > out_attr[i].i)
		{
		  out_attr[i] = in_attr[i];
		  break;
		}
	      /* The output uses the superset of input features
		 (ISA version) and registers.  */
	      ver = vfp_versions[in_attr[i].i].ver;
	      if (ver < vfp_versions[out_attr[i].i].ver)
		ver = vfp_versions[out_attr[i].i].ver;
	      regs = vfp_versions[in_attr[i].i].regs;
	      if (regs < vfp_versions[out_attr[i].i].regs)
		regs = vfp_versions[out_attr[i].i].regs;
	      /* This assumes all possible supersets are also a valid
		 options.  */
	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
		{
		  if (regs == vfp_versions[newval].regs
		      && ver == vfp_versions[newval].ver)
		    break;
		}
	      out_attr[i].i = newval;
	    }
	  break;
	case Tag_PCS_config:
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
	    {
	      /* It's sometimes ok to mix different configs, so this is only
		 a warning.  */
	      _bfd_error_handler
		(_("warning: %pB: conflicting platform configuration"), ibfd);
	    }
	  break;
	case Tag_ABI_PCS_R9_use:
	  if (in_attr[i].i != out_attr[i].i
	      && out_attr[i].i != AEABI_R9_unused
	      && in_attr[i].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: conflicting use of R9"), ibfd);
	      result = FALSE;
	    }
	  if (out_attr[i].i == AEABI_R9_unused)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_RW_data:
	  /* SB-relative data addressing requires R9 to be the static base.  */
	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: SB relative addressing conflicts with use of R9"),
		 ibfd);
	      result = FALSE;
	    }
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_wchar_t:
	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
	    {
	      _bfd_error_handler
		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
		 ibfd, in_attr[i].i, out_attr[i].i);
	    }
	  else if (in_attr[i].i && !out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_enum_size:
	  if (in_attr[i].i != AEABI_enum_unused)
	    {
	      if (out_attr[i].i == AEABI_enum_unused
		  || out_attr[i].i == AEABI_enum_forced_wide)
		{
		  /* The existing object is compatible with anything.
		     Use whatever requirements the new object has.  */
		  out_attr[i].i = in_attr[i].i;
		}
	      else if (in_attr[i].i != AEABI_enum_forced_wide
		       && out_attr[i].i != in_attr[i].i
		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
		{
		  static const char *aeabi_enum_names[] =
		    { "", "variable-size", "32-bit", "" };
		  const char *in_name =
		    in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
		    ? aeabi_enum_names[in_attr[i].i]
		    : "<unknown>";
		  const char *out_name =
		    out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
		    ? aeabi_enum_names[out_attr[i].i]
		    : "<unknown>";
		  _bfd_error_handler
		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
		     ibfd, in_name, out_name);
		}
	    }
	  break;
	case Tag_ABI_VFP_args:
	  /* Already done.  */
	  break;
	case Tag_ABI_WMMX_args:
	  if (in_attr[i].i != out_attr[i].i)
	    {
	      _bfd_error_handler
		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
		 ibfd, obfd);
	      result = FALSE;
	    }
	  break;
	case Tag_compatibility:
	  /* Merged in target-independent code.  */
	  break;
	case Tag_ABI_HardFP_use:
	  /* This is handled along with Tag_FP_arch.  */
	  break;
	case Tag_ABI_FP_16bit_format:
	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
	    {
	      if (in_attr[i].i != out_attr[i].i)
		{
		  _bfd_error_handler
		    (_("error: fp16 format mismatch between %pB and %pB"),
		     ibfd, obfd);
		  result = FALSE;
		}
	    }
	  if (in_attr[i].i != 0)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_DIV_use:
	  /* A value of zero on input means that the divide instruction may
	     be used if available in the base architecture as specified via
	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
	     the user did not want divide instructions.  A value of 2
	     explicitly means that divide instructions were allowed in ARM
	     and Thumb state.  */
	  if (in_attr[i].i == out_attr[i].i)
	    /* Do nothing.  */ ;
	  else if (elf32_arm_attributes_forbid_div (in_attr)
		   && !elf32_arm_attributes_accept_div (out_attr))
	    out_attr[i].i = 1;
	  else if (elf32_arm_attributes_forbid_div (out_attr)
		   && elf32_arm_attributes_accept_div (in_attr))
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i == 2)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_MPextension_use_legacy:
	  /* We don't output objects with Tag_MPextension_use_legacy - we
	     move the value to Tag_MPextension_use.  */
	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
	    {
	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
		{
		  _bfd_error_handler
		    (_("%pB has both the current and legacy "
		       "Tag_MPextension_use attributes"),
		     ibfd);
		  result = FALSE;
		}
	    }

	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
	    out_attr[Tag_MPextension_use] = in_attr[i];

	  break;

	case Tag_nodefaults:
	  /* This tag is set if it exists, but the value is unused (and is
	     typically zero).  We don't actually need to do anything here -
	     the merge happens automatically when the type flags are merged
	     below.  */
	  break;
	case Tag_also_compatible_with:
	  /* Already done in Tag_CPU_arch.  */
	  break;
	case Tag_conformance:
	  /* Keep the attribute if it matches.  Throw it away otherwise.
	     No attribute means no claim to conform.  */
	  if (!in_attr[i].s || !out_attr[i].s
	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
	    out_attr[i].s = NULL;
	  break;

	default:
	  result
	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
	}

      /* If out_attr was copied from in_attr then it won't have a type yet.  */
      if (in_attr[i].type && !out_attr[i].type)
	out_attr[i].type = in_attr[i].type;
    }

  /* Merge Tag_compatibility attributes and any common GNU ones.  */
  if (!_bfd_elf_merge_object_attributes (ibfd, info))
    return FALSE;

  /* Check for any attributes not known on ARM.  */
  result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);

  return result;
}
14132
14133
14134 /* Return TRUE if the two EABI versions are incompatible. */
14135
14136 static bfd_boolean
14137 elf32_arm_versions_compatible (unsigned iver, unsigned over)
14138 {
14139 /* v4 and v5 are the same spec before and after it was released,
14140 so allow mixing them. */
14141 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
14142 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
14143 return TRUE;
14144
14145 return (iver == over);
14146 }
14147
14148 /* Merge backend specific data from an object file to the output
14149 object file when linking. */
14150
14151 static bfd_boolean
14152 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
14153
14154 /* Display the flags field. */
14155
static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* Print the raw e_flags value first; the code below then decodes the
     individual bits, clearing each recognised bit from FLAGS as it is
     printed so that any leftover (unrecognised) bits can be reported at
     the end.  */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Which flag bits are meaningful depends on the EABI version encoded
     in e_flags.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* VER4 shares the BE8/LE8 decoding with VER5 below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      /* Decoding common to EABI v4 and v5 (reached via goto from the
	 VER4 case above).  */
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  /* The version field itself has been decoded; remove it.  */
  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  /* Anything left in FLAGS was not decoded by one of the cases above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
14298
14299 static int
14300 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
14301 {
14302 switch (ELF_ST_TYPE (elf_sym->st_info))
14303 {
14304 case STT_ARM_TFUNC:
14305 return ELF_ST_TYPE (elf_sym->st_info);
14306
14307 case STT_ARM_16BIT:
14308 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
14309 This allows us to distinguish between data used by Thumb instructions
14310 and non-data (which is probably code) inside Thumb regions of an
14311 executable. */
14312 if (type != STT_OBJECT && type != STT_TLS)
14313 return ELF_ST_TYPE (elf_sym->st_info);
14314 break;
14315
14316 default:
14317 break;
14318 }
14319
14320 return type;
14321 }
14322
14323 static asection *
14324 elf32_arm_gc_mark_hook (asection *sec,
14325 struct bfd_link_info *info,
14326 Elf_Internal_Rela *rel,
14327 struct elf_link_hash_entry *h,
14328 Elf_Internal_Sym *sym)
14329 {
14330 if (h != NULL)
14331 switch (ELF32_R_TYPE (rel->r_info))
14332 {
14333 case R_ARM_GNU_VTINHERIT:
14334 case R_ARM_GNU_VTENTRY:
14335 return NULL;
14336 }
14337
14338 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
14339 }
14340
14341 /* Look through the relocs for a section during the first phase. */
14342
static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  /* Nothing to record for a relocatable (ld -r) link.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Lazily created below, the first time a reloc in this section may
     need a dynamic copy.  */
  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  /* Walk every relocation in SEC, classifying each one and bumping the
     appropriate GOT/PLT/dynamic-reloc reference counts.  */
  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned int r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
			      r_symndx);
	  return FALSE;
	}

      /* Resolve the relocation's symbol: ISYM for a local symbol, H for
	 a global one (following any indirect/warning links).  */
      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      /* Classification flags filled in by the switch below and acted
	 on afterwards.  */
      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    /* Count the GOT reference and fetch the previously recorded
	       TLS access type for this symbol.  */
	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All GOT-relative relocations need the .got section itself,
	     even when they need no individual entry.  */
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* On VxWorks, treat it like an ABS32 reloc.  */
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* MOVW/MOVT absolute relocations cannot be expressed as
	     dynamic relocations, so reject them outright in PIC code.  */
	  if (bfd_link_pic (info))
	    {
	      _bfd_error_handler
		(_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:	/* Entered via goto from the VxWorks ABS12 case.  */
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      /* Act on the classification computed above.  */
      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  /* Locate the PLT refcount slot: the hash entry for a global,
	     or a per-symbol iplt record for a local ifunc.  */
	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_get_section_flags (dynobj, sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (dynobj, sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  /* The list is kept most-recent-section-first, so the entry
	     for SEC, if it exists, is at the head.  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	}
    }

  return TRUE;
}
14744
/* Rewrite the relocations of the SHT_ARM_EXIDX output section O after
   unwind-table editing: relocations belonging to deleted EXIDX entries
   are dropped, surviving relocation offsets are rebiased by the number
   of entries deleted before them, and a new R_ARM_PREL31 relocation is
   appended for any CANTUNWIND entry inserted at the end of an input
   section.  RELDATA describes O's relocation data; its count and size
   are updated in place.  */

static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;
  Elf_Internal_Rela *irela_head, *irela;
  Elf_Internal_Shdr *rel_hdr;
  bfd *abfd;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  /* Only EXIDX sections are edited; everything else is untouched.  */
  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  abfd = o->owner;
  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  /* Pick REL or RELA swap routines to match the entry size of the
     output relocation section.  */
  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  /* ERELA walks the existing external relocations in place; IRELA
     collects the surviving internal relocations (+1 slot for a
     possible appended CANTUNWIND relocation).
     NOTE(review): the bfd_zmalloc result is not checked for NULL
     before use.  */
  erela_head = rel_hdr->contents;
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));

  erela = erela_head;
  irela = irela_head;
  count = 0;

  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  /* Synthetic relocations from the linker script are kept
	     unchanged.  */
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	  irela++;
	  count++;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;
	  bfd_size_type j;
	  bfd_vma offset;
	  asection *i;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  offset = o->vma + i->output_offset;

	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
	  else
	    abort ();

	  if (edit_list)
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  arm_unwind_table_edit *edit_node, *edit_next;
		  bfd_vma bias;
		  bfd_vma reloc_index;

		  (*swap_in) (abfd, erela, irela);
		  /* Each EXIDX entry is 8 bytes, so the entry index is
		     the offset within the input section divided by 8.  */
		  reloc_index = (irela->r_offset - offset) / 8;

		  /* BIAS counts the edit-list nodes at or before this
		     entry; EDIT_NODE ends up as the last such node.  */
		  bias = 0;
		  edit_node = edit_list;
		  for (edit_next = edit_list;
		       edit_next && edit_next->index <= reloc_index;
		       edit_next = edit_node->next)
		    {
		      bias++;
		      edit_node = edit_next;
		    }

		  /* Keep the relocation unless its entry was deleted;
		     shift its offset down by the deleted entries.  */
		  if (edit_node->type != DELETE_EXIDX_ENTRY
		      || edit_node->index != reloc_index)
		    {
		      irela->r_offset -= bias * 8;
		      irela++;
		      count++;
		    }

		  erela += rel_hdr->sh_entsize;
		}

	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
		{
		  /* New relocation entity.  */
		  asection *text_sec = edit_tail->linked_section;
		  asection *text_out = text_sec->output_section;
		  bfd_vma exidx_offset = offset + i->size - 8;

		  irela->r_addend = 0;
		  irela->r_offset = exidx_offset;
		  irela->r_info = ELF32_R_INFO
		    (text_out->target_index, R_ARM_PREL31);
		  irela++;
		  count++;
		}
	    }
	  else
	    {
	      /* No edits for this input section: copy its relocations
		 through unchanged.  */
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  (*swap_in) (abfd, erela, irela);
		  erela += rel_hdr->sh_entsize;
		  irela++;
		}

	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	    }
	}
    }

  /* Shrink the output relocation section to the surviving entries and
     write them back out over the old contents.  */
  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  erela = erela_head;
  irela = irela_head;
  while (count > 0)
    {
      (*swap_out) (abfd, irela, erela);
      erela += rel_hdr->sh_entsize;
      irela++;
      count--;
    }

  free (irela_head);

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
}
14905
14906 /* Unwinding tables are not referenced directly. This pass marks them as
14907 required if the corresponding code section is marked. Similarly, ARMv8-M
14908 secure entry functions can only be referenced by SG veneers which are
14909 created after the GC process. They need to be marked in case they reside in
14910 their own section (as would be the case if code was compiled with
14911 -ffunction-sections). */
14912
static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;

  /* Run the generic extra-section marking first.  */
  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* CMSE entry-symbol scanning below only applies to ARMv8-M
     (baseline or later) M-profile outputs.  */
  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      /* Mark an unmarked EXIDX section whenever the code
		 section it describes (via sh_link) is marked.  */
	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      /* sh_info is the index of the first global symbol;
		 locals have no hash entries and are skipped.  */
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return FALSE;
		    }
		}
	    }
	}
      first_bfd_browse = FALSE;
    }

  return TRUE;
}
14997
14998 /* Treat mapping symbols as special target symbols. */
14999
15000 static bfd_boolean
15001 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15002 {
15003 return bfd_is_arm_special_symbol_name (sym->name,
15004 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15005 }
15006
15007 /* This is a copy of elf_find_function() from elf.c except that
15008 ARM mapping symbols are ignored when looking for function names
15009 and STT_ARM_TFUNC is considered to a function type. */
15010
15011 static bfd_boolean
15012 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
15013 asymbol ** symbols,
15014 asection * section,
15015 bfd_vma offset,
15016 const char ** filename_ptr,
15017 const char ** functionname_ptr)
15018 {
15019 const char * filename = NULL;
15020 asymbol * func = NULL;
15021 bfd_vma low_func = 0;
15022 asymbol ** p;
15023
15024 for (p = symbols; *p != NULL; p++)
15025 {
15026 elf_symbol_type *q;
15027
15028 q = (elf_symbol_type *) *p;
15029
15030 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
15031 {
15032 default:
15033 break;
15034 case STT_FILE:
15035 filename = bfd_asymbol_name (&q->symbol);
15036 break;
15037 case STT_FUNC:
15038 case STT_ARM_TFUNC:
15039 case STT_NOTYPE:
15040 /* Skip mapping symbols. */
15041 if ((q->symbol.flags & BSF_LOCAL)
15042 && bfd_is_arm_special_symbol_name (q->symbol.name,
15043 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
15044 continue;
15045 /* Fall through. */
15046 if (bfd_get_section (&q->symbol) == section
15047 && q->symbol.value >= low_func
15048 && q->symbol.value <= offset)
15049 {
15050 func = (asymbol *) q;
15051 low_func = q->symbol.value;
15052 }
15053 break;
15054 }
15055 }
15056
15057 if (func == NULL)
15058 return FALSE;
15059
15060 if (filename_ptr)
15061 *filename_ptr = filename;
15062 if (functionname_ptr)
15063 *functionname_ptr = bfd_asymbol_name (func);
15064
15065 return TRUE;
15066 }
15067
15068
15069 /* Find the nearest line to a particular section and offset, for error
15070 reporting. This code is a duplicate of the code in elf.c, except
15071 that it uses arm_elf_find_function. */
15072
static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asymbol ** symbols,
			     asection * section,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  /* Try DWARF 2 first; if it finds the line but not the function name,
     fill the latter in from the symbol table.  */
  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      if (!*functionname_ptr)
	/* Pass NULL for the filename slot if DWARF already supplied
	   one, so the symbol scan does not overwrite it.  */
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  /* Fall back to stabs debug information.  */
  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  /* Last resort: symbol table only; line number is unknown.  */
  if (symbols == NULL)
    return FALSE;

  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
15121
15122 static bfd_boolean
15123 elf32_arm_find_inliner_info (bfd * abfd,
15124 const char ** filename_ptr,
15125 const char ** functionname_ptr,
15126 unsigned int * line_ptr)
15127 {
15128 bfd_boolean found;
15129 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
15130 functionname_ptr, line_ptr,
15131 & elf_tdata (abfd)->dwarf2_find_line_info);
15132 return found;
15133 }
15134
15135 /* Find dynamic relocs for H that apply to read-only sections. */
15136
15137 static asection *
15138 readonly_dynrelocs (struct elf_link_hash_entry *h)
15139 {
15140 struct elf_dyn_relocs *p;
15141
15142 for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
15143 {
15144 asection *s = p->sec->output_section;
15145
15146 if (s != NULL && (s->flags & SEC_READONLY) != 0)
15147 return p->sec;
15148 }
15149 return NULL;
15150 }
15151
15152 /* Adjust a symbol defined by a dynamic object and referenced by a
15153 regular object. The current definition is in some section of the
15154 dynamic object, but we're not including those sections. We have to
15155 change the definition to something the rest of the link can
15156 understand. */
15157
static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      /* Functions never need a copy reloc; done.  */
      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  /* Read-only definitions are copied into the relro bss variant so the
     copy stays read-only after relocation.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
15286
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.  Called via elf_link_hash_traverse for each global
   symbol H; INF is the struct bfd_link_info for the current link.
   Returns FALSE (aborting the traversal) only on a hard error such as
   failing to record a dynamic symbol.  */

static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* Indirect symbols are handled via the symbol they point to;
     nothing to allocate for them.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* PLT allocation.  STT_GNU_IFUNC symbols may need a PLT entry even
     when no dynamic sections were created (static IFUNC binding).  */
  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turns means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  /* PLT entry not needed after all: mark it unused.  */
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      /* No PLT references at all.  */
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  /* GOT allocation, including the TLS variants (GD, GDESC, IE).  */
  if (h->got.refcount > 0)
    {
      asection *s;
      bfd_boolean dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)
	    abort ();

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */
	    s->size += 4;
	  else
	    {
	      if (tls_type & GOT_TLS_GDESC)
		{
		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
		  eh->tlsdesc_got
		    = (htab->root.sgotplt->size
		       - elf32_arm_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += 8;
		  h->got.offset = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}

	      if (tls_type & GOT_TLS_GD)
		{
		  /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots.  If
		     the symbol is both GD and GDESC, got.offset may
		     have been overwritten.  */
		  h->got.offset = s->size;
		  s->size += 8;
		}

	      if (tls_type & GOT_TLS_IE)
		/* R_ARM_TLS_IE32 needs one GOT slot.  */
		s->size += 4;
	    }

	  dyn = htab->root.dynamic_sections_created;

	  /* INDX is the dynamic symbol index to use in the GOT
	     relocations, or 0 when the symbol binds locally.  */
	  indx = 0;
	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
	    indx = h->dynindx;

	  /* Now reserve space for the dynamic relocations that the GOT
	     entries will need at run time.  */
	  if (tls_type != GOT_NORMAL
	      && (bfd_link_pic (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per
		 2 entries.  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		       || h->root.type != bfd_link_hash_undefweak))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  /* Unlink list entries whose count drops to zero.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  /* VxWorks .tls_vars relocations are handled by the loader,
	     so drop them here.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (eh->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1 && !h->forced_local
	      && h->root.type == bfd_link_hash_undefweak)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;
      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return TRUE;
}
15658
15659 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
15660 read-only sections. */
15661
15662 static bfd_boolean
15663 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
15664 {
15665 asection *sec;
15666
15667 if (h->root.type == bfd_link_hash_indirect)
15668 return TRUE;
15669
15670 sec = readonly_dynrelocs (h);
15671 if (sec != NULL)
15672 {
15673 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
15674
15675 info->flags |= DF_TEXTREL;
15676 info->callbacks->minfo
15677 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
15678 sec->owner, h->root.root.string, sec);
15679
15680 /* Not an error, just cut short the traversal. */
15681 return FALSE;
15682 }
15683 return TRUE;
15684 }
15685
15686 void
15687 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
15688 int byteswap_code)
15689 {
15690 struct elf32_arm_link_hash_table *globals;
15691
15692 globals = elf32_arm_hash_table (info);
15693 if (globals == NULL)
15694 return;
15695
15696 globals->byteswap_code = byteswap_code;
15697 }
15698
/* Set the sizes of the dynamic sections.  Backend hook called once
   all input files have been seen: sizes .interp, local and global
   GOT/PLT entries, TLS structures and reloc sections, allocates their
   contents, and adds the DT_* tags to .dynamic.  Returns FALSE on
   allocation failure.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      /* Reserve space for dynamic relocs recorded against local
	 symbols in each input section.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      /* Walk the per-BFD parallel arrays of local-symbol GOT/iplt
	 bookkeeping in lock step.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  /* tls_trampoline is non-zero (set to -1 above) when a TLS descriptor
     trampoline is needed; its final PLT offset is fixed here.  */
  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->root.sdynbss
	       && s != htab->root.sdynrelro)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if ( !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt
	      && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
		  || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
16093
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  Backend always_size_sections hook:
   when the link has a TLS output section, define the hidden, local
   symbol _TLS_MODULE_BASE_ at offset 0 of that section so TLS
   relocations can be computed relative to it.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;

  /* Relocatable links keep TLS relocs as-is; nothing to define.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* CREATE is TRUE, so this returns an (possibly fresh) entry for
	 _TLS_MODULE_BASE_.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* NOTE(review): `type' is set on the entry returned by the
	     lookup before TLSBASE is re-pointed at BH; presumably BH
	     aliases the same hash entry here -- confirm before
	     reordering these statements.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }
  return TRUE;
}
16136
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  Backend finish_dynamic_symbol hook: fills in
   the symbol's PLT entry (if any), emits its R_ARM_COPY relocation
   (if it needs one), and fixes up SYM, the ELF symbol about to be
   written to the output symbol table.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* A plt.offset other than -1 means a PLT (or IPLT) entry was
     allocated for this symbol.  */
  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      /* Read-only copies live in .data.rel.ro, others in .dynbss;
	 pick the matching reloc section.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
16226
16227 static void
16228 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
16229 void *contents,
16230 const unsigned long *template, unsigned count)
16231 {
16232 unsigned ix;
16233
16234 for (ix = 0; ix != count; ix++)
16235 {
16236 unsigned long insn = template[ix];
16237
16238 /* Emit mov pc,rx if bx is not permitted. */
16239 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
16240 insn = (insn & 0xf000000f) | 0x01a0f000;
16241 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
16242 }
16243 }
16244
16245 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
16246 other variants, NaCl needs this entry in a static executable's
16247 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
16248 zero. For .iplt really only the last bundle is useful, and .iplt
16249 could have a shorter first entry, with each individual PLT entry's
16250 relative branch calculated differently so it targets the last
16251 bundle instead of the instruction before it (labelled .Lplt_tail
16252 above). But it's simpler to keep the size and layout of PLT0
16253 consistent with the dynamic case, at the cost of some dead code at
16254 the start of .iplt and the one dead store to the stack at the start
16255 of .Lplt_tail. */
16256 static void
16257 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
16258 asection *plt, bfd_vma got_displacement)
16259 {
16260 unsigned int i;
16261
16262 put_arm_insn (htab, output_bfd,
16263 elf32_arm_nacl_plt0_entry[0]
16264 | arm_movw_immediate (got_displacement),
16265 plt->contents + 0);
16266 put_arm_insn (htab, output_bfd,
16267 elf32_arm_nacl_plt0_entry[1]
16268 | arm_movt_immediate (got_displacement),
16269 plt->contents + 4);
16270
16271 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
16272 put_arm_insn (htab, output_bfd,
16273 elf32_arm_nacl_plt0_entry[i],
16274 plt->contents + (i * 4));
16275 }
16276
16277 /* Finish up the dynamic sections. */
16278
static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Walk every entry in .dynamic and fill in the values that could
	 not be known until section layout was final.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags need fixing up only on Symbian (BPABI), where
	       they must hold file offsets rather than VMAs.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  _bfd_error_handler
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relocation section, since relocation sections are
		 never allocated under the BPABI.  PLT relocs are also
		 included.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  /* d_val starts at 0, so "d_un.d_val - 1" wraps to the
		     maximum ufile_ptr on the first iteration; every
		     later section offset is compared against the
		     current minimum.  */
		  dyn.d_un.d_val = 0;
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Set the bottom bit of DT_INIT/FINI if the
		 corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  /* The two data words at offsets 24 and 28 of the trampoline
	     hold PC-relative displacements; template words 6 and 7 are
	     the corresponding addends.  NOTE (review): these offsets
	     assume the 6-instruction template layout — confirm against
	     dl_tlsdesc_lazy_trampoline's definition.  */
	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry has two unloaded relocs: one against
	     _GLOBAL_OFFSET_TABLE_ and one against _PROCEDURE_LINKAGE_TABLE_.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (or 0 in a static
	     link); GOT[1] and GOT[2] are reserved for the dynamic
	     linker.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  return TRUE;
}
16630
16631 static void
16632 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
16633 {
16634 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
16635 struct elf32_arm_link_hash_table *globals;
16636 struct elf_segment_map *m;
16637
16638 i_ehdrp = elf_elfheader (abfd);
16639
16640 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
16641 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
16642 else
16643 _bfd_elf_post_process_headers (abfd, link_info);
16644 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
16645
16646 if (link_info)
16647 {
16648 globals = elf32_arm_hash_table (link_info);
16649 if (globals != NULL && globals->byteswap_code)
16650 i_ehdrp->e_flags |= EF_ARM_BE8;
16651
16652 if (globals->fdpic_p)
16653 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
16654 }
16655
16656 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
16657 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
16658 {
16659 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
16660 if (abi == AEABI_VFP_args_vfp)
16661 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
16662 else
16663 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
16664 }
16665
16666 /* Scan segment to set p_flags attribute if it contains only sections with
16667 SHF_ARM_PURECODE flag. */
16668 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
16669 {
16670 unsigned int j;
16671
16672 if (m->count == 0)
16673 continue;
16674 for (j = 0; j < m->count; j++)
16675 {
16676 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
16677 break;
16678 }
16679 if (j == m->count)
16680 {
16681 m->p_flags = PF_X;
16682 m->p_flags_valid = 1;
16683 }
16684 }
16685 }
16686
16687 static enum elf_reloc_type_class
16688 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
16689 const asection *rel_sec ATTRIBUTE_UNUSED,
16690 const Elf_Internal_Rela *rela)
16691 {
16692 switch ((int) ELF32_R_TYPE (rela->r_info))
16693 {
16694 case R_ARM_RELATIVE:
16695 return reloc_class_relative;
16696 case R_ARM_JUMP_SLOT:
16697 return reloc_class_plt;
16698 case R_ARM_COPY:
16699 return reloc_class_copy;
16700 case R_ARM_IRELATIVE:
16701 return reloc_class_ifunc;
16702 default:
16703 return reloc_class_normal;
16704 }
16705 }
16706
/* Hook run just before the output file is written: refresh the
   contents of the ARM note section (ARM_NOTE_SECTION), if present.  */
static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
16712
16713 /* Return TRUE if this is an unwinding table entry. */
16714
16715 static bfd_boolean
16716 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
16717 {
16718 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
16719 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
16720 }
16721
16722
16723 /* Set the type and flags for an ARM section. We do this by
16724 the section name, which is a hack, but ought to work. */
16725
16726 static bfd_boolean
16727 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
16728 {
16729 const char * name;
16730
16731 name = bfd_get_section_name (abfd, sec);
16732
16733 if (is_arm_elf_unwind_section_name (abfd, name))
16734 {
16735 hdr->sh_type = SHT_ARM_EXIDX;
16736 hdr->sh_flags |= SHF_LINK_ORDER;
16737 }
16738
16739 if (sec->flags & SEC_ELF_PURECODE)
16740 hdr->sh_flags |= SHF_ARM_PURECODE;
16741
16742 return TRUE;
16743 }
16744
16745 /* Handle an ARM specific section when reading an object file. This is
16746 called when bfd_section_from_shdr finds a section with an unknown
16747 type. */
16748
16749 static bfd_boolean
16750 elf32_arm_section_from_shdr (bfd *abfd,
16751 Elf_Internal_Shdr * hdr,
16752 const char *name,
16753 int shindex)
16754 {
16755 /* There ought to be a place to keep ELF backend specific flags, but
16756 at the moment there isn't one. We just keep track of the
16757 sections by their name, instead. Fortunately, the ABI gives
16758 names for all the ARM specific sections, so we will probably get
16759 away with this. */
16760 switch (hdr->sh_type)
16761 {
16762 case SHT_ARM_EXIDX:
16763 case SHT_ARM_PREEMPTMAP:
16764 case SHT_ARM_ATTRIBUTES:
16765 break;
16766
16767 default:
16768 return FALSE;
16769 }
16770
16771 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
16772 return FALSE;
16773
16774 return TRUE;
16775 }
16776
16777 static _arm_elf_section_data *
16778 get_arm_elf_section_data (asection * sec)
16779 {
16780 if (sec && sec->owner && is_arm_elf (sec->owner))
16781 return elf32_arm_section_data (sec);
16782 else
16783 return NULL;
16784 }
16785
/* Context threaded through the mapping-symbol and stub-symbol output
   helpers below.  */
typedef struct
{
  /* Opaque cookie handed back to FUNC.  */
  void *flaginfo;
  struct bfd_link_info *info;
  /* Section currently being processed and its output-section index.  */
  asection *sec;
  int sec_shndx;
  /* Callback that actually emits one symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
16795
/* The three kinds of ARM mapping symbols: $a (Arm code), $t (Thumb
   code) and $d (data).  Used to index the names[] array in
   elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
16802
16803
16804 /* Output a single mapping symbol. */
16805
16806 static bfd_boolean
16807 elf32_arm_output_map_sym (output_arch_syminfo *osi,
16808 enum map_symbol_type type,
16809 bfd_vma offset)
16810 {
16811 static const char *names[3] = {"$a", "$t", "$d"};
16812 Elf_Internal_Sym sym;
16813
16814 sym.st_value = osi->sec->output_section->vma
16815 + osi->sec->output_offset
16816 + offset;
16817 sym.st_size = 0;
16818 sym.st_other = 0;
16819 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
16820 sym.st_shndx = osi->sec_shndx;
16821 sym.st_target_internal = 0;
16822 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
16823 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
16824 }
16825
16826 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
16827 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
16828
static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* An offset of -1 means no PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* Select the section the entry lives in; .iplt has no header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Mask off the low bit of the recorded offset.  NOTE (review):
     presumably a Thumb-entry marker — confirm against the PLT
     allocation code.  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      /* Symbian entries: code at +0, a data word at +4.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      /* VxWorks entries interleave code and data words.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      /* NaCl entries are pure Arm code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      /* Thumb-only PLTs contain only Thumb code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      /* When present, the Thumb thunk sits 4 bytes before the Arm
	 entry proper.  */
      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
16916
16917 /* Output mapping symbols for PLT entries associated with H. */
16918
16919 static bfd_boolean
16920 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
16921 {
16922 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
16923 struct elf32_arm_link_hash_entry *eh;
16924
16925 if (h->root.type == bfd_link_hash_indirect)
16926 return TRUE;
16927
16928 if (h->root.type == bfd_link_hash_warning)
16929 /* When warning symbols are created, they **replace** the "real"
16930 entry in the hash table, thus we never get to see the real
16931 symbol in a hash traversal. So look at it now. */
16932 h = (struct elf_link_hash_entry *) h->root.u.i.link;
16933
16934 eh = (struct elf32_arm_link_hash_entry *) h;
16935 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
16936 &h->plt, &eh->plt);
16937 }
16938
16939 /* Bind a veneered symbol to its veneer identified by its hash entry
16940 STUB_ENTRY. The veneered location thus loose its symbol. */
16941
16942 static void
16943 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
16944 {
16945 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
16946
16947 BFD_ASSERT (hash);
16948 hash->root.root.u.def.section = stub_entry->stub_sec;
16949 hash->root.root.u.def.value = stub_entry->stub_offset;
16950 hash->root.size = stub_entry->stub_size;
16951 }
16952
16953 /* Output a single local symbol for a generated stub. */
16954
16955 static bfd_boolean
16956 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
16957 bfd_vma offset, bfd_vma size)
16958 {
16959 Elf_Internal_Sym sym;
16960
16961 sym.st_value = osi->sec->output_section->vma
16962 + osi->sec->output_offset
16963 + offset;
16964 sym.st_size = size;
16965 sym.st_other = 0;
16966 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16967 sym.st_shndx = osi->sec_shndx;
16968 sym.st_target_internal = 0;
16969 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
16970 }
16971
/* bfd_hash_traverse callback: emit the naming symbol and the mapping
   symbols for one generated stub, if it lives in OSI->sec.  */
static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a local symbol naming the stub; Thumb entry points get
	 the low bit set in their value.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the template, emitting a mapping symbol ($a/$t/$d) whenever
     the element type changes, accumulating the byte offset as we go.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance by the size of this template element: 2 bytes for a
	 16-bit Thumb insn, 4 bytes otherwise.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
17079
17080 /* Output mapping symbols for linker generated sections,
17081 and for those data-only sections that do not have a
17082 $d. */
17083
static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* Pick the per-veneer size matching the flavour of glue emitted:
	 PIC, v5 BLX-based, or plain static.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      /* Each veneer is Arm code with one trailing data word.  */
      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      /* Each veneer starts with Thumb code and switches to Arm at +4.  */
      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Map symbols for every PLT entry: global symbols via the hash
	 traversal, then local ifunc entries from each input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
17314
17315 /* Filter normal symbols of CMSE entry functions of ABFD to include in
17316 the import library. All SYMCOUNT symbols of ABFD can be examined
17317 from their pointers in SYMS. Pointers of symbols to keep should be
17318 stored continuously at the beginning of that array.
17319
17320 Returns the number of symbols to keep. */
17321
17322 static unsigned int
17323 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
17324 struct bfd_link_info *info,
17325 asymbol **syms, long symcount)
17326 {
17327 size_t maxnamelen;
17328 char *cmse_name;
17329 long src_count, dst_count = 0;
17330 struct elf32_arm_link_hash_table *htab;
17331
17332 htab = elf32_arm_hash_table (info);
17333 if (!htab->stub_bfd || !htab->stub_bfd->sections)
17334 symcount = 0;
17335
17336 maxnamelen = 128;
17337 cmse_name = (char *) bfd_malloc (maxnamelen);
17338 for (src_count = 0; src_count < symcount; src_count++)
17339 {
17340 struct elf32_arm_link_hash_entry *cmse_hash;
17341 asymbol *sym;
17342 flagword flags;
17343 char *name;
17344 size_t namelen;
17345
17346 sym = syms[src_count];
17347 flags = sym->flags;
17348 name = (char *) bfd_asymbol_name (sym);
17349
17350 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
17351 continue;
17352 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
17353 continue;
17354
17355 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
17356 if (namelen > maxnamelen)
17357 {
17358 cmse_name = (char *)
17359 bfd_realloc (cmse_name, namelen);
17360 maxnamelen = namelen;
17361 }
17362 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
17363 cmse_hash = (struct elf32_arm_link_hash_entry *)
17364 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
17365
17366 if (!cmse_hash
17367 || (cmse_hash->root.root.type != bfd_link_hash_defined
17368 && cmse_hash->root.root.type != bfd_link_hash_defweak)
17369 || cmse_hash->root.type != STT_FUNC)
17370 continue;
17371
17372 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
17373 continue;
17374
17375 syms[dst_count++] = sym;
17376 }
17377 free (cmse_name);
17378
17379 syms[dst_count] = NULL;
17380
17381 return dst_count;
17382 }
17383
17384 /* Filter symbols of ABFD to include in the import library. All
17385 SYMCOUNT symbols of ABFD can be examined from their pointers in
17386 SYMS. Pointers of symbols to keep should be stored continuously at
17387 the beginning of that array.
17388
17389 Returns the number of symbols to keep. */
17390
17391 static unsigned int
17392 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
17393 struct bfd_link_info *info,
17394 asymbol **syms, long symcount)
17395 {
17396 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
17397
17398 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
17399 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
17400 library to be a relocatable object file. */
17401 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
17402 if (globals->cmse_implib)
17403 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
17404 else
17405 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
17406 }
17407
17408 /* Allocate target specific section data. */
17409
17410 static bfd_boolean
17411 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
17412 {
17413 if (!sec->used_by_bfd)
17414 {
17415 _arm_elf_section_data *sdata;
17416 bfd_size_type amt = sizeof (*sdata);
17417
17418 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
17419 if (sdata == NULL)
17420 return FALSE;
17421 sec->used_by_bfd = sdata;
17422 }
17423
17424 return _bfd_elf_new_section_hook (abfd, sec);
17425 }
17426
17427
17428 /* Used to order a list of mapping symbols by address. */
17429
17430 static int
17431 elf32_arm_compare_mapping (const void * a, const void * b)
17432 {
17433 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
17434 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
17435
17436 if (amap->vma > bmap->vma)
17437 return 1;
17438 else if (amap->vma < bmap->vma)
17439 return -1;
17440 else if (amap->type > bmap->type)
17441 /* Ensure results do not depend on the host qsort for objects with
17442 multiple mapping symbols at the same address by sorting on type
17443 after vma. */
17444 return 1;
17445 else if (amap->type < bmap->type)
17446 return -1;
17447 else
17448 return 0;
17449 }
17450
17451 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
17452
17453 static unsigned long
17454 offset_prel31 (unsigned long addr, bfd_vma offset)
17455 {
17456 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
17457 }
17458
17459 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
17460 relocations. */
17461
17462 static void
17463 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
17464 {
17465 unsigned long first_word = bfd_get_32 (output_bfd, from);
17466 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
17467
17468 /* High bit of first word is supposed to be zero. */
17469 if ((first_word & 0x80000000ul) == 0)
17470 first_word = offset_prel31 (first_word, offset);
17471
17472 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
17473 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
17474 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
17475 second_word = offset_prel31 (second_word, offset);
17476
17477 bfd_put_32 (output_bfd, first_word, to);
17478 bfd_put_32 (output_bfd, second_word, to + 4);
17479 }
17480
17481 /* Data for make_branch_to_a8_stub(). */
17482
struct a8_branch_to_stub_data
{
  /* The input section whose contents are currently being written out.  */
  asection *writing_section;
  /* Buffer holding the contents of WRITING_SECTION; branches to the
     Cortex-A8 erratum stubs are patched into this buffer.  */
  bfd_byte *contents;
};
17488
17489
17490 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
17491 places for a particular section. */
17492
/* Traversal callback: patch, into the section buffer carried by IN_ARG
   (a struct a8_branch_to_stub_data), a Thumb-2 branch from the location
   of the veneered instruction to its Cortex-A8 erratum stub.  Returns
   FALSE (stopping the traversal) on error.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Skip stubs for other sections, and stub types that are not
     Cortex-A8 veneers.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX veneer the branch offset is computed from the ARM-state
     (4-byte aligned) address of the veneered instruction.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Build the 32-bit Thumb-2 branch that replaces the veneered
     instruction; the encodings below share the jump24 offset fields.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;	/* B.W (encoding T4).  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;	/* BLX (encoding T2).  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL (encoding T1).  */

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Store the branch as two Thumb halfwords, most significant first.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
17597
17598 /* Beginning of stm32l4xx work-around. */
17599
17600 /* Functions encoding instructions necessary for the emission of the
17601 fix-stm32l4xx-629360.
17602 Encoding is extracted from the
17603 ARM (C) Architecture Reference Manual
17604 ARMv7-A and ARMv7-R edition
17605 ARM DDI 0406C.b (ID072512). */
17606
17607 static inline bfd_vma
17608 create_instruction_branch_absolute (int branch_offset)
17609 {
17610 /* A8.8.18 B (A8-334)
17611 B target_address (Encoding T4). */
17612 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
17613 /* jump offset is: S:I1:I2:imm10:imm11:0. */
17614 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
17615
17616 int s = ((branch_offset & 0x1000000) >> 24);
17617 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
17618 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
17619
17620 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
17621 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
17622
17623 bfd_vma patched_inst = 0xf0009000
17624 | s << 26 /* S. */
17625 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
17626 | j1 << 13 /* J1. */
17627 | j2 << 11 /* J2. */
17628 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
17629
17630 return patched_inst;
17631 }
17632
17633 static inline bfd_vma
17634 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
17635 {
17636 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
17637 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
17638 bfd_vma patched_inst = 0xe8900000
17639 | (/*W=*/wback << 21)
17640 | (base_reg << 16)
17641 | (reg_mask & 0x0000ffff);
17642
17643 return patched_inst;
17644 }
17645
17646 static inline bfd_vma
17647 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
17648 {
17649 /* A8.8.60 LDMDB/LDMEA (A8-402)
17650 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
17651 bfd_vma patched_inst = 0xe9100000
17652 | (/*W=*/wback << 21)
17653 | (base_reg << 16)
17654 | (reg_mask & 0x0000ffff);
17655
17656 return patched_inst;
17657 }
17658
17659 static inline bfd_vma
17660 create_instruction_mov (int target_reg, int source_reg)
17661 {
17662 /* A8.8.103 MOV (register) (A8-486)
17663 MOV Rd, Rm (Encoding T1). */
17664 bfd_vma patched_inst = 0x4600
17665 | (target_reg & 0x7)
17666 | ((target_reg & 0x8) >> 3) << 7
17667 | (source_reg << 3);
17668
17669 return patched_inst;
17670 }
17671
17672 static inline bfd_vma
17673 create_instruction_sub (int target_reg, int source_reg, int value)
17674 {
17675 /* A8.8.221 SUB (immediate) (A8-708)
17676 SUB Rd, Rn, #value (Encoding T3). */
17677 bfd_vma patched_inst = 0xf1a00000
17678 | (target_reg << 8)
17679 | (source_reg << 16)
17680 | (/*S=*/0 << 20)
17681 | ((value & 0x800) >> 11) << 26
17682 | ((value & 0x700) >> 8) << 12
17683 | (value & 0x0ff);
17684
17685 return patched_inst;
17686 }
17687
17688 static inline bfd_vma
17689 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
17690 int first_reg)
17691 {
17692 /* A8.8.332 VLDM (A8-922)
17693 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
17694 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
17695 | (/*W=*/wback << 21)
17696 | (base_reg << 16)
17697 | (num_words & 0x000000ff)
17698 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
17699 | (first_reg & 0x00000001) << 22;
17700
17701 return patched_inst;
17702 }
17703
17704 static inline bfd_vma
17705 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
17706 int first_reg)
17707 {
17708 /* A8.8.332 VLDM (A8-922)
17709 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
17710 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
17711 | (base_reg << 16)
17712 | (num_words & 0x000000ff)
17713 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
17714 | (first_reg & 0x00000001) << 22;
17715
17716 return patched_inst;
17717 }
17718
17719 static inline bfd_vma
17720 create_instruction_udf_w (int value)
17721 {
17722 /* A8.8.247 UDF (A8-758)
17723 Undefined (Encoding T2). */
17724 bfd_vma patched_inst = 0xf7f0a000
17725 | (value & 0x00000fff)
17726 | (value & 0x000f0000) << 16;
17727
17728 return patched_inst;
17729 }
17730
17731 static inline bfd_vma
17732 create_instruction_udf (int value)
17733 {
17734 /* A8.8.247 UDF (A8-758)
17735 Undefined (Encoding T1). */
17736 bfd_vma patched_inst = 0xde00
17737 | (value & 0xff);
17738
17739 return patched_inst;
17740 }
17741
17742 /* Functions writing an instruction in memory, returning the next
17743 memory position to write to. */
17744
/* Write the 32-bit Thumb-2 instruction INSN into OUTPUT_BFD at PT and
   return the address of the next free slot (PT + 4).  */

static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}
17752
/* Write the 16-bit Thumb instruction INSN (low halfword of the insn32)
   into OUTPUT_BFD at PT and return the address of the next free slot
   (PT + 2).  */

static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}
17760
17761 /* Function filling up a region in memory with T1 and T2 UDFs taking
17762 care of alignment. */
17763
17764 static bfd_byte *
17765 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
17766 bfd * output_bfd,
17767 const bfd_byte * const base_stub_contents,
17768 bfd_byte * const from_stub_contents,
17769 const bfd_byte * const end_stub_contents)
17770 {
17771 bfd_byte *current_stub_contents = from_stub_contents;
17772
17773 /* Fill the remaining of the stub with deterministic contents : UDF
17774 instructions.
17775 Check if realignment is needed on modulo 4 frontier using T1, to
17776 further use T2. */
17777 if ((current_stub_contents < end_stub_contents)
17778 && !((current_stub_contents - base_stub_contents) % 2)
17779 && ((current_stub_contents - base_stub_contents) % 4))
17780 current_stub_contents =
17781 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17782 create_instruction_udf (0));
17783
17784 for (; current_stub_contents < end_stub_contents;)
17785 current_stub_contents =
17786 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17787 create_instruction_udf_w (0));
17788
17789 return current_stub_contents;
17790 }
17791
17792 /* Functions writing the stream of instructions equivalent to the
17793 derived sequence for ldmia, ldmdb, vldm respectively. */
17794
/* Emit, at BASE_STUB_CONTENTS, the stm32l4xx erratum veneer replacing
   the wide LDMIA instruction INITIAL_INSN located at INITIAL_INSN_ADDR.
   The veneer reproduces the load using at most 8 registers per LDM and
   branches back to the instruction following the original one (unless
   the original load writes PC).  Unused veneer space is filled with UDF
   instructions.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (writeback) bit.  */
  int ri, rn = (initial_insn & 0x000F0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the encoding restrictions on the original insn:  */

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
17938
/* Emit, at BASE_STUB_CONTENTS, the stm32l4xx erratum veneer replacing
   the wide LDMDB instruction INITIAL_INSN located at INITIAL_INSN_ADDR.
   The veneer reproduces the load with LDMs of at most 8 registers each,
   choosing a spare base register when needed, and branches back to the
   instruction following the original one (unless the original load
   writes PC).  Unused veneer space is filled with UDF instructions.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (writeback) bit.  */
  int ri, rn = (initial_insn & 0x000f0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the encoding restrictions on the original insn:  */

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function; one case per combination of the
     writeback, PC-restore and Rn-restore properties.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
	"undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
18184
/* Emit into the veneer at BASE_STUB_CONTENTS an STM32L4XX-erratum-safe
   replacement for the Thumb-2 VLDM instruction INITIAL_INSN, which was
   found at INITIAL_INSN_ADDR.  Loads of 8 words or fewer do not trigger
   the hardware issue and are copied unchanged; larger loads are split
   into chunks of at most 8 words each.  The veneer always ends with a
   branch back to the instruction following the original one, and any
   unused veneer space is filled with UDF instructions.  */
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Bits [7:0] of the VLDM encoding hold the transfer length in words.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Decode the addressing mode from the P/U/W bits (bits 24..21
	 after the shift below) and the precision from the encoding.  */
      bfd_boolean is_dp = /* DP encoding. */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      /* Rn field, bits [19:16] of the encoding.  */
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Every chunk but the last transfers exactly 8 words;
		 the last one carries the remainder.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
18305
18306 static void
18307 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
18308 bfd * output_bfd,
18309 const insn32 wrong_insn,
18310 const bfd_byte *const wrong_insn_addr,
18311 bfd_byte *const stub_contents)
18312 {
18313 if (is_thumb2_ldmia (wrong_insn))
18314 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
18315 wrong_insn, wrong_insn_addr,
18316 stub_contents);
18317 else if (is_thumb2_ldmdb (wrong_insn))
18318 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
18319 wrong_insn, wrong_insn_addr,
18320 stub_contents);
18321 else if (is_thumb2_vldm (wrong_insn))
18322 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
18323 wrong_insn, wrong_insn_addr,
18324 stub_contents);
18325 }
18326
18327 /* End of stm32l4xx work-around. */
18328
18329
18330 /* Do code byteswapping. Return FALSE afterwards so that the section is
18331 written out as normal. */
18332
18333 static bfd_boolean
18334 elf32_arm_write_section (bfd *output_bfd,
18335 struct bfd_link_info *link_info,
18336 asection *sec,
18337 bfd_byte *contents)
18338 {
18339 unsigned int mapcount, errcount;
18340 _arm_elf_section_data *arm_data;
18341 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
18342 elf32_arm_section_map *map;
18343 elf32_vfp11_erratum_list *errnode;
18344 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
18345 bfd_vma ptr;
18346 bfd_vma end;
18347 bfd_vma offset = sec->output_section->vma + sec->output_offset;
18348 bfd_byte tmp;
18349 unsigned int i;
18350
18351 if (globals == NULL)
18352 return FALSE;
18353
18354 /* If this section has not been allocated an _arm_elf_section_data
18355 structure then we cannot record anything. */
18356 arm_data = get_arm_elf_section_data (sec);
18357 if (arm_data == NULL)
18358 return FALSE;
18359
18360 mapcount = arm_data->mapcount;
18361 map = arm_data->map;
18362 errcount = arm_data->erratumcount;
18363
18364 if (errcount != 0)
18365 {
18366 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
18367
18368 for (errnode = arm_data->erratumlist; errnode != 0;
18369 errnode = errnode->next)
18370 {
18371 bfd_vma target = errnode->vma - offset;
18372
18373 switch (errnode->type)
18374 {
18375 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
18376 {
18377 bfd_vma branch_to_veneer;
18378 /* Original condition code of instruction, plus bit mask for
18379 ARM B instruction. */
18380 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
18381 | 0x0a000000;
18382
18383 /* The instruction is before the label. */
18384 target -= 4;
18385
18386 /* Above offset included in -4 below. */
18387 branch_to_veneer = errnode->u.b.veneer->vma
18388 - errnode->vma - 4;
18389
18390 if ((signed) branch_to_veneer < -(1 << 25)
18391 || (signed) branch_to_veneer >= (1 << 25))
18392 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
18393 "range"), output_bfd);
18394
18395 insn |= (branch_to_veneer >> 2) & 0xffffff;
18396 contents[endianflip ^ target] = insn & 0xff;
18397 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
18398 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
18399 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
18400 }
18401 break;
18402
18403 case VFP11_ERRATUM_ARM_VENEER:
18404 {
18405 bfd_vma branch_from_veneer;
18406 unsigned int insn;
18407
18408 /* Take size of veneer into account. */
18409 branch_from_veneer = errnode->u.v.branch->vma
18410 - errnode->vma - 12;
18411
18412 if ((signed) branch_from_veneer < -(1 << 25)
18413 || (signed) branch_from_veneer >= (1 << 25))
18414 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
18415 "range"), output_bfd);
18416
18417 /* Original instruction. */
18418 insn = errnode->u.v.branch->u.b.vfp_insn;
18419 contents[endianflip ^ target] = insn & 0xff;
18420 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
18421 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
18422 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
18423
18424 /* Branch back to insn after original insn. */
18425 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
18426 contents[endianflip ^ (target + 4)] = insn & 0xff;
18427 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
18428 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
18429 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
18430 }
18431 break;
18432
18433 default:
18434 abort ();
18435 }
18436 }
18437 }
18438
18439 if (arm_data->stm32l4xx_erratumcount != 0)
18440 {
18441 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
18442 stm32l4xx_errnode != 0;
18443 stm32l4xx_errnode = stm32l4xx_errnode->next)
18444 {
18445 bfd_vma target = stm32l4xx_errnode->vma - offset;
18446
18447 switch (stm32l4xx_errnode->type)
18448 {
18449 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
18450 {
18451 unsigned int insn;
18452 bfd_vma branch_to_veneer =
18453 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
18454
18455 if ((signed) branch_to_veneer < -(1 << 24)
18456 || (signed) branch_to_veneer >= (1 << 24))
18457 {
18458 bfd_vma out_of_range =
18459 ((signed) branch_to_veneer < -(1 << 24)) ?
18460 - branch_to_veneer - (1 << 24) :
18461 ((signed) branch_to_veneer >= (1 << 24)) ?
18462 branch_to_veneer - (1 << 24) : 0;
18463
18464 _bfd_error_handler
18465 (_("%pB(%#" PRIx64 "): error: "
18466 "cannot create STM32L4XX veneer; "
18467 "jump out of range by %" PRId64 " bytes; "
18468 "cannot encode branch instruction"),
18469 output_bfd,
18470 (uint64_t) (stm32l4xx_errnode->vma - 4),
18471 (int64_t) out_of_range);
18472 continue;
18473 }
18474
18475 insn = create_instruction_branch_absolute
18476 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
18477
18478 /* The instruction is before the label. */
18479 target -= 4;
18480
18481 put_thumb2_insn (globals, output_bfd,
18482 (bfd_vma) insn, contents + target);
18483 }
18484 break;
18485
18486 case STM32L4XX_ERRATUM_VENEER:
18487 {
18488 bfd_byte * veneer;
18489 bfd_byte * veneer_r;
18490 unsigned int insn;
18491
18492 veneer = contents + target;
18493 veneer_r = veneer
18494 + stm32l4xx_errnode->u.b.veneer->vma
18495 - stm32l4xx_errnode->vma - 4;
18496
18497 if ((signed) (veneer_r - veneer -
18498 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
18499 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
18500 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
18501 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
18502 || (signed) (veneer_r - veneer) >= (1 << 24))
18503 {
18504 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
18505 "veneer"), output_bfd);
18506 continue;
18507 }
18508
18509 /* Original instruction. */
18510 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
18511
18512 stm32l4xx_create_replacing_stub
18513 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
18514 }
18515 break;
18516
18517 default:
18518 abort ();
18519 }
18520 }
18521 }
18522
18523 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
18524 {
18525 arm_unwind_table_edit *edit_node
18526 = arm_data->u.exidx.unwind_edit_list;
18527 /* Now, sec->size is the size of the section we will write. The original
18528 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
18529 markers) was sec->rawsize. (This isn't the case if we perform no
18530 edits, then rawsize will be zero and we should use size). */
18531 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
18532 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
18533 unsigned int in_index, out_index;
18534 bfd_vma add_to_offsets = 0;
18535
18536 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
18537 {
18538 if (edit_node)
18539 {
18540 unsigned int edit_index = edit_node->index;
18541
18542 if (in_index < edit_index && in_index * 8 < input_size)
18543 {
18544 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
18545 contents + in_index * 8, add_to_offsets);
18546 out_index++;
18547 in_index++;
18548 }
18549 else if (in_index == edit_index
18550 || (in_index * 8 >= input_size
18551 && edit_index == UINT_MAX))
18552 {
18553 switch (edit_node->type)
18554 {
18555 case DELETE_EXIDX_ENTRY:
18556 in_index++;
18557 add_to_offsets += 8;
18558 break;
18559
18560 case INSERT_EXIDX_CANTUNWIND_AT_END:
18561 {
18562 asection *text_sec = edit_node->linked_section;
18563 bfd_vma text_offset = text_sec->output_section->vma
18564 + text_sec->output_offset
18565 + text_sec->size;
18566 bfd_vma exidx_offset = offset + out_index * 8;
18567 unsigned long prel31_offset;
18568
18569 /* Note: this is meant to be equivalent to an
18570 R_ARM_PREL31 relocation. These synthetic
18571 EXIDX_CANTUNWIND markers are not relocated by the
18572 usual BFD method. */
18573 prel31_offset = (text_offset - exidx_offset)
18574 & 0x7ffffffful;
18575 if (bfd_link_relocatable (link_info))
18576 {
18577 /* Here relocation for new EXIDX_CANTUNWIND is
18578 created, so there is no need to
18579 adjust offset by hand. */
18580 prel31_offset = text_sec->output_offset
18581 + text_sec->size;
18582 }
18583
18584 /* First address we can't unwind. */
18585 bfd_put_32 (output_bfd, prel31_offset,
18586 &edited_contents[out_index * 8]);
18587
18588 /* Code for EXIDX_CANTUNWIND. */
18589 bfd_put_32 (output_bfd, 0x1,
18590 &edited_contents[out_index * 8 + 4]);
18591
18592 out_index++;
18593 add_to_offsets -= 8;
18594 }
18595 break;
18596 }
18597
18598 edit_node = edit_node->next;
18599 }
18600 }
18601 else
18602 {
18603 /* No more edits, copy remaining entries verbatim. */
18604 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
18605 contents + in_index * 8, add_to_offsets);
18606 out_index++;
18607 in_index++;
18608 }
18609 }
18610
18611 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
18612 bfd_set_section_contents (output_bfd, sec->output_section,
18613 edited_contents,
18614 (file_ptr) sec->output_offset, sec->size);
18615
18616 return TRUE;
18617 }
18618
18619 /* Fix code to point to Cortex-A8 erratum stubs. */
18620 if (globals->fix_cortex_a8)
18621 {
18622 struct a8_branch_to_stub_data data;
18623
18624 data.writing_section = sec;
18625 data.contents = contents;
18626
18627 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
18628 & data);
18629 }
18630
18631 if (mapcount == 0)
18632 return FALSE;
18633
18634 if (globals->byteswap_code)
18635 {
18636 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
18637
18638 ptr = map[0].vma;
18639 for (i = 0; i < mapcount; i++)
18640 {
18641 if (i == mapcount - 1)
18642 end = sec->size;
18643 else
18644 end = map[i + 1].vma;
18645
18646 switch (map[i].type)
18647 {
18648 case 'a':
18649 /* Byte swap code words. */
18650 while (ptr + 3 < end)
18651 {
18652 tmp = contents[ptr];
18653 contents[ptr] = contents[ptr + 3];
18654 contents[ptr + 3] = tmp;
18655 tmp = contents[ptr + 1];
18656 contents[ptr + 1] = contents[ptr + 2];
18657 contents[ptr + 2] = tmp;
18658 ptr += 4;
18659 }
18660 break;
18661
18662 case 't':
18663 /* Byte swap code halfwords. */
18664 while (ptr + 1 < end)
18665 {
18666 tmp = contents[ptr];
18667 contents[ptr] = contents[ptr + 1];
18668 contents[ptr + 1] = tmp;
18669 ptr += 2;
18670 }
18671 break;
18672
18673 case 'd':
18674 /* Leave data alone. */
18675 break;
18676 }
18677 ptr = end;
18678 }
18679 }
18680
18681 free (map);
18682 arm_data->mapcount = -1;
18683 arm_data->mapsize = 0;
18684 arm_data->map = NULL;
18685
18686 return FALSE;
18687 }
18688
18689 /* Mangle thumb function symbols as we read them in. */
18690
18691 static bfd_boolean
18692 elf32_arm_swap_symbol_in (bfd * abfd,
18693 const void *psrc,
18694 const void *pshn,
18695 Elf_Internal_Sym *dst)
18696 {
18697 Elf_Internal_Shdr *symtab_hdr;
18698 const char *name = NULL;
18699
18700 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
18701 return FALSE;
18702 dst->st_target_internal = 0;
18703
18704 /* New EABI objects mark thumb function symbols by setting the low bit of
18705 the address. */
18706 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
18707 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
18708 {
18709 if (dst->st_value & 1)
18710 {
18711 dst->st_value &= ~(bfd_vma) 1;
18712 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
18713 ST_BRANCH_TO_THUMB);
18714 }
18715 else
18716 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
18717 }
18718 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
18719 {
18720 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
18721 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
18722 }
18723 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
18724 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
18725 else
18726 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
18727
18728 /* Mark CMSE special symbols. */
18729 symtab_hdr = & elf_symtab_hdr (abfd);
18730 if (symtab_hdr->sh_size)
18731 name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
18732 if (name && CONST_STRNEQ (name, CMSE_PREFIX))
18733 ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
18734
18735 return TRUE;
18736 }
18737
18738
18739 /* Mangle thumb function symbols as we write them out. */
18740
18741 static void
18742 elf32_arm_swap_symbol_out (bfd *abfd,
18743 const Elf_Internal_Sym *src,
18744 void *cdst,
18745 void *shndx)
18746 {
18747 Elf_Internal_Sym newsym;
18748
18749 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
18750 of the address set, as per the new EABI. We do this unconditionally
18751 because objcopy does not set the elf header flags until after
18752 it writes out the symbol table. */
18753 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
18754 {
18755 newsym = *src;
18756 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
18757 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
18758 if (newsym.st_shndx != SHN_UNDEF)
18759 {
18760 /* Do this only for defined symbols. At link type, the static
18761 linker will simulate the work of dynamic linker of resolving
18762 symbols and will carry over the thumbness of found symbols to
18763 the output symbol table. It's not clear how it happens, but
18764 the thumbness of undefined symbols can well be different at
18765 runtime, and writing '1' for them will be confusing for users
18766 and possibly for dynamic linker itself.
18767 */
18768 newsym.st_value |= 1;
18769 }
18770
18771 src = &newsym;
18772 }
18773 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
18774 }
18775
18776 /* Add the PT_ARM_EXIDX program header. */
18777
18778 static bfd_boolean
18779 elf32_arm_modify_segment_map (bfd *abfd,
18780 struct bfd_link_info *info ATTRIBUTE_UNUSED)
18781 {
18782 struct elf_segment_map *m;
18783 asection *sec;
18784
18785 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
18786 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
18787 {
18788 /* If there is already a PT_ARM_EXIDX header, then we do not
18789 want to add another one. This situation arises when running
18790 "strip"; the input binary already has the header. */
18791 m = elf_seg_map (abfd);
18792 while (m && m->p_type != PT_ARM_EXIDX)
18793 m = m->next;
18794 if (!m)
18795 {
18796 m = (struct elf_segment_map *)
18797 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
18798 if (m == NULL)
18799 return FALSE;
18800 m->p_type = PT_ARM_EXIDX;
18801 m->count = 1;
18802 m->sections[0] = sec;
18803
18804 m->next = elf_seg_map (abfd);
18805 elf_seg_map (abfd) = m;
18806 }
18807 }
18808
18809 return TRUE;
18810 }
18811
18812 /* We may add a PT_ARM_EXIDX program header. */
18813
18814 static int
18815 elf32_arm_additional_program_headers (bfd *abfd,
18816 struct bfd_link_info *info ATTRIBUTE_UNUSED)
18817 {
18818 asection *sec;
18819
18820 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
18821 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
18822 return 1;
18823 else
18824 return 0;
18825 }
18826
18827 /* Hook called by the linker routine which adds symbols from an object
18828 file. */
18829
18830 static bfd_boolean
18831 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
18832 Elf_Internal_Sym *sym, const char **namep,
18833 flagword *flagsp, asection **secp, bfd_vma *valp)
18834 {
18835 if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
18836 && (abfd->flags & DYNAMIC) == 0
18837 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
18838 elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
18839
18840 if (elf32_arm_hash_table (info) == NULL)
18841 return FALSE;
18842
18843 if (elf32_arm_hash_table (info)->vxworks_p
18844 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
18845 flagsp, secp, valp))
18846 return FALSE;
18847
18848 return TRUE;
18849 }
18850
/* We use this to override swap_symbol_in and swap_symbol_out.
   All other fields mirror the generic 32-bit ELF implementation
   (see struct elf_size_info in elf-bfd.h for the field layout).  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* hash table entry size.  */
  1,		/* internal relocs per external reloc.  */
  32, 2,	/* arch_size, log_file_align.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* ARM-specific override.  */
  elf32_arm_swap_symbol_out,	/* ARM-specific override.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
18881
18882 static bfd_vma
18883 read_code32 (const bfd *abfd, const bfd_byte *addr)
18884 {
18885 /* V7 BE8 code is always little endian. */
18886 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
18887 return bfd_getl32 (addr);
18888
18889 return bfd_get_32 (abfd, addr);
18890 }
18891
18892 static bfd_vma
18893 read_code16 (const bfd *abfd, const bfd_byte *addr)
18894 {
18895 /* V7 BE8 code is always little endian. */
18896 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
18897 return bfd_getl16 (addr);
18898
18899 return bfd_get_16 (abfd, addr);
18900 }
18901
18902 /* Return size of plt0 entry starting at ADDR
18903 or (bfd_vma) -1 if size can not be determined. */
18904
18905 static bfd_vma
18906 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
18907 {
18908 bfd_vma first_word;
18909 bfd_vma plt0_size;
18910
18911 first_word = read_code32 (abfd, addr);
18912
18913 if (first_word == elf32_arm_plt0_entry[0])
18914 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
18915 else if (first_word == elf32_thumb2_plt0_entry[0])
18916 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
18917 else
18918 /* We don't yet handle this PLT format. */
18919 return (bfd_vma) -1;
18920
18921 return plt0_size;
18922 }
18923
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size cannot be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms; detect those by
     checking whether the PLT header is the Thumb-2 template.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

  /* NOTE: the `else' after the #endif below belongs to whichever
     if/else-if chain the preprocessor selected above it.  */
#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
18963
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Synthesizes one "<name>@plt" symbol per .rel.plt entry, pointing at the
   corresponding PLT slot, so that disassemblers can label PLT code.
   Returns the number of symbols created (stored through RET), 0 if there
   is nothing to do, or -1 on error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;			/* String area following the symbol array.  */
  asection *plt;
  bfd_vma offset;		/* Running offset of the current PLT slot.  */
  bfd_byte *data;

  *ret = NULL;

  /* Only dynamic executables/shared objects carry a PLT.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity-check that .rel.plt really is a reloc section tied to the
     dynamic symbol table.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      /* Cache the contents on the section so the BFD owns (and later
	 frees) the buffer.  */
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the total allocation: one asymbol per reloc
     plus space for each "<name>@plt[+0xADDEND]" string.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip over the PLT header to reach the first slot.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: fill in the symbols and their names.  Stops early if
     a PLT slot's format is not recognised.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Strip leading zeros from the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
19080
19081 static bfd_boolean
19082 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
19083 {
19084 if (hdr->sh_flags & SHF_ARM_PURECODE)
19085 *flags |= SEC_ELF_PURECODE;
19086 return TRUE;
19087 }
19088
19089 static flagword
19090 elf32_arm_lookup_section_flags (char *flag_name)
19091 {
19092 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
19093 return SHF_ARM_PURECODE;
19094
19095 return SEC_NO_FLAGS;
19096 }
19097
19098 static unsigned int
19099 elf32_arm_count_additional_relocs (asection *sec)
19100 {
19101 struct _arm_elf_section_data *arm_data;
19102 arm_data = get_arm_elf_section_data (sec);
19103
19104 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
19105 }
19106
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* I is both the loop index and the result: 0 means "no text
	   section found yet" (index 0 is the null section header, so
	   it can double as a sentinel).  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Search the output headers for the output section of the
	       input section's sh_link target.
	       NOTE(review): if no header matches, I wraps to UINT_MAX
	       here (the `i-- > 0' test decrements past zero), so the
	       `if (i == 0)' fallback below would be skipped and an
	       out-of-range sh_link stored — confirm whether that case
	       can actually occur.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
19197
19198 /* Returns TRUE if NAME is an ARM mapping symbol.
19199 Traditionally the symbols $a, $d and $t have been used.
19200 The ARM ELF standard also defines $x (for A64 code). It also allows a
19201 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
19202 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
19203 not support them here. $t.x indicates the start of ThumbEE instructions. */
19204
19205 static bfd_boolean
19206 is_arm_mapping_symbol (const char * name)
19207 {
19208 return name != NULL /* Paranoia. */
19209 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
19210 the mapping symbols could have acquired a prefix.
19211 We do not support this here, since such symbols no
19212 longer conform to the ARM ELF ABI. */
19213 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
19214 && (name[2] == 0 || name[2] == '.');
19215 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
19216 any characters that follow the period are legal characters for the body
19217 of a symbol's name. For now we just assume that this is the case. */
19218 }
19219
19220 /* Make sure that mapping symbols in object files are not removed via the
19221 "strip --strip-unneeded" tool. These symbols are needed in order to
19222 correctly generate interworking veneers, and for byte swapping code
19223 regions. Once an object file has been linked, it is safe to remove the
19224 symbols as they will no longer be needed. */
19225
19226 static void
19227 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
19228 {
19229 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
19230 && sym->section != bfd_abs_section_ptr
19231 && is_arm_mapping_symbol (sym->name))
19232 sym->flags |= BSF_KEEP;
19233 }
19234
19235 #undef elf_backend_copy_special_section_fields
19236 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
19237
19238 #define ELF_ARCH bfd_arch_arm
19239 #define ELF_TARGET_ID ARM_ELF_DATA
19240 #define ELF_MACHINE_CODE EM_ARM
19241 #ifdef __QNXTARGET__
19242 #define ELF_MAXPAGESIZE 0x1000
19243 #else
19244 #define ELF_MAXPAGESIZE 0x10000
19245 #endif
19246 #define ELF_MINPAGESIZE 0x1000
19247 #define ELF_COMMONPAGESIZE 0x1000
19248
19249 #define bfd_elf32_mkobject elf32_arm_mkobject
19250
19251 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
19252 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
19253 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
19254 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
19255 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
19256 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
19257 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
19258 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
19259 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
19260 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
19261 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
19262 #define bfd_elf32_bfd_final_link elf32_arm_final_link
19263 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
19264
19265 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
19266 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
19267 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
19268 #define elf_backend_check_relocs elf32_arm_check_relocs
19269 #define elf_backend_update_relocs elf32_arm_update_relocs
19270 #define elf_backend_relocate_section elf32_arm_relocate_section
19271 #define elf_backend_write_section elf32_arm_write_section
19272 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
19273 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
19274 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
19275 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
19276 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
19277 #define elf_backend_always_size_sections elf32_arm_always_size_sections
19278 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
19279 #define elf_backend_post_process_headers elf32_arm_post_process_headers
19280 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
19281 #define elf_backend_object_p elf32_arm_object_p
19282 #define elf_backend_fake_sections elf32_arm_fake_sections
19283 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
19284 #define elf_backend_final_write_processing elf32_arm_final_write_processing
19285 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
19286 #define elf_backend_size_info elf32_arm_size_info
19287 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
19288 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
19289 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
19290 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
19291 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
19292 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
19293 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
19294 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
19295
19296 #define elf_backend_can_refcount 1
19297 #define elf_backend_can_gc_sections 1
19298 #define elf_backend_plt_readonly 1
19299 #define elf_backend_want_got_plt 1
19300 #define elf_backend_want_plt_sym 0
19301 #define elf_backend_want_dynrelro 1
19302 #define elf_backend_may_use_rel_p 1
19303 #define elf_backend_may_use_rela_p 0
19304 #define elf_backend_default_use_rela_p 0
19305 #define elf_backend_dtrel_excludes_plt 1
19306
19307 #define elf_backend_got_header_size 12
19308 #define elf_backend_extern_protected_data 1
19309
19310 #undef elf_backend_obj_attrs_vendor
19311 #define elf_backend_obj_attrs_vendor "aeabi"
19312 #undef elf_backend_obj_attrs_section
19313 #define elf_backend_obj_attrs_section ".ARM.attributes"
19314 #undef elf_backend_obj_attrs_arg_type
19315 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
19316 #undef elf_backend_obj_attrs_section_type
19317 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
19318 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
19319 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
19320
19321 #undef elf_backend_section_flags
19322 #define elf_backend_section_flags elf32_arm_section_flags
19323 #undef elf_backend_lookup_section_flags_hook
19324 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
19325
19326 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
19327
19328 #include "elf32-target.h"
19329
19330 /* Native Client targets. */
19331
19332 #undef TARGET_LITTLE_SYM
19333 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
19334 #undef TARGET_LITTLE_NAME
19335 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
19336 #undef TARGET_BIG_SYM
19337 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
19338 #undef TARGET_BIG_NAME
19339 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
19340
19341 /* Like elf32_arm_link_hash_table_create -- but overrides
19342 appropriately for NaCl. */
19343
19344 static struct bfd_link_hash_table *
19345 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
19346 {
19347 struct bfd_link_hash_table *ret;
19348
19349 ret = elf32_arm_link_hash_table_create (abfd);
19350 if (ret)
19351 {
19352 struct elf32_arm_link_hash_table *htab
19353 = (struct elf32_arm_link_hash_table *) ret;
19354
19355 htab->nacl_p = 1;
19356
19357 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
19358 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
19359 }
19360 return ret;
19361 }
19362
19363 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
19364 really need to use elf32_arm_modify_segment_map. But we do it
19365 anyway just to reduce gratuitous differences with the stock ARM backend. */
19366
19367 static bfd_boolean
19368 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
19369 {
19370 return (elf32_arm_modify_segment_map (abfd, info)
19371 && nacl_modify_segment_map (abfd, info));
19372 }
19373
/* Final-write hook for the NaCl targets: perform the standard ARM
   processing first, then the NaCl-specific pass.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
19380
19381 static bfd_vma
19382 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
19383 const arelent *rel ATTRIBUTE_UNUSED)
19384 {
19385 return plt->vma
19386 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
19387 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
19388 }
19389
19390 #undef elf32_bed
19391 #define elf32_bed elf32_arm_nacl_bed
19392 #undef bfd_elf32_bfd_link_hash_table_create
19393 #define bfd_elf32_bfd_link_hash_table_create \
19394 elf32_arm_nacl_link_hash_table_create
19395 #undef elf_backend_plt_alignment
19396 #define elf_backend_plt_alignment 4
19397 #undef elf_backend_modify_segment_map
19398 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
19399 #undef elf_backend_modify_program_headers
19400 #define elf_backend_modify_program_headers nacl_modify_program_headers
19401 #undef elf_backend_final_write_processing
19402 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
19403 #undef bfd_elf32_get_synthetic_symtab
19404 #undef elf_backend_plt_sym_val
19405 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
19406 #undef elf_backend_copy_special_section_fields
19407
19408 #undef ELF_MINPAGESIZE
19409 #undef ELF_COMMONPAGESIZE
19410
19411
19412 #include "elf32-target.h"
19413
19414 /* Reset to defaults. */
19415 #undef elf_backend_plt_alignment
19416 #undef elf_backend_modify_segment_map
19417 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
19418 #undef elf_backend_modify_program_headers
19419 #undef elf_backend_final_write_processing
19420 #define elf_backend_final_write_processing elf32_arm_final_write_processing
19421 #undef ELF_MINPAGESIZE
19422 #define ELF_MINPAGESIZE 0x1000
19423 #undef ELF_COMMONPAGESIZE
19424 #define ELF_COMMONPAGESIZE 0x1000
19425
19426
19427 /* FDPIC Targets. */
19428
19429 #undef TARGET_LITTLE_SYM
19430 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
19431 #undef TARGET_LITTLE_NAME
19432 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
19433 #undef TARGET_BIG_SYM
19434 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
19435 #undef TARGET_BIG_NAME
19436 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
19437 #undef elf_match_priority
19438 #define elf_match_priority 128
19439 #undef ELF_OSABI
19440 #define ELF_OSABI ELFOSABI_ARM_FDPIC
19441
19442 /* Like elf32_arm_link_hash_table_create -- but overrides
19443 appropriately for FDPIC. */
19444
19445 static struct bfd_link_hash_table *
19446 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
19447 {
19448 struct bfd_link_hash_table *ret;
19449
19450 ret = elf32_arm_link_hash_table_create (abfd);
19451 if (ret)
19452 {
19453 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
19454
19455 htab->fdpic_p = 1;
19456 }
19457 return ret;
19458 }
19459
19460 #undef elf32_bed
19461 #define elf32_bed elf32_arm_fdpic_bed
19462
19463 #undef bfd_elf32_bfd_link_hash_table_create
19464 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
19465
19466 #include "elf32-target.h"
19467 #undef elf_match_priority
19468 #undef ELF_OSABI
19469
19470 /* VxWorks Targets. */
19471
19472 #undef TARGET_LITTLE_SYM
19473 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
19474 #undef TARGET_LITTLE_NAME
19475 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
19476 #undef TARGET_BIG_SYM
19477 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
19478 #undef TARGET_BIG_NAME
19479 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
19480
19481 /* Like elf32_arm_link_hash_table_create -- but overrides
19482 appropriately for VxWorks. */
19483
19484 static struct bfd_link_hash_table *
19485 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
19486 {
19487 struct bfd_link_hash_table *ret;
19488
19489 ret = elf32_arm_link_hash_table_create (abfd);
19490 if (ret)
19491 {
19492 struct elf32_arm_link_hash_table *htab
19493 = (struct elf32_arm_link_hash_table *) ret;
19494 htab->use_rel = 0;
19495 htab->vxworks_p = 1;
19496 }
19497 return ret;
19498 }
19499
/* Final-write hook for the VxWorks targets: perform the standard ARM
   processing first, then the VxWorks-specific pass.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
19506
19507 #undef elf32_bed
19508 #define elf32_bed elf32_arm_vxworks_bed
19509
19510 #undef bfd_elf32_bfd_link_hash_table_create
19511 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
19512 #undef elf_backend_final_write_processing
19513 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
19514 #undef elf_backend_emit_relocs
19515 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
19516
19517 #undef elf_backend_may_use_rel_p
19518 #define elf_backend_may_use_rel_p 0
19519 #undef elf_backend_may_use_rela_p
19520 #define elf_backend_may_use_rela_p 1
19521 #undef elf_backend_default_use_rela_p
19522 #define elf_backend_default_use_rela_p 1
19523 #undef elf_backend_want_plt_sym
19524 #define elf_backend_want_plt_sym 1
19525 #undef ELF_MAXPAGESIZE
19526 #define ELF_MAXPAGESIZE 0x1000
19527
19528 #include "elf32-target.h"
19529
19530
19531 /* Merge backend specific data from an object file to the output
19532 object file when linking. */
19533
/* Merge the backend-specific e_flags of input bfd IBFD into the output
   bfd of INFO when linking.  Returns FALSE (and reports an error) on a
   hard incompatibility such as mismatched EABI versions; for the old
   EF_ARM_EABI_UNKNOWN flags, most mismatches only clear the return
   value after diagnosing every one of them, and an interworking
   mismatch is merely warned about.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return FALSE;

  /* Non-ARM inputs (e.g. linker scripts' fake bfds) have nothing to
     merge; treat them as trivially compatible.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  /* Merge the .ARM.attributes build attributes first; this can fail
     independently of the e_flags checks below.  */
  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  /* First input with flags: adopt them wholesale instead of merging.  */
  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      /* NOTE(review): the break means only the FIRST non-glue
		 section decides only_data_sections; presumably any
		 real code input has a code section early, but confirm
		 this is intentional.  */
	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 vs APCS-32 program counter size.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      /* Float-argument passing convention (float vs integer regs).  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* VFP vs FPA floating-point instruction set.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = FALSE;
	}

      /* Cirrus Maverick co-processor use.  */
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
19753
19754
19755 /* Symbian OS Targets. */
19756
19757 #undef TARGET_LITTLE_SYM
19758 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
19759 #undef TARGET_LITTLE_NAME
19760 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
19761 #undef TARGET_BIG_SYM
19762 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
19763 #undef TARGET_BIG_NAME
19764 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
19765
19766 /* Like elf32_arm_link_hash_table_create -- but overrides
19767 appropriately for Symbian OS. */
19768
19769 static struct bfd_link_hash_table *
19770 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
19771 {
19772 struct bfd_link_hash_table *ret;
19773
19774 ret = elf32_arm_link_hash_table_create (abfd);
19775 if (ret)
19776 {
19777 struct elf32_arm_link_hash_table *htab
19778 = (struct elf32_arm_link_hash_table *)ret;
19779 /* There is no PLT header for Symbian OS. */
19780 htab->plt_header_size = 0;
19781 /* The PLT entries are each one instruction and one word. */
19782 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
19783 htab->symbian_p = 1;
19784 /* Symbian uses armv5t or above, so use_blx is always true. */
19785 htab->use_blx = 1;
19786 htab->root.is_relocatable_executable = 1;
19787 }
19788 return ret;
19789 }
19790
/* Special-section overrides for the Symbian OS (BPABI) targets; each
   entry is { name, name-length, alignment hint, sh_type, sh_flags }.  */

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),	 0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),	 0, SHT_STRTAB,	  0 },
  { STRING_COMMA_LEN (".dynsym"),	 0, SHT_DYNSYM,	  0 },
  { STRING_COMMA_LEN (".got"),		 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),		 0, SHT_HASH,	  0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),	 0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),	 0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Sentinel terminating the table.  */
  { NULL,			     0,	 0, 0,		  0 }
};
19811
/* Begin-write hook for the Symbian targets: drop D_PAGED before
   deferring to the generic ARM hook.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
19827
19828 static bfd_boolean
19829 elf32_arm_symbian_modify_segment_map (bfd *abfd,
19830 struct bfd_link_info *info)
19831 {
19832 struct elf_segment_map *m;
19833 asection *dynsec;
19834
19835 /* BPABI shared libraries and executables should have a PT_DYNAMIC
19836 segment. However, because the .dynamic section is not marked
19837 with SEC_LOAD, the generic ELF code will not create such a
19838 segment. */
19839 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
19840 if (dynsec)
19841 {
19842 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
19843 if (m->p_type == PT_DYNAMIC)
19844 break;
19845
19846 if (m == NULL)
19847 {
19848 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
19849 m->next = elf_seg_map (abfd);
19850 elf_seg_map (abfd) = m;
19851 }
19852 }
19853
19854 /* Also call the generic arm routine. */
19855 return elf32_arm_modify_segment_map (abfd, info);
19856 }
19857
19858 /* Return address for Ith PLT stub in section PLT, for relocation REL
19859 or (bfd_vma) -1 if it should not be included. */
19860
19861 static bfd_vma
19862 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
19863 const arelent *rel ATTRIBUTE_UNUSED)
19864 {
19865 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
19866 }
19867
19868 #undef elf32_bed
19869 #define elf32_bed elf32_arm_symbian_bed
19870
19871 /* The dynamic sections are not allocated on SymbianOS; the postlinker
19872 will process them and then discard them. */
19873 #undef ELF_DYNAMIC_SEC_FLAGS
19874 #define ELF_DYNAMIC_SEC_FLAGS \
19875 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
19876
19877 #undef elf_backend_emit_relocs
19878
19879 #undef bfd_elf32_bfd_link_hash_table_create
19880 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
19881 #undef elf_backend_special_sections
19882 #define elf_backend_special_sections elf32_arm_symbian_special_sections
19883 #undef elf_backend_begin_write_processing
19884 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
19885 #undef elf_backend_final_write_processing
19886 #define elf_backend_final_write_processing elf32_arm_final_write_processing
19887
19888 #undef elf_backend_modify_segment_map
19889 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
19890
19891 /* There is no .got section for BPABI objects, and hence no header. */
19892 #undef elf_backend_got_header_size
19893 #define elf_backend_got_header_size 0
19894
19895 /* Similarly, there is no .got.plt section. */
19896 #undef elf_backend_want_got_plt
19897 #define elf_backend_want_got_plt 0
19898
19899 #undef elf_backend_plt_sym_val
19900 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
19901
19902 #undef elf_backend_may_use_rel_p
19903 #define elf_backend_may_use_rel_p 1
19904 #undef elf_backend_may_use_rela_p
19905 #define elf_backend_may_use_rela_p 0
19906 #undef elf_backend_default_use_rela_p
19907 #define elf_backend_default_use_rela_p 0
19908 #undef elf_backend_want_plt_sym
19909 #define elf_backend_want_plt_sym 0
19910 #undef elf_backend_dtrel_excludes_plt
19911 #define elf_backend_dtrel_excludes_plt 0
19912 #undef ELF_MAXPAGESIZE
19913 #define ELF_MAXPAGESIZE 0x8000
19914
19915 #include "elf32-target.h"