1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "bfd_stdint.h"
27 #include "libiberty.h"
28 #include "libbfd.h"
29 #include "elf-bfd.h"
30 #include "elf-nacl.h"
31 #include "elf-vxworks.h"
32 #include "elf/arm.h"
33
34 /* Return the relocation section associated with NAME. HTAB is the
35 bfd's elf32_arm_link_hash_table. */
36 #define RELOC_SECTION(HTAB, NAME) \
37 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
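/* For example, RELOC_SECTION (htab, ".text") expands, via string-literal
   concatenation, to ".rel.text" when the target uses REL relocations
   (use_rel set) and to ".rela.text" otherwise.  */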
38
39 /* Return size of a relocation entry. HTAB is the bfd's
40 elf32_arm_link_hash_table. */
41 #define RELOC_SIZE(HTAB) \
42 ((HTAB)->use_rel \
43 ? sizeof (Elf32_External_Rel) \
44 : sizeof (Elf32_External_Rela))
45
46 /* Return function to swap relocations in. HTAB is the bfd's
47 elf32_arm_link_hash_table. */
48 #define SWAP_RELOC_IN(HTAB) \
49 ((HTAB)->use_rel \
50 ? bfd_elf32_swap_reloc_in \
51 : bfd_elf32_swap_reloca_in)
52
53 /* Return function to swap relocations out. HTAB is the bfd's
54 elf32_arm_link_hash_table. */
55 #define SWAP_RELOC_OUT(HTAB) \
56 ((HTAB)->use_rel \
57 ? bfd_elf32_swap_reloc_out \
58 : bfd_elf32_swap_reloca_out)
59
60 #define elf_info_to_howto 0
61 #define elf_info_to_howto_rel elf32_arm_info_to_howto
62
63 #define ARM_ELF_ABI_VERSION 0
64 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
65
66 /* The Adjusted Place, as defined by AAELF. */
67 #define Pa(X) ((X) & 0xfffffffc)
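/* I.e. X with its bottom two bits cleared, giving the place rounded down
   to a 4-byte (word) boundary.  */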
68
69 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
70 struct bfd_link_info *link_info,
71 asection *sec,
72 bfd_byte *contents);
73
74 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
75 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76 in that slot. */
77
78 static reloc_howto_type elf32_arm_howto_table_1[] =
79 {
80 /* No relocation. */
81 HOWTO (R_ARM_NONE, /* type */
82 0, /* rightshift */
83 0, /* size (0 = byte, 1 = short, 2 = long) */
84 0, /* bitsize */
85 FALSE, /* pc_relative */
86 0, /* bitpos */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 FALSE, /* partial_inplace */
91 0, /* src_mask */
92 0, /* dst_mask */
93 FALSE), /* pcrel_offset */
94
95 HOWTO (R_ARM_PC24, /* type */
96 2, /* rightshift */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
98 24, /* bitsize */
99 TRUE, /* pc_relative */
100 0, /* bitpos */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 FALSE, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 TRUE), /* pcrel_offset */
108
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
111 0, /* rightshift */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
113 32, /* bitsize */
114 FALSE, /* pc_relative */
115 0, /* bitpos */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 FALSE, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 FALSE), /* pcrel_offset */
123
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
126 0, /* rightshift */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
128 32, /* bitsize */
129 TRUE, /* pc_relative */
130 0, /* bitpos */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 FALSE, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 TRUE), /* pcrel_offset */
138
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* rightshift */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
143 32, /* bitsize */
144 TRUE, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 FALSE, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 TRUE), /* pcrel_offset */
153
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
156 0, /* rightshift */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
158 16, /* bitsize */
159 FALSE, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 FALSE, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 FALSE), /* pcrel_offset */
168
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
171 0, /* rightshift */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
173 12, /* bitsize */
174 FALSE, /* pc_relative */
175 0, /* bitpos */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 FALSE, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 FALSE), /* pcrel_offset */
183
184 HOWTO (R_ARM_THM_ABS5, /* type */
185 6, /* rightshift */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
187 5, /* bitsize */
188 FALSE, /* pc_relative */
189 0, /* bitpos */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 FALSE, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 FALSE), /* pcrel_offset */
197
198 /* 8 bit absolute */
199 HOWTO (R_ARM_ABS8, /* type */
200 0, /* rightshift */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
202 8, /* bitsize */
203 FALSE, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 FALSE, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 FALSE), /* pcrel_offset */
212
213 HOWTO (R_ARM_SBREL32, /* type */
214 0, /* rightshift */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
216 32, /* bitsize */
217 FALSE, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 FALSE, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 FALSE), /* pcrel_offset */
226
227 HOWTO (R_ARM_THM_CALL, /* type */
228 1, /* rightshift */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
230 24, /* bitsize */
231 TRUE, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 FALSE, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 TRUE), /* pcrel_offset */
240
241 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* rightshift */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
244 8, /* bitsize */
245 TRUE, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 FALSE, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 TRUE), /* pcrel_offset */
254
255 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* rightshift */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
258 32, /* bitsize */
259 FALSE, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 FALSE, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 FALSE), /* pcrel_offset */
268
269 HOWTO (R_ARM_TLS_DESC, /* type */
270 0, /* rightshift */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
272 32, /* bitsize */
273 FALSE, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 FALSE, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 FALSE), /* pcrel_offset */
282
283 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* rightshift */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
286 0, /* bitsize */
287 FALSE, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 FALSE, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 FALSE), /* pcrel_offset */
296
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
299 2, /* rightshift */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
301 24, /* bitsize */
302 TRUE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 FALSE, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 TRUE), /* pcrel_offset */
311
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* rightshift */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
316 24, /* bitsize */
317 TRUE, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 FALSE, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 TRUE), /* pcrel_offset */
326
327 /* Dynamic TLS relocations. */
328
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 0, /* rightshift */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
332 32, /* bitsize */
333 FALSE, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 TRUE, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 FALSE), /* pcrel_offset */
342
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 TRUE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 0, /* rightshift */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
360 32, /* bitsize */
361 FALSE, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 TRUE, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 FALSE), /* pcrel_offset */
370
371 /* Relocs used in ARM Linux */
372
373 HOWTO (R_ARM_COPY, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 32, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 TRUE, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_ARM_GLOB_DAT, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 32, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 TRUE, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 0, /* rightshift */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
404 32, /* bitsize */
405 FALSE, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 TRUE, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 FALSE), /* pcrel_offset */
414
415 HOWTO (R_ARM_RELATIVE, /* type */
416 0, /* rightshift */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
418 32, /* bitsize */
419 FALSE, /* pc_relative */
420 0, /* bitpos */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 TRUE, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 FALSE), /* pcrel_offset */
428
429 HOWTO (R_ARM_GOTOFF32, /* type */
430 0, /* rightshift */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
432 32, /* bitsize */
433 FALSE, /* pc_relative */
434 0, /* bitpos */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 TRUE, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 FALSE), /* pcrel_offset */
442
443 HOWTO (R_ARM_GOTPC, /* type */
444 0, /* rightshift */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
446 32, /* bitsize */
447 TRUE, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 TRUE, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 TRUE), /* pcrel_offset */
456
457 HOWTO (R_ARM_GOT32, /* type */
458 0, /* rightshift */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
460 32, /* bitsize */
461 FALSE, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 TRUE, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 FALSE), /* pcrel_offset */
470
471 HOWTO (R_ARM_PLT32, /* type */
472 2, /* rightshift */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
474 24, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 FALSE, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 HOWTO (R_ARM_CALL, /* type */
486 2, /* rightshift */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
488 24, /* bitsize */
489 TRUE, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 FALSE, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 TRUE), /* pcrel_offset */
498
499 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* rightshift */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
502 24, /* bitsize */
503 TRUE, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 FALSE, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 TRUE), /* pcrel_offset */
512
513 HOWTO (R_ARM_THM_JUMP24, /* type */
514 1, /* rightshift */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
516 24, /* bitsize */
517 TRUE, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 FALSE, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 TRUE), /* pcrel_offset */
526
527 HOWTO (R_ARM_BASE_ABS, /* type */
528 0, /* rightshift */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
530 32, /* bitsize */
531 FALSE, /* pc_relative */
532 0, /* bitpos */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 FALSE, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 FALSE), /* pcrel_offset */
540
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 0, /* rightshift */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
544 12, /* bitsize */
545 TRUE, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 FALSE, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 TRUE), /* pcrel_offset */
554
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 0, /* rightshift */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
558 12, /* bitsize */
559 TRUE, /* pc_relative */
560 8, /* bitpos */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 FALSE, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 TRUE), /* pcrel_offset */
568
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 0, /* rightshift */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
572 12, /* bitsize */
573 TRUE, /* pc_relative */
574 16, /* bitpos */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 FALSE, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 TRUE), /* pcrel_offset */
582
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 0, /* rightshift */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
586 12, /* bitsize */
587 FALSE, /* pc_relative */
588 0, /* bitpos */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 FALSE, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 FALSE), /* pcrel_offset */
596
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 0, /* rightshift */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
600 8, /* bitsize */
601 FALSE, /* pc_relative */
602 12, /* bitpos */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 FALSE, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 FALSE), /* pcrel_offset */
610
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 0, /* rightshift */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
614 8, /* bitsize */
615 FALSE, /* pc_relative */
616 20, /* bitpos */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 FALSE, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 FALSE), /* pcrel_offset */
624
625 HOWTO (R_ARM_TARGET1, /* type */
626 0, /* rightshift */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
628 32, /* bitsize */
629 FALSE, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 FALSE, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 FALSE), /* pcrel_offset */
638
639 HOWTO (R_ARM_ROSEGREL32, /* type */
640 0, /* rightshift */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
642 32, /* bitsize */
643 FALSE, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 FALSE, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 FALSE), /* pcrel_offset */
652
653 HOWTO (R_ARM_V4BX, /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 32, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 FALSE, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 HOWTO (R_ARM_TARGET2, /* type */
668 0, /* rightshift */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
670 32, /* bitsize */
671 FALSE, /* pc_relative */
672 0, /* bitpos */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 FALSE, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 TRUE), /* pcrel_offset */
680
681 HOWTO (R_ARM_PREL31, /* type */
682 0, /* rightshift */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
684 31, /* bitsize */
685 TRUE, /* pc_relative */
686 0, /* bitpos */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 FALSE, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 TRUE), /* pcrel_offset */
694
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 0, /* rightshift */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
698 16, /* bitsize */
699 FALSE, /* pc_relative */
700 0, /* bitpos */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 FALSE, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 FALSE), /* pcrel_offset */
708
709 HOWTO (R_ARM_MOVT_ABS, /* type */
710 0, /* rightshift */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
712 16, /* bitsize */
713 FALSE, /* pc_relative */
714 0, /* bitpos */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 FALSE, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 FALSE), /* pcrel_offset */
722
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 0, /* rightshift */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
726 16, /* bitsize */
727 TRUE, /* pc_relative */
728 0, /* bitpos */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 FALSE, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 TRUE), /* pcrel_offset */
736
737 HOWTO (R_ARM_MOVT_PREL, /* type */
738 0, /* rightshift */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
740 16, /* bitsize */
741 TRUE, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 FALSE, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 TRUE), /* pcrel_offset */
750
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 16, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 FALSE, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 0, /* rightshift */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
768 16, /* bitsize */
769 FALSE, /* pc_relative */
770 0, /* bitpos */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 FALSE, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 FALSE), /* pcrel_offset */
778
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 0, /* rightshift */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
782 16, /* bitsize */
783 TRUE, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 FALSE, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 TRUE), /* pcrel_offset */
792
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 0, /* rightshift */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
796 16, /* bitsize */
797 TRUE, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 FALSE, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 TRUE), /* pcrel_offset */
806
807 HOWTO (R_ARM_THM_JUMP19, /* type */
808 1, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 19, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 FALSE, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* rightshift */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
824 6, /* bitsize */
825 TRUE, /* pc_relative */
826 0, /* bitpos */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 FALSE, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 TRUE), /* pcrel_offset */
834
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 versa. */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 0, /* rightshift */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
841 13, /* bitsize */
842 TRUE, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 FALSE, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 TRUE), /* pcrel_offset */
851
852 HOWTO (R_ARM_THM_PC12, /* type */
853 0, /* rightshift */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
855 13, /* bitsize */
856 TRUE, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 FALSE, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 TRUE), /* pcrel_offset */
865
866 HOWTO (R_ARM_ABS32_NOI, /* type */
867 0, /* rightshift */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
869 32, /* bitsize */
870 FALSE, /* pc_relative */
871 0, /* bitpos */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 FALSE, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 FALSE), /* pcrel_offset */
879
880 HOWTO (R_ARM_REL32_NOI, /* type */
881 0, /* rightshift */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
883 32, /* bitsize */
884 TRUE, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 FALSE, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 FALSE), /* pcrel_offset */
893
894 /* Group relocations. */
895
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 0, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 32, /* bitsize */
900 TRUE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 FALSE, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 TRUE), /* pcrel_offset */
909
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 32, /* bitsize */
914 TRUE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 FALSE, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 TRUE), /* pcrel_offset */
923
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 0, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 32, /* bitsize */
928 TRUE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 FALSE, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 TRUE), /* pcrel_offset */
937
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 0, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 32, /* bitsize */
942 TRUE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 FALSE, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 TRUE), /* pcrel_offset */
951
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 0, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 32, /* bitsize */
956 TRUE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 FALSE, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 TRUE), /* pcrel_offset */
965
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 0, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 32, /* bitsize */
970 TRUE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 FALSE, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 TRUE), /* pcrel_offset */
979
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 0, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 32, /* bitsize */
984 TRUE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 FALSE, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 TRUE), /* pcrel_offset */
993
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 0, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 32, /* bitsize */
998 TRUE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 FALSE, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 TRUE), /* pcrel_offset */
1007
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 0, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 32, /* bitsize */
1012 TRUE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 FALSE, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 TRUE), /* pcrel_offset */
1021
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 32, /* bitsize */
1026 TRUE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 TRUE), /* pcrel_offset */
1035
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 TRUE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 TRUE), /* pcrel_offset */
1049
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 0, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 32, /* bitsize */
1054 TRUE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 FALSE, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 TRUE), /* pcrel_offset */
1063
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 32, /* bitsize */
1068 TRUE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 FALSE, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 TRUE), /* pcrel_offset */
1077
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 32, /* bitsize */
1082 TRUE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 FALSE, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 TRUE), /* pcrel_offset */
1091
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 0, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 32, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 32, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 0, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 32, /* bitsize */
1124 TRUE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 TRUE), /* pcrel_offset */
1133
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 32, /* bitsize */
1138 TRUE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 TRUE), /* pcrel_offset */
1147
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 32, /* bitsize */
1152 TRUE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 TRUE), /* pcrel_offset */
1161
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 0, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 32, /* bitsize */
1166 TRUE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 FALSE, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 TRUE), /* pcrel_offset */
1175
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 32, /* bitsize */
1180 TRUE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 FALSE, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 TRUE), /* pcrel_offset */
1189
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 32, /* bitsize */
1194 TRUE, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 FALSE, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 TRUE), /* pcrel_offset */
1203
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 0, /* rightshift */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 32, /* bitsize */
1208 TRUE, /* pc_relative */
1209 0, /* bitpos */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 FALSE, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 TRUE), /* pcrel_offset */
1217
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 0, /* rightshift */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 32, /* bitsize */
1222 TRUE, /* pc_relative */
1223 0, /* bitpos */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 FALSE, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 TRUE), /* pcrel_offset */
1231
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 0, /* rightshift */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 32, /* bitsize */
1236 TRUE, /* pc_relative */
1237 0, /* bitpos */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 FALSE, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 TRUE), /* pcrel_offset */
1245
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 0, /* rightshift */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 32, /* bitsize */
1250 TRUE, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 FALSE, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 TRUE), /* pcrel_offset */
1259
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 0, /* rightshift */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 32, /* bitsize */
1264 TRUE, /* pc_relative */
1265 0, /* bitpos */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 FALSE, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 TRUE), /* pcrel_offset */
1273
1274 /* End of group relocations. */
1275
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 0, /* rightshift */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 16, /* bitsize */
1280 FALSE, /* pc_relative */
1281 0, /* bitpos */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 FALSE, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 FALSE), /* pcrel_offset */
1289
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 0, /* rightshift */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 16, /* bitsize */
1294 FALSE, /* pc_relative */
1295 0, /* bitpos */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 FALSE, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 FALSE), /* pcrel_offset */
1303
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 0, /* rightshift */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 16, /* bitsize */
1308 FALSE, /* pc_relative */
1309 0, /* bitpos */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 FALSE, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 FALSE), /* pcrel_offset */
1317
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 0, /* rightshift */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 16, /* bitsize */
1322 FALSE, /* pc_relative */
1323 0, /* bitpos */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 FALSE, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 FALSE), /* pcrel_offset */
1331
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 0, /* rightshift */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 16, /* bitsize */
1336 FALSE, /* pc_relative */
1337 0, /* bitpos */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 FALSE, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 FALSE), /* pcrel_offset */
1345
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 0, /* rightshift */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 16, /* bitsize */
1350 FALSE, /* pc_relative */
1351 0, /* bitpos */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 FALSE, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 FALSE), /* pcrel_offset */
1359
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 TRUE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 24, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 FALSE, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 0, /* bitsize */
1392 FALSE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_bitfield,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 FALSE, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 FALSE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 24, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 FALSE, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 32, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 FALSE, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1431 0, /* rightshift */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 32, /* bitsize */
1434 FALSE, /* pc_relative */
1435 0, /* bitpos */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 FALSE, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 FALSE), /* pcrel_offset */
1443
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1445 0, /* rightshift */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 32, /* bitsize */
1448 TRUE, /* pc_relative */
1449 0, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 FALSE, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 TRUE), /* pcrel_offset */
1457
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 0, /* rightshift */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 12, /* bitsize */
1462 FALSE, /* pc_relative */
1463 0, /* bitpos */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 FALSE, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1471
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1473 0, /* rightshift */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 12, /* bitsize */
1476 FALSE, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 FALSE, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1485
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1487
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 0, /* rightshift */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 0, /* bitsize */
1493 FALSE, /* pc_relative */
1494 0, /* bitpos */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 FALSE, /* partial_inplace */
1499 0, /* src_mask */
1500 0, /* dst_mask */
1501 FALSE), /* pcrel_offset */
1502
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 0, /* rightshift */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 0, /* bitsize */
1508 FALSE, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 FALSE, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 FALSE), /* pcrel_offset */
1517
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* rightshift */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 11, /* bitsize */
1522 TRUE, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 FALSE, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 TRUE), /* pcrel_offset */
1531
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* rightshift */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 8, /* bitsize */
1536 TRUE, /* pc_relative */
1537 0, /* bitpos */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 FALSE, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 TRUE), /* pcrel_offset */
1545
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 32, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 TRUE, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 32, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 TRUE, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 32, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 TRUE, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1604 0, /* rightshift */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 32, /* bitsize */
1607 FALSE, /* pc_relative */
1608 0, /* bitpos */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 bfd_elf_generic_reloc, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 TRUE, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 FALSE), /* pcrel_offset */
1616
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 0, /* rightshift */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 12, /* bitsize */
1621 FALSE, /* pc_relative */
1622 0, /* bitpos */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 FALSE, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 FALSE), /* pcrel_offset */
1630
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1632 0, /* rightshift */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 12, /* bitsize */
1635 FALSE, /* pc_relative */
1636 0, /* bitpos */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 FALSE, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 FALSE), /* pcrel_offset */
1644
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 0, /* rightshift */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 12, /* bitsize */
1649 FALSE, /* pc_relative */
1650 0, /* bitpos */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 FALSE, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 FALSE), /* pcrel_offset */
1658
1659 /* 112-127 private relocations. */
1660 EMPTY_HOWTO (112),
1661 EMPTY_HOWTO (113),
1662 EMPTY_HOWTO (114),
1663 EMPTY_HOWTO (115),
1664 EMPTY_HOWTO (116),
1665 EMPTY_HOWTO (117),
1666 EMPTY_HOWTO (118),
1667 EMPTY_HOWTO (119),
1668 EMPTY_HOWTO (120),
1669 EMPTY_HOWTO (121),
1670 EMPTY_HOWTO (122),
1671 EMPTY_HOWTO (123),
1672 EMPTY_HOWTO (124),
1673 EMPTY_HOWTO (125),
1674 EMPTY_HOWTO (126),
1675 EMPTY_HOWTO (127),
1676
1677 /* R_ARM_ME_TOO, obsolete. */
1678 EMPTY_HOWTO (128),
1679
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 0, /* rightshift */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 0, /* bitsize */
1684 FALSE, /* pc_relative */
1685 0, /* bitpos */
1686 complain_overflow_bitfield,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 FALSE, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 FALSE), /* pcrel_offset */
1693 };
1694
1695 /* 160 onwards: */
1696 static reloc_howto_type elf32_arm_howto_table_2[1] =
1697 {
1698 HOWTO (R_ARM_IRELATIVE, /* type */
1699 0, /* rightshift */
1700 2, /* size (0 = byte, 1 = short, 2 = long) */
1701 32, /* bitsize */
1702 FALSE, /* pc_relative */
1703 0, /* bitpos */
1704 complain_overflow_bitfield,/* complain_on_overflow */
1705 bfd_elf_generic_reloc, /* special_function */
1706 "R_ARM_IRELATIVE", /* name */
1707 TRUE, /* partial_inplace */
1708 0xffffffff, /* src_mask */
1709 0xffffffff, /* dst_mask */
1710 FALSE) /* pcrel_offset */
1711 };
1712
1713 /* 249-255 extended, currently unused, relocations: */
1714 static reloc_howto_type elf32_arm_howto_table_3[4] =
1715 {
1716 HOWTO (R_ARM_RREL32, /* type */
1717 0, /* rightshift */
1718 0, /* size (0 = byte, 1 = short, 2 = long) */
1719 0, /* bitsize */
1720 FALSE, /* pc_relative */
1721 0, /* bitpos */
1722 complain_overflow_dont,/* complain_on_overflow */
1723 bfd_elf_generic_reloc, /* special_function */
1724 "R_ARM_RREL32", /* name */
1725 FALSE, /* partial_inplace */
1726 0, /* src_mask */
1727 0, /* dst_mask */
1728 FALSE), /* pcrel_offset */
1729
1730 HOWTO (R_ARM_RABS32, /* type */
1731 0, /* rightshift */
1732 0, /* size (0 = byte, 1 = short, 2 = long) */
1733 0, /* bitsize */
1734 FALSE, /* pc_relative */
1735 0, /* bitpos */
1736 complain_overflow_dont,/* complain_on_overflow */
1737 bfd_elf_generic_reloc, /* special_function */
1738 "R_ARM_RABS32", /* name */
1739 FALSE, /* partial_inplace */
1740 0, /* src_mask */
1741 0, /* dst_mask */
1742 FALSE), /* pcrel_offset */
1743
1744 HOWTO (R_ARM_RPC24, /* type */
1745 0, /* rightshift */
1746 0, /* size (0 = byte, 1 = short, 2 = long) */
1747 0, /* bitsize */
1748 FALSE, /* pc_relative */
1749 0, /* bitpos */
1750 complain_overflow_dont,/* complain_on_overflow */
1751 bfd_elf_generic_reloc, /* special_function */
1752 "R_ARM_RPC24", /* name */
1753 FALSE, /* partial_inplace */
1754 0, /* src_mask */
1755 0, /* dst_mask */
1756 FALSE), /* pcrel_offset */
1757
1758 HOWTO (R_ARM_RBASE, /* type */
1759 0, /* rightshift */
1760 0, /* size (0 = byte, 1 = short, 2 = long) */
1761 0, /* bitsize */
1762 FALSE, /* pc_relative */
1763 0, /* bitpos */
1764 complain_overflow_dont,/* complain_on_overflow */
1765 bfd_elf_generic_reloc, /* special_function */
1766 "R_ARM_RBASE", /* name */
1767 FALSE, /* partial_inplace */
1768 0, /* src_mask */
1769 0, /* dst_mask */
1770 FALSE) /* pcrel_offset */
1771 };
1772
1773 static reloc_howto_type *
1774 elf32_arm_howto_from_type (unsigned int r_type)
1775 {
1776 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1777 return &elf32_arm_howto_table_1[r_type];
1778
1779 if (r_type == R_ARM_IRELATIVE)
1780 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1781
1782 if (r_type >= R_ARM_RREL32
1783 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1784 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1785
1786 return NULL;
1787 }
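/* For example, R_ARM_ABS32 (r_type 2) falls inside the first table, so
   elf32_arm_howto_from_type (R_ARM_ABS32) returns
   &elf32_arm_howto_table_1[2], whose name field is "R_ARM_ABS32";
   a type in the R_ARM_RREL32 .. R_ARM_RBASE range indexes
   elf32_arm_howto_table_3 relative to R_ARM_RREL32.  Any type outside
   the three ranges yields NULL, which callers must be prepared to
   handle.  */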
1788
1789 static void
1790 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1791 Elf_Internal_Rela * elf_reloc)
1792 {
1793 unsigned int r_type;
1794
1795 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1796 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1797 }
1798
1799 struct elf32_arm_reloc_map
1800 {
1801 bfd_reloc_code_real_type bfd_reloc_val;
1802 unsigned char elf_reloc_val;
1803 };
1804
1805 /* All entries in this list must also be present in one of the
elf32_arm_howto_table_* arrays above. */
1806 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1807 {
1808 {BFD_RELOC_NONE, R_ARM_NONE},
1809 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1810 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1811 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1812 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1813 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1814 {BFD_RELOC_32, R_ARM_ABS32},
1815 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1816 {BFD_RELOC_8, R_ARM_ABS8},
1817 {BFD_RELOC_16, R_ARM_ABS16},
1818 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1819 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1821 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1822 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1823 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1824 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1825 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1826 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1827 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1828 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1829 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1830 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1831 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1832 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1833 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1834 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1835 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1836 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1837 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1838 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1839 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1840 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1841 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1842 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1843 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1844 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1845 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1846 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1847 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1848 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1849 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1850 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1851 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1852 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1853 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1854 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1855 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1856 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1857 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1858 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1859 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1860 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1861 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1862 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1863 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1864 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1865 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1866 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1867 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1868 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1869 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1870 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1871 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1872 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1873 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1874 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1875 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1876 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1877 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1878 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1879 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1880 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1881 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1882 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1883 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1884 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1885 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1886 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1887 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1888 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1889 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1890 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1891 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1892 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1893 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1894 };
1895
1896 static reloc_howto_type *
1897 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1898 bfd_reloc_code_real_type code)
1899 {
1900 unsigned int i;
1901
1902 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1903 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1904 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1905
1906 return NULL;
1907 }
1908
1909 static reloc_howto_type *
1910 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1911 const char *r_name)
1912 {
1913 unsigned int i;
1914
1915 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1916 if (elf32_arm_howto_table_1[i].name != NULL
1917 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1918 return &elf32_arm_howto_table_1[i];
1919
1920 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1921 if (elf32_arm_howto_table_2[i].name != NULL
1922 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1923 return &elf32_arm_howto_table_2[i];
1924
1925 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1926 if (elf32_arm_howto_table_3[i].name != NULL
1927 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1928 return &elf32_arm_howto_table_3[i];
1929
1930 return NULL;
1931 }
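/* So, for instance, elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_ARM_PCREL_CALL)
   walks the map above to R_ARM_CALL and hands back its howto, while
   elf32_arm_reloc_name_lookup (abfd, "r_arm_call") reaches the same entry
   directly by name; the comparison is case-insensitive.  */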
1932
1933 /* Support for core dump NOTE sections. */
1934
1935 static bfd_boolean
1936 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1937 {
1938 int offset;
1939 size_t size;
1940
1941 switch (note->descsz)
1942 {
1943 default:
1944 return FALSE;
1945
1946 case 148: /* Linux/ARM 32-bit. */
1947 /* pr_cursig */
1948 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1949
1950 /* pr_pid */
1951 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1952
1953 /* pr_reg */
1954 offset = 72;
1955 size = 72;
1956
1957 break;
1958 }
1959
1960 /* Make a ".reg/999" section. */
1961 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1962 size, note->descpos + offset);
1963 }
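/* The hard-wired offsets above follow the Linux/ARM elf_prstatus layout:
   pr_cursig lives at byte 12, pr_pid at byte 24, and pr_reg (18 registers
   of 4 bytes each: r0-r15, cpsr and orig_r0) occupies the 72 bytes starting
   at offset 72, which together with the trailing pr_fpvalid field gives the
   148-byte descriptor size checked for above.  */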
1964
1965 static bfd_boolean
1966 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1967 {
1968 switch (note->descsz)
1969 {
1970 default:
1971 return FALSE;
1972
1973 case 124: /* Linux/ARM elf_prpsinfo. */
1974 elf_tdata (abfd)->core_pid
1975 = bfd_get_32 (abfd, note->descdata + 12);
1976 elf_tdata (abfd)->core_program
1977 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1978 elf_tdata (abfd)->core_command
1979 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1980 }
1981
1982 /* Note that a spurious space is sometimes tacked
1983 onto the end of the args in some implementations,
1984 so strip it off if it exists. */
1985 {
1986 char *command = elf_tdata (abfd)->core_command;
1987 int n = strlen (command);
1988
1989 if (0 < n && command[n - 1] == ' ')
1990 command[n - 1] = '\0';
1991 }
1992
1993 return TRUE;
1994 }
1995
1996 static char *
1997 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
1998 int note_type, ...)
1999 {
2000 switch (note_type)
2001 {
2002 default:
2003 return NULL;
2004
2005 case NT_PRPSINFO:
2006 {
2007 char data[124];
2008 va_list ap;
2009
2010 va_start (ap, note_type);
2011 memset (data, 0, sizeof (data));
2012 strncpy (data + 28, va_arg (ap, const char *), 16);
2013 strncpy (data + 44, va_arg (ap, const char *), 80);
2014 va_end (ap);
2015
2016 return elfcore_write_note (abfd, buf, bufsiz,
2017 "CORE", note_type, data, sizeof (data));
2018 }
2019
2020 case NT_PRSTATUS:
2021 {
2022 char data[148];
2023 va_list ap;
2024 long pid;
2025 int cursig;
2026 const void *greg;
2027
2028 va_start (ap, note_type);
2029 memset (data, 0, sizeof (data));
2030 pid = va_arg (ap, long);
2031 bfd_put_32 (abfd, pid, data + 24);
2032 cursig = va_arg (ap, int);
2033 bfd_put_16 (abfd, cursig, data + 12);
2034 greg = va_arg (ap, const void *);
2035 memcpy (data + 72, greg, 72);
2036 va_end (ap);
2037
2038 return elfcore_write_note (abfd, buf, bufsiz,
2039 "CORE", note_type, data, sizeof (data));
2040 }
2041 }
2042 }
2043
2044 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
2045 #define TARGET_LITTLE_NAME "elf32-littlearm"
2046 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
2047 #define TARGET_BIG_NAME "elf32-bigarm"
2048
2049 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2050 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2051 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2052
2053 typedef unsigned long int insn32;
2054 typedef unsigned short int insn16;
2055
2056 /* In lieu of proper flags, assume all EABIv4 or later objects are
2057 interworkable. */
2058 #define INTERWORK_FLAG(abfd) \
2059 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2060 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2061 || ((abfd)->flags & BFD_LINKER_CREATED))
2062
2063 /* The linker script knows the section names for placement.
2064 The entry_names are used to do simple name mangling on the stubs.
2065 Given a function name and its type, the stub can be found. The
2066 name can be changed. The only requirement is that the %s be present. */
2067 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2068 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2069
2070 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2071 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2072
2073 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2074 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2075
2076 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2077 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2078
2079 #define STUB_ENTRY_NAME "__%s_veneer"
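/* So, for example, ARM->Thumb glue for a function `foo' is entered through
   a symbol named "__foo_from_arm", the veneer for a BX via r3 is named
   "__bx_r3", and a generic long-branch veneer for `foo' typically appears
   as "__foo_veneer".  */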
2080
2081 /* The name of the dynamic interpreter. This is put in the .interp
2082 section. */
2083 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2084
2085 static const unsigned long tls_trampoline [] =
2086 {
2087 0xe08e0000, /* add r0, lr, r0 */
2088 0xe5901004, /* ldr r1, [r0,#4] */
2089 0xe12fff11, /* bx r1 */
2090 };
2091
2092 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2093 {
2094 0xe52d2004, /* push {r2} */
2095 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2096 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2097 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2098 0xe081100f, /* 2: add r1, pc */
2099 0xe12fff12, /* bx r2 */
2100 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2101 + dl_tlsdesc_lazy_resolver(GOT) */
2102 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2103 };
2104
2105 #ifdef FOUR_WORD_PLT
2106
2107 /* The first entry in a procedure linkage table looks like
2108 this. It is set up so that any shared library function that is
2109 called before the relocation has been set up calls the dynamic
2110 linker first. */
2111 static const bfd_vma elf32_arm_plt0_entry [] =
2112 {
2113 0xe52de004, /* str lr, [sp, #-4]! */
2114 0xe59fe010, /* ldr lr, [pc, #16] */
2115 0xe08fe00e, /* add lr, pc, lr */
2116 0xe5bef008, /* ldr pc, [lr, #8]! */
2117 };
2118
2119 /* Subsequent entries in a procedure linkage table look like
2120 this. */
2121 static const bfd_vma elf32_arm_plt_entry [] =
2122 {
2123 0xe28fc600, /* add ip, pc, #NN */
2124 0xe28cca00, /* add ip, ip, #NN */
2125 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2126 0x00000000, /* unused */
2127 };
2128
2129 #else
2130
2131 /* The first entry in a procedure linkage table looks like
2132 this. It is set up so that any shared library function that is
2133 called before the relocation has been set up calls the dynamic
2134 linker first. */
2135 static const bfd_vma elf32_arm_plt0_entry [] =
2136 {
2137 0xe52de004, /* str lr, [sp, #-4]! */
2138 0xe59fe004, /* ldr lr, [pc, #4] */
2139 0xe08fe00e, /* add lr, pc, lr */
2140 0xe5bef008, /* ldr pc, [lr, #8]! */
2141 0x00000000, /* &GOT[0] - . */
2142 };
2143
2144 /* Subsequent entries in a procedure linkage table look like
2145 this. */
2146 static const bfd_vma elf32_arm_plt_entry [] =
2147 {
2148 0xe28fc600, /* add ip, pc, #0xNN00000 */
2149 0xe28cca00, /* add ip, ip, #0xNN000 */
2150 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2151 };
2152
2153 #endif
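/* In both variants the immediates are filled in at link time: the
   displacement from the PLT entry (as the pc sees it, i.e. the entry
   address plus 8) to the corresponding .got.plt slot is split across the
   instructions, roughly bits 27-20 into the first add, bits 19-12 into the
   second add and bits 11-0 into the ldr offset, giving a reach of up to
   about 256MB between the PLT and the GOT.  */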
2154
2155 /* The format of the first entry in the procedure linkage table
2156 for a VxWorks executable. */
2157 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2158 {
2159 0xe52dc008, /* str ip,[sp,#-8]! */
2160 0xe59fc000, /* ldr ip,[pc] */
2161 0xe59cf008, /* ldr pc,[ip,#8] */
2162 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2163 };
2164
2165 /* The format of subsequent entries in a VxWorks executable. */
2166 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2167 {
2168 0xe59fc000, /* ldr ip,[pc] */
2169 0xe59cf000, /* ldr pc,[ip] */
2170 0x00000000, /* .long @got */
2171 0xe59fc000, /* ldr ip,[pc] */
2172 0xea000000, /* b _PLT */
2173 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2174 };
2175
2176 /* The format of entries in a VxWorks shared library. */
2177 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2178 {
2179 0xe59fc000, /* ldr ip,[pc] */
2180 0xe79cf009, /* ldr pc,[ip,r9] */
2181 0x00000000, /* .long @got */
2182 0xe59fc000, /* ldr ip,[pc] */
2183 0xe599f008, /* ldr pc,[r9,#8] */
2184 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2185 };
2186
2187 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2188 #define PLT_THUMB_STUB_SIZE 4
2189 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2190 {
2191 0x4778, /* bx pc */
2192 0x46c0 /* nop */
2193 };
2194
2195 /* The entries in a PLT when using a DLL-based target with multiple
2196 address spaces. */
2197 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2198 {
2199 0xe51ff004, /* ldr pc, [pc, #-4] */
2200 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2201 };
2202
2203 /* The first entry in a procedure linkage table looks like
2204 this. It is set up so that any shared library function that is
2205 called before the relocation has been set up calls the dynamic
2206 linker first. */
2207 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2208 {
2209 /* First bundle: */
2210 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2211 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2212 0xe08cc00f, /* add ip, ip, pc */
2213 0xe52dc008, /* str ip, [sp, #-8]! */
2214 /* Second bundle: */
2215 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2216 0xe59cc000, /* ldr ip, [ip] */
2217 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2218 0xe12fff1c, /* bx ip */
2219 /* Third bundle: */
2220 0xe320f000, /* nop */
2221 0xe320f000, /* nop */
2222 0xe320f000, /* nop */
2223 /* .Lplt_tail: */
2224 0xe50dc004, /* str ip, [sp, #-4] */
2225 /* Fourth bundle: */
2226 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2227 0xe59cc000, /* ldr ip, [ip] */
2228 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2229 0xe12fff1c, /* bx ip */
2230 };
2231 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2232
2233 /* Subsequent entries in a procedure linkage table look like this. */
2234 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2235 {
2236 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2237 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2238 0xe08cc00f, /* add ip, ip, pc */
2239 0xea000000, /* b .Lplt_tail */
2240 };
2241
2242 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2243 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2244 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2245 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2246 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2247 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
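/* Worked through, these come to: ARM B/BL has a 24-bit signed immediate in
   units of 4 bytes, measured from pc (the instruction address plus 8), so
   the forward limit is ((1 << 23) - 1) * 4 + 8 = 0x02000004 and the
   backward limit is -(1 << 23) * 4 + 8 = -0x01fffff8, i.e. roughly +/-32MB.
   The Thumb figures use an offset of 4 (Thumb pc reads as the instruction
   address plus 4), giving roughly +/-4MB for Thumb-1 BL and +/-16MB for the
   Thumb-2 encodings.  */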
2248
2249 enum stub_insn_type
2250 {
2251 THUMB16_TYPE = 1,
2252 THUMB32_TYPE,
2253 ARM_TYPE,
2254 DATA_TYPE
2255 };
2256
2257 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2258 /* A bit of a hack: a Thumb conditional branch whose proper condition
2259 is inserted by arm_build_one_stub(). */
2260 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2261 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2262 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2263 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2264 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2265 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2266
2267 typedef struct
2268 {
2269 bfd_vma data;
2270 enum stub_insn_type type;
2271 unsigned int r_type;
2272 int reloc_addend;
2273 } insn_sequence;
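/* Each of the macros above builds one insn_sequence element; for example
   ARM_REL_INSN (0xea000000, -8) expands to
   { 0xea000000, ARM_TYPE, R_ARM_JUMP24, -8 }, i.e. an ARM branch whose
   target is fixed up through an R_ARM_JUMP24 relocation with addend -8,
   and DATA_WORD (0, R_ARM_ABS32, 0) describes a literal word that receives
   the absolute address of the stub's destination.  */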
2274
2275 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2276 to reach the stub if necessary. */
2277 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2278 {
2279 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2280 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2281 };
2282
2283 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2284 available. */
2285 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2286 {
2287 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2288 ARM_INSN (0xe12fff1c), /* bx ip */
2289 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2290 };
2291
2292 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2293 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2294 {
2295 THUMB16_INSN (0xb401), /* push {r0} */
2296 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2297 THUMB16_INSN (0x4684), /* mov ip, r0 */
2298 THUMB16_INSN (0xbc01), /* pop {r0} */
2299 THUMB16_INSN (0x4760), /* bx ip */
2300 THUMB16_INSN (0xbf00), /* nop */
2301 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2302 };
2303
2304 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2305 allowed. */
2306 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2307 {
2308 THUMB16_INSN (0x4778), /* bx pc */
2309 THUMB16_INSN (0x46c0), /* nop */
2310 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2311 ARM_INSN (0xe12fff1c), /* bx ip */
2312 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2313 };
2314
2315 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2316 available. */
2317 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2318 {
2319 THUMB16_INSN (0x4778), /* bx pc */
2320 THUMB16_INSN (0x46c0), /* nop */
2321 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2322 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2323 };
2324
2325 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2326 one, when the destination is close enough. */
2327 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2328 {
2329 THUMB16_INSN (0x4778), /* bx pc */
2330 THUMB16_INSN (0x46c0), /* nop */
2331 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2332 };
2333
2334 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2335 blx to reach the stub if necessary. */
2336 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2337 {
2338 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2339 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2340 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2341 };
2342
2343 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2344 blx to reach the stub if necessary. We cannot add into pc;
2345 it is not guaranteed to switch modes (the behaviour differs
2346 between ARMv6 and ARMv7). */
2347 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2348 {
2349 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2350 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2351 ARM_INSN (0xe12fff1c), /* bx ip */
2352 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2353 };
2354
2355 /* V4T ARM -> Thumb long branch stub, PIC. */
2356 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2357 {
2358 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2359 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2360 ARM_INSN (0xe12fff1c), /* bx ip */
2361 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2362 };
2363
2364 /* V4T Thumb -> ARM long branch stub, PIC. */
2365 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2366 {
2367 THUMB16_INSN (0x4778), /* bx pc */
2368 THUMB16_INSN (0x46c0), /* nop */
2369 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2370 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2371 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2372 };
2373
2374 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2375 architectures. */
2376 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2377 {
2378 THUMB16_INSN (0xb401), /* push {r0} */
2379 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2380 THUMB16_INSN (0x46fc), /* mov ip, pc */
2381 THUMB16_INSN (0x4484), /* add ip, r0 */
2382 THUMB16_INSN (0xbc01), /* pop {r0} */
2383 THUMB16_INSN (0x4760), /* bx ip */
2384 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2385 };
2386
2387 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2388 allowed. */
2389 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2390 {
2391 THUMB16_INSN (0x4778), /* bx pc */
2392 THUMB16_INSN (0x46c0), /* nop */
2393 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2394 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2395 ARM_INSN (0xe12fff1c), /* bx ip */
2396 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2397 };
2398
2399 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2400 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2401 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2402 {
2403 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2404 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2405 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2406 };
2407
2408 /* V4T Thumb -> TLS trampoline. Lowest common denominator, which is a
2409 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2410 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2411 {
2412 THUMB16_INSN (0x4778), /* bx pc */
2413 THUMB16_INSN (0x46c0), /* nop */
2414 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2415 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2416 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2417 };
2418
2419 /* Cortex-A8 erratum-workaround stubs. */
2420
2421 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2422 can't use a conditional branch to reach this stub). */
2423
2424 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2425 {
2426 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2427 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2428 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2429 };
2430
2431 /* Stub used for b.w and bl.w instructions. */
2432
2433 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2434 {
2435 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2436 };
2437
2438 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2439 {
2440 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2441 };
2442
2443 /* Stub used for Thumb-2 blx.w instructions. The original blx.w instruction
2444 (which switches to ARM mode) is modified to point to this stub. Jump to the
2445 real destination using an ARM-mode branch. */
2446
2447 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2448 {
2449 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2450 };
2451
2452 /* For each section group there can be a specially created linker section
2453 to hold the stubs for that group. The name of the stub section is based
2454 upon the name of another section within that group with the suffix below
2455 applied.
2456
2457 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2458 create what appeared to be a linker stub section when it actually
2459 contained user code/data. For example, consider this fragment:
2460
2461 const char * stubborn_problems[] = { "np" };
2462
2463 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2464 section called:
2465
2466 .data.rel.local.stubborn_problems
2467
2468 This then causes problems in elf32_arm_build_stubs() as it triggers:
2469
2470 // Ignore non-stub sections.
2471 if (!strstr (stub_sec->name, STUB_SUFFIX))
2472 continue;
2473
2474 And so the section would be ignored instead of being processed. Hence
2475 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2476 C identifier. */
2477 #define STUB_SUFFIX ".__stub"
2478
2479 /* One entry per long/short branch stub defined above. */
2480 #define DEF_STUBS \
2481 DEF_STUB(long_branch_any_any) \
2482 DEF_STUB(long_branch_v4t_arm_thumb) \
2483 DEF_STUB(long_branch_thumb_only) \
2484 DEF_STUB(long_branch_v4t_thumb_thumb) \
2485 DEF_STUB(long_branch_v4t_thumb_arm) \
2486 DEF_STUB(short_branch_v4t_thumb_arm) \
2487 DEF_STUB(long_branch_any_arm_pic) \
2488 DEF_STUB(long_branch_any_thumb_pic) \
2489 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2490 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2491 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2492 DEF_STUB(long_branch_thumb_only_pic) \
2493 DEF_STUB(long_branch_any_tls_pic) \
2494 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2495 DEF_STUB(a8_veneer_b_cond) \
2496 DEF_STUB(a8_veneer_b) \
2497 DEF_STUB(a8_veneer_bl) \
2498 DEF_STUB(a8_veneer_blx)
2499
2500 #define DEF_STUB(x) arm_stub_##x,
2501 enum elf32_arm_stub_type
2502 {
2503 arm_stub_none,
2504 DEF_STUBS
2505 /* Note the first a8_veneer type. */
2506 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2507 };
2508 #undef DEF_STUB
2509
2510 typedef struct
2511 {
2512 const insn_sequence* template_sequence;
2513 int template_size;
2514 } stub_def;
2515
2516 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2517 static const stub_def stub_definitions[] =
2518 {
2519 {NULL, 0},
2520 DEF_STUBS
2521 };
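/* The DEF_STUB/DEF_STUBS pair above is an X-macro: the same list expands
   once into enum elf32_arm_stub_type and once into stub_definitions, so the
   two stay in lock-step.  For instance DEF_STUB(long_branch_any_any)
   contributes the enumerator arm_stub_long_branch_any_any and the entry
   {elf32_arm_stub_long_branch_any_any, ARRAY_SIZE (...)}; because the table
   starts with a {NULL, 0} slot matching arm_stub_none,
   stub_definitions[stub_type] is always that stub's template and length.  */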
2522
2523 struct elf32_arm_stub_hash_entry
2524 {
2525 /* Base hash table entry structure. */
2526 struct bfd_hash_entry root;
2527
2528 /* The stub section. */
2529 asection *stub_sec;
2530
2531 /* Offset within stub_sec of the beginning of this stub. */
2532 bfd_vma stub_offset;
2533
2534 /* Given the symbol's value and its section we can determine its final
2535 value when building the stubs (so the stub knows where to jump). */
2536 bfd_vma target_value;
2537 asection *target_section;
2538
2539 /* Offset to apply to relocation referencing target_value. */
2540 bfd_vma target_addend;
2541
2542 /* The instruction which caused this stub to be generated (only valid for
2543 Cortex-A8 erratum workaround stubs at present). */
2544 unsigned long orig_insn;
2545
2546 /* The stub type. */
2547 enum elf32_arm_stub_type stub_type;
2548 /* Its encoding size in bytes. */
2549 int stub_size;
2550 /* Its template. */
2551 const insn_sequence *stub_template;
2552 /* The size of the template (number of entries). */
2553 int stub_template_size;
2554
2555 /* The symbol table entry, if any, that this was derived from. */
2556 struct elf32_arm_link_hash_entry *h;
2557
2558 /* Type of branch. */
2559 enum arm_st_branch_type branch_type;
2560
2561 /* Where this stub is being called from, or, in the case of combined
2562 stub sections, the first input section in the group. */
2563 asection *id_sec;
2564
2565 /* The name for the local symbol at the start of this stub. The
2566 stub name in the hash table has to be unique; this one does not, so
2567 it can be friendlier. */
2568 char *output_name;
2569 };
2570
2571 /* Used to build a map of a section. This is required for mixed-endian
2572 code/data. */
2573
2574 typedef struct elf32_elf_section_map
2575 {
2576 bfd_vma vma;
2577 char type;
2578 }
2579 elf32_arm_section_map;
2580
2581 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2582
2583 typedef enum
2584 {
2585 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2586 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2587 VFP11_ERRATUM_ARM_VENEER,
2588 VFP11_ERRATUM_THUMB_VENEER
2589 }
2590 elf32_vfp11_erratum_type;
2591
2592 typedef struct elf32_vfp11_erratum_list
2593 {
2594 struct elf32_vfp11_erratum_list *next;
2595 bfd_vma vma;
2596 union
2597 {
2598 struct
2599 {
2600 struct elf32_vfp11_erratum_list *veneer;
2601 unsigned int vfp_insn;
2602 } b;
2603 struct
2604 {
2605 struct elf32_vfp11_erratum_list *branch;
2606 unsigned int id;
2607 } v;
2608 } u;
2609 elf32_vfp11_erratum_type type;
2610 }
2611 elf32_vfp11_erratum_list;
2612
2613 typedef enum
2614 {
2615 DELETE_EXIDX_ENTRY,
2616 INSERT_EXIDX_CANTUNWIND_AT_END
2617 }
2618 arm_unwind_edit_type;
2619
2620 /* A (sorted) list of edits to apply to an unwind table. */
2621 typedef struct arm_unwind_table_edit
2622 {
2623 arm_unwind_edit_type type;
2624 /* Note: we sometimes want to insert an unwind entry corresponding to a
2625 section different from the one we're currently writing out, so record the
2626 (text) section this edit relates to here. */
2627 asection *linked_section;
2628 unsigned int index;
2629 struct arm_unwind_table_edit *next;
2630 }
2631 arm_unwind_table_edit;
2632
2633 typedef struct _arm_elf_section_data
2634 {
2635 /* Information about mapping symbols. */
2636 struct bfd_elf_section_data elf;
2637 unsigned int mapcount;
2638 unsigned int mapsize;
2639 elf32_arm_section_map *map;
2640 /* Information about CPU errata. */
2641 unsigned int erratumcount;
2642 elf32_vfp11_erratum_list *erratumlist;
2643 /* Information about unwind tables. */
2644 union
2645 {
2646 /* Unwind info attached to a text section. */
2647 struct
2648 {
2649 asection *arm_exidx_sec;
2650 } text;
2651
2652 /* Unwind info attached to an .ARM.exidx section. */
2653 struct
2654 {
2655 arm_unwind_table_edit *unwind_edit_list;
2656 arm_unwind_table_edit *unwind_edit_tail;
2657 } exidx;
2658 } u;
2659 }
2660 _arm_elf_section_data;
2661
2662 #define elf32_arm_section_data(sec) \
2663 ((_arm_elf_section_data *) elf_section_data (sec))
2664
2665 /* A fix which might be required for the Cortex-A8 Thumb-2 branch/TLB erratum.
2666 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2667 so they may be created multiple times: whilst relaxing we use an array of
2668 these entries, which is easy to refresh; once a solution has been settled on,
2669 a stub is created for each potentially erratum-triggering instruction. */
2670
2671 struct a8_erratum_fix
2672 {
2673 bfd *input_bfd;
2674 asection *section;
2675 bfd_vma offset;
2676 bfd_vma addend;
2677 unsigned long orig_insn;
2678 char *stub_name;
2679 enum elf32_arm_stub_type stub_type;
2680 enum arm_st_branch_type branch_type;
2681 };
2682
2683 /* A table of relocs applied to branches which might trigger the Cortex-A8
2684 erratum. */
2685
2686 struct a8_erratum_reloc
2687 {
2688 bfd_vma from;
2689 bfd_vma destination;
2690 struct elf32_arm_link_hash_entry *hash;
2691 const char *sym_name;
2692 unsigned int r_type;
2693 enum arm_st_branch_type branch_type;
2694 bfd_boolean non_a8_stub;
2695 };
2696
2697 /* The size of the thread control block. */
2698 #define TCB_SIZE 8
2699
2700 /* ARM-specific information about a PLT entry, over and above the usual
2701 gotplt_union. */
2702 struct arm_plt_info
2703 {
2704 /* We reference count Thumb references to a PLT entry separately,
2705 so that we can emit the Thumb trampoline only if needed. */
2706 bfd_signed_vma thumb_refcount;
2707
2708 /* Some references from Thumb code may be eliminated by BL->BLX
2709 conversion, so record them separately. */
2710 bfd_signed_vma maybe_thumb_refcount;
2711
2712 /* How many of the recorded PLT accesses were from non-call relocations.
2713 This information is useful when deciding whether anything takes the
2714 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2715 non-call references to the function should resolve directly to the
2716 real runtime target. */
2717 unsigned int noncall_refcount;
2718
2719 /* Since PLT entries have variable size if the Thumb prologue is
2720 used, we need to record the index into .got.plt instead of
2721 recomputing it from the PLT offset. */
2722 bfd_signed_vma got_offset;
2723 };
2724
2725 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2726 struct arm_local_iplt_info
2727 {
2728 /* The information that is usually found in the generic ELF part of
2729 the hash table entry. */
2730 union gotplt_union root;
2731
2732 /* The information that is usually found in the ARM-specific part of
2733 the hash table entry. */
2734 struct arm_plt_info arm;
2735
2736 /* A list of all potential dynamic relocations against this symbol. */
2737 struct elf_dyn_relocs *dyn_relocs;
2738 };
2739
2740 struct elf_arm_obj_tdata
2741 {
2742 struct elf_obj_tdata root;
2743
2744 /* tls_type for each local got entry. */
2745 char *local_got_tls_type;
2746
2747 /* GOTPLT entries for TLS descriptors. */
2748 bfd_vma *local_tlsdesc_gotent;
2749
2750 /* Information for local symbols that need entries in .iplt. */
2751 struct arm_local_iplt_info **local_iplt;
2752
2753 /* Zero to warn when linking objects with incompatible enum sizes. */
2754 int no_enum_size_warning;
2755
2756 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2757 int no_wchar_size_warning;
2758 };
2759
2760 #define elf_arm_tdata(bfd) \
2761 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2762
2763 #define elf32_arm_local_got_tls_type(bfd) \
2764 (elf_arm_tdata (bfd)->local_got_tls_type)
2765
2766 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2767 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2768
2769 #define elf32_arm_local_iplt(bfd) \
2770 (elf_arm_tdata (bfd)->local_iplt)
2771
2772 #define is_arm_elf(bfd) \
2773 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2774 && elf_tdata (bfd) != NULL \
2775 && elf_object_id (bfd) == ARM_ELF_DATA)
2776
2777 static bfd_boolean
2778 elf32_arm_mkobject (bfd *abfd)
2779 {
2780 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2781 ARM_ELF_DATA);
2782 }
2783
2784 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2785
2786 /* Arm ELF linker hash entry. */
2787 struct elf32_arm_link_hash_entry
2788 {
2789 struct elf_link_hash_entry root;
2790
2791 /* Track dynamic relocs copied for this symbol. */
2792 struct elf_dyn_relocs *dyn_relocs;
2793
2794 /* ARM-specific PLT information. */
2795 struct arm_plt_info plt;
2796
2797 #define GOT_UNKNOWN 0
2798 #define GOT_NORMAL 1
2799 #define GOT_TLS_GD 2
2800 #define GOT_TLS_IE 4
2801 #define GOT_TLS_GDESC 8
2802 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2803 unsigned int tls_type : 8;
2804
2805 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2806 unsigned int is_iplt : 1;
2807
2808 unsigned int unused : 23;
2809
2810 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2811 starting at the end of the jump table. */
2812 bfd_vma tlsdesc_got;
2813
2814 /* The symbol marking the real symbol location for exported thumb
2815 symbols with Arm stubs. */
2816 struct elf_link_hash_entry *export_glue;
2817
2818 /* A pointer to the most recently used stub hash entry against this
2819 symbol. */
2820 struct elf32_arm_stub_hash_entry *stub_cache;
2821 };
2822
2823 /* Traverse an arm ELF linker hash table. */
2824 #define elf32_arm_link_hash_traverse(table, func, info) \
2825 (elf_link_hash_traverse \
2826 (&(table)->root, \
2827 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2828 (info)))
2829
2830 /* Get the ARM elf linker hash table from a link_info structure. */
2831 #define elf32_arm_hash_table(info) \
2832 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2833 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2834
2835 #define arm_stub_hash_lookup(table, string, create, copy) \
2836 ((struct elf32_arm_stub_hash_entry *) \
2837 bfd_hash_lookup ((table), (string), (create), (copy)))
2838
2839 /* Array to keep track of which stub sections have been created, and
2840 information on stub grouping. */
2841 struct map_stub
2842 {
2843 /* This is the section to which stubs in the group will be
2844 attached. */
2845 asection *link_sec;
2846 /* The stub section. */
2847 asection *stub_sec;
2848 };
2849
2850 #define elf32_arm_compute_jump_table_size(htab) \
2851 ((htab)->next_tls_desc_index * 4)
2852
2853 /* ARM ELF linker hash table. */
2854 struct elf32_arm_link_hash_table
2855 {
2856 /* The main hash table. */
2857 struct elf_link_hash_table root;
2858
2859 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2860 bfd_size_type thumb_glue_size;
2861
2862 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2863 bfd_size_type arm_glue_size;
2864
2865 /* The size in bytes of section containing the ARMv4 BX veneers. */
2866 bfd_size_type bx_glue_size;
2867
2868 /* Offsets of ARMv4 BX veneers. Bit 1 is set if the veneer is present, and
2869 bit 0 is set once the veneer has been populated. */
2870 bfd_vma bx_glue_offset[15];
2871
2872 /* The size in bytes of the section containing glue for VFP11 erratum
2873 veneers. */
2874 bfd_size_type vfp11_erratum_glue_size;
2875
2876 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2877 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2878 elf32_arm_write_section(). */
2879 struct a8_erratum_fix *a8_erratum_fixes;
2880 unsigned int num_a8_erratum_fixes;
2881
2882 /* An arbitrary input BFD chosen to hold the glue sections. */
2883 bfd * bfd_of_glue_owner;
2884
2885 /* Nonzero to output a BE8 image. */
2886 int byteswap_code;
2887
2888 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2889 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2890 int target1_is_rel;
2891
2892 /* The relocation to use for R_ARM_TARGET2 relocations. */
2893 int target2_reloc;
2894
2895 /* 0 = Ignore R_ARM_V4BX.
2896 1 = Convert BX to MOV PC.
2897 2 = Generate v4 interworking stubs. */
2898 int fix_v4bx;
2899
2900 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2901 int fix_cortex_a8;
2902
2903 /* Whether we should fix the ARM1176 BLX immediate issue. */
2904 int fix_arm1176;
2905
2906 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2907 int use_blx;
2908
2909 /* What sort of code sequences we should look for which may trigger the
2910 VFP11 denorm erratum. */
2911 bfd_arm_vfp11_fix vfp11_fix;
2912
2913 /* Global counter for the number of fixes we have emitted. */
2914 int num_vfp11_fixes;
2915
2916 /* Nonzero to force PIC branch veneers. */
2917 int pic_veneer;
2918
2919 /* The number of bytes in the initial entry in the PLT. */
2920 bfd_size_type plt_header_size;
2921
2922 /* The number of bytes in each subsequent PLT entry. */
2923 bfd_size_type plt_entry_size;
2924
2925 /* True if the target system is VxWorks. */
2926 int vxworks_p;
2927
2928 /* True if the target system is Symbian OS. */
2929 int symbian_p;
2930
2931 /* True if the target system is Native Client. */
2932 int nacl_p;
2933
2934 /* True if the target uses REL relocations. */
2935 int use_rel;
2936
2937 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
2938 bfd_vma next_tls_desc_index;
2939
2940 /* How many R_ARM_TLS_DESC relocations were generated so far. */
2941 bfd_vma num_tls_desc;
2942
2943 /* Short-cuts to get to dynamic linker sections. */
2944 asection *sdynbss;
2945 asection *srelbss;
2946
2947 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2948 asection *srelplt2;
2949
2950 /* The offset into splt of the PLT entry for the TLS descriptor
2951 resolver. Special values are 0, if not necessary (or not found
2952 to be necessary yet), and -1 if needed but not determined
2953 yet. */
2954 bfd_vma dt_tlsdesc_plt;
2955
2956 /* The offset into sgot of the GOT entry used by the PLT entry
2957 above. */
2958 bfd_vma dt_tlsdesc_got;
2959
2960 /* Offset in .plt section of tls_arm_trampoline. */
2961 bfd_vma tls_trampoline;
2962
2963 /* Data for R_ARM_TLS_LDM32 relocations. */
2964 union
2965 {
2966 bfd_signed_vma refcount;
2967 bfd_vma offset;
2968 } tls_ldm_got;
2969
2970 /* Small local sym cache. */
2971 struct sym_cache sym_cache;
2972
2973 /* For convenience in allocate_dynrelocs. */
2974 bfd * obfd;
2975
2976 /* The amount of space used by the reserved portion of the sgotplt
2977 section, plus whatever space is used by the jump slots. */
2978 bfd_vma sgotplt_jump_table_size;
2979
2980 /* The stub hash table. */
2981 struct bfd_hash_table stub_hash_table;
2982
2983 /* Linker stub bfd. */
2984 bfd *stub_bfd;
2985
2986 /* Linker call-backs. */
2987 asection * (*add_stub_section) (const char *, asection *);
2988 void (*layout_sections_again) (void);
2989
2990 /* Array to keep track of which stub sections have been created, and
2991 information on stub grouping. */
2992 struct map_stub *stub_group;
2993
2994 /* Number of elements in stub_group. */
2995 int top_id;
2996
2997 /* Assorted information used by elf32_arm_size_stubs. */
2998 unsigned int bfd_count;
2999 int top_index;
3000 asection **input_list;
3001 };
3002
3003 /* Create an entry in an ARM ELF linker hash table. */
3004
3005 static struct bfd_hash_entry *
3006 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3007 struct bfd_hash_table * table,
3008 const char * string)
3009 {
3010 struct elf32_arm_link_hash_entry * ret =
3011 (struct elf32_arm_link_hash_entry *) entry;
3012
3013 /* Allocate the structure if it has not already been allocated by a
3014 subclass. */
3015 if (ret == NULL)
3016 ret = (struct elf32_arm_link_hash_entry *)
3017 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3018 if (ret == NULL)
3019 return (struct bfd_hash_entry *) ret;
3020
3021 /* Call the allocation method of the superclass. */
3022 ret = ((struct elf32_arm_link_hash_entry *)
3023 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3024 table, string));
3025 if (ret != NULL)
3026 {
3027 ret->dyn_relocs = NULL;
3028 ret->tls_type = GOT_UNKNOWN;
3029 ret->tlsdesc_got = (bfd_vma) -1;
3030 ret->plt.thumb_refcount = 0;
3031 ret->plt.maybe_thumb_refcount = 0;
3032 ret->plt.noncall_refcount = 0;
3033 ret->plt.got_offset = -1;
3034 ret->is_iplt = FALSE;
3035 ret->export_glue = NULL;
3036
3037 ret->stub_cache = NULL;
3038 }
3039
3040 return (struct bfd_hash_entry *) ret;
3041 }
3042
3043 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3044 symbols. */
3045
3046 static bfd_boolean
3047 elf32_arm_allocate_local_sym_info (bfd *abfd)
3048 {
3049 if (elf_local_got_refcounts (abfd) == NULL)
3050 {
3051 bfd_size_type num_syms;
3052 bfd_size_type size;
3053 char *data;
3054
3055 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3056 size = num_syms * (sizeof (bfd_signed_vma)
3057 + sizeof (struct arm_local_iplt_info *)
3058 + sizeof (bfd_vma)
3059 + sizeof (char));
3060 data = bfd_zalloc (abfd, size);
3061 if (data == NULL)
3062 return FALSE;
3063
3064 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3065 data += num_syms * sizeof (bfd_signed_vma);
3066
3067 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3068 data += num_syms * sizeof (struct arm_local_iplt_info *);
3069
3070 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3071 data += num_syms * sizeof (bfd_vma);
3072
3073 elf32_arm_local_got_tls_type (abfd) = data;
3074 }
3075 return TRUE;
3076 }
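/* The single allocation above is carved into four parallel arrays indexed
   by local symbol number: first the GOT reference counts (bfd_signed_vma),
   then the arm_local_iplt_info pointers, then the TLS descriptor GOT
   offsets (bfd_vma), and finally one tls_type byte per symbol, which is
   what the elf32_arm_local_* accessor macros above point into.  */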
3077
3078 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3079 to input bfd ABFD. Create the information if it doesn't already exist.
3080 Return null if an allocation fails. */
3081
3082 static struct arm_local_iplt_info *
3083 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3084 {
3085 struct arm_local_iplt_info **ptr;
3086
3087 if (!elf32_arm_allocate_local_sym_info (abfd))
3088 return NULL;
3089
3090 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3091 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3092 if (*ptr == NULL)
3093 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3094 return *ptr;
3095 }
3096
3097 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3098 in ABFD's symbol table. If the symbol is global, H points to its
3099 hash table entry, otherwise H is null.
3100
3101 Return true if the symbol does have PLT information. When returning
3102 true, point *ROOT_PLT at the target-independent reference count/offset
3103 union and *ARM_PLT at the ARM-specific information. */
3104
3105 static bfd_boolean
3106 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3107 unsigned long r_symndx, union gotplt_union **root_plt,
3108 struct arm_plt_info **arm_plt)
3109 {
3110 struct arm_local_iplt_info *local_iplt;
3111
3112 if (h != NULL)
3113 {
3114 *root_plt = &h->root.plt;
3115 *arm_plt = &h->plt;
3116 return TRUE;
3117 }
3118
3119 if (elf32_arm_local_iplt (abfd) == NULL)
3120 return FALSE;
3121
3122 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3123 if (local_iplt == NULL)
3124 return FALSE;
3125
3126 *root_plt = &local_iplt->root;
3127 *arm_plt = &local_iplt->arm;
3128 return TRUE;
3129 }
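/* A typical caller therefore does something along these lines (a sketch;
   the variable names are illustrative):

     union gotplt_union *root_plt;
     struct arm_plt_info *arm_plt;

     if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx,
                                 &root_plt, &arm_plt)
         && root_plt->offset != (bfd_vma) -1)
       ... the symbol has a PLT entry at root_plt->offset ...

   For global symbols the data comes from the hash table entry; for local
   STT_GNU_IFUNC symbols it comes from the per-bfd local_iplt array.  */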
3130
3131 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3132 before it. */
3133
3134 static bfd_boolean
3135 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3136 struct arm_plt_info *arm_plt)
3137 {
3138 struct elf32_arm_link_hash_table *htab;
3139
3140 htab = elf32_arm_hash_table (info);
3141 return (arm_plt->thumb_refcount != 0
3142 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3143 }
3144
3145 /* Return a pointer to the head of the dynamic reloc list that should
3146 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3147 ABFD's symbol table. Return null if an error occurs. */
3148
3149 static struct elf_dyn_relocs **
3150 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3151 Elf_Internal_Sym *isym)
3152 {
3153 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3154 {
3155 struct arm_local_iplt_info *local_iplt;
3156
3157 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3158 if (local_iplt == NULL)
3159 return NULL;
3160 return &local_iplt->dyn_relocs;
3161 }
3162 else
3163 {
3164 /* Track dynamic relocs needed for local syms too.
3165 We really need local syms available to do this
3166 easily. Oh well. */
3167 asection *s;
3168 void *vpp;
3169
3170 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3171 if (s == NULL)
3172 abort ();
3173
3174 vpp = &elf_section_data (s)->local_dynrel;
3175 return (struct elf_dyn_relocs **) vpp;
3176 }
3177 }
3178
3179 /* Initialize an entry in the stub hash table. */
3180
3181 static struct bfd_hash_entry *
3182 stub_hash_newfunc (struct bfd_hash_entry *entry,
3183 struct bfd_hash_table *table,
3184 const char *string)
3185 {
3186 /* Allocate the structure if it has not already been allocated by a
3187 subclass. */
3188 if (entry == NULL)
3189 {
3190 entry = (struct bfd_hash_entry *)
3191 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3192 if (entry == NULL)
3193 return entry;
3194 }
3195
3196 /* Call the allocation method of the superclass. */
3197 entry = bfd_hash_newfunc (entry, table, string);
3198 if (entry != NULL)
3199 {
3200 struct elf32_arm_stub_hash_entry *eh;
3201
3202 /* Initialize the local fields. */
3203 eh = (struct elf32_arm_stub_hash_entry *) entry;
3204 eh->stub_sec = NULL;
3205 eh->stub_offset = 0;
3206 eh->target_value = 0;
3207 eh->target_section = NULL;
3208 eh->target_addend = 0;
3209 eh->orig_insn = 0;
3210 eh->stub_type = arm_stub_none;
3211 eh->stub_size = 0;
3212 eh->stub_template = NULL;
3213 eh->stub_template_size = 0;
3214 eh->h = NULL;
3215 eh->id_sec = NULL;
3216 eh->output_name = NULL;
3217 }
3218
3219 return entry;
3220 }
3221
3222 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3223 shortcuts to them in our hash table. */
3224
3225 static bfd_boolean
3226 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3227 {
3228 struct elf32_arm_link_hash_table *htab;
3229
3230 htab = elf32_arm_hash_table (info);
3231 if (htab == NULL)
3232 return FALSE;
3233
3234 /* BPABI objects never have a GOT or associated sections. */
3235 if (htab->symbian_p)
3236 return TRUE;
3237
3238 if (! _bfd_elf_create_got_section (dynobj, info))
3239 return FALSE;
3240
3241 return TRUE;
3242 }
3243
3244 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3245
3246 static bfd_boolean
3247 create_ifunc_sections (struct bfd_link_info *info)
3248 {
3249 struct elf32_arm_link_hash_table *htab;
3250 const struct elf_backend_data *bed;
3251 bfd *dynobj;
3252 asection *s;
3253 flagword flags;
3254
3255 htab = elf32_arm_hash_table (info);
3256 dynobj = htab->root.dynobj;
3257 bed = get_elf_backend_data (dynobj);
3258 flags = bed->dynamic_sec_flags;
3259
3260 if (htab->root.iplt == NULL)
3261 {
3262 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3263 flags | SEC_READONLY | SEC_CODE);
3264 if (s == NULL
3265 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3266 return FALSE;
3267 htab->root.iplt = s;
3268 }
3269
3270 if (htab->root.irelplt == NULL)
3271 {
3272 s = bfd_make_section_anyway_with_flags (dynobj,
3273 RELOC_SECTION (htab, ".iplt"),
3274 flags | SEC_READONLY);
3275 if (s == NULL
3276 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3277 return FALSE;
3278 htab->root.irelplt = s;
3279 }
3280
3281 if (htab->root.igotplt == NULL)
3282 {
3283 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3284 if (s == NULL
3285 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3286 return FALSE;
3287 htab->root.igotplt = s;
3288 }
3289 return TRUE;
3290 }
3291
3292 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3293 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3294 hash table. */
3295
3296 static bfd_boolean
3297 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3298 {
3299 struct elf32_arm_link_hash_table *htab;
3300
3301 htab = elf32_arm_hash_table (info);
3302 if (htab == NULL)
3303 return FALSE;
3304
3305 if (!htab->root.sgot && !create_got_section (dynobj, info))
3306 return FALSE;
3307
3308 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3309 return FALSE;
3310
3311 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3312 if (!info->shared)
3313 htab->srelbss = bfd_get_linker_section (dynobj,
3314 RELOC_SECTION (htab, ".bss"));
3315
3316 if (htab->vxworks_p)
3317 {
3318 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3319 return FALSE;
3320
3321 if (info->shared)
3322 {
3323 htab->plt_header_size = 0;
3324 htab->plt_entry_size
3325 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3326 }
3327 else
3328 {
3329 htab->plt_header_size
3330 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3331 htab->plt_entry_size
3332 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3333 }
3334 }
3335
3336 if (!htab->root.splt
3337 || !htab->root.srelplt
3338 || !htab->sdynbss
3339 || (!info->shared && !htab->srelbss))
3340 abort ();
3341
3342 return TRUE;
3343 }
3344
3345 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3346
3347 static void
3348 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3349 struct elf_link_hash_entry *dir,
3350 struct elf_link_hash_entry *ind)
3351 {
3352 struct elf32_arm_link_hash_entry *edir, *eind;
3353
3354 edir = (struct elf32_arm_link_hash_entry *) dir;
3355 eind = (struct elf32_arm_link_hash_entry *) ind;
3356
3357 if (eind->dyn_relocs != NULL)
3358 {
3359 if (edir->dyn_relocs != NULL)
3360 {
3361 struct elf_dyn_relocs **pp;
3362 struct elf_dyn_relocs *p;
3363
3364 /* Add reloc counts against the indirect sym to the direct sym
3365 list. Merge any entries against the same section. */
3366 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3367 {
3368 struct elf_dyn_relocs *q;
3369
3370 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3371 if (q->sec == p->sec)
3372 {
3373 q->pc_count += p->pc_count;
3374 q->count += p->count;
3375 *pp = p->next;
3376 break;
3377 }
3378 if (q == NULL)
3379 pp = &p->next;
3380 }
3381 *pp = edir->dyn_relocs;
3382 }
3383
3384 edir->dyn_relocs = eind->dyn_relocs;
3385 eind->dyn_relocs = NULL;
3386 }
3387
3388 if (ind->root.type == bfd_link_hash_indirect)
3389 {
3390 /* Copy over PLT info. */
3391 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3392 eind->plt.thumb_refcount = 0;
3393 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3394 eind->plt.maybe_thumb_refcount = 0;
3395 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3396 eind->plt.noncall_refcount = 0;
3397
3398 /* We should only allocate a function to .iplt once the final
3399 symbol information is known. */
3400 BFD_ASSERT (!eind->is_iplt);
3401
3402 if (dir->got.refcount <= 0)
3403 {
3404 edir->tls_type = eind->tls_type;
3405 eind->tls_type = GOT_UNKNOWN;
3406 }
3407 }
3408
3409 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3410 }
3411
3412 /* Create an ARM elf linker hash table. */
3413
3414 static struct bfd_link_hash_table *
3415 elf32_arm_link_hash_table_create (bfd *abfd)
3416 {
3417 struct elf32_arm_link_hash_table *ret;
3418 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3419
3420 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
3421 if (ret == NULL)
3422 return NULL;
3423
3424 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3425 elf32_arm_link_hash_newfunc,
3426 sizeof (struct elf32_arm_link_hash_entry),
3427 ARM_ELF_DATA))
3428 {
3429 free (ret);
3430 return NULL;
3431 }
3432
3433 ret->sdynbss = NULL;
3434 ret->srelbss = NULL;
3435 ret->srelplt2 = NULL;
3436 ret->dt_tlsdesc_plt = 0;
3437 ret->dt_tlsdesc_got = 0;
3438 ret->tls_trampoline = 0;
3439 ret->next_tls_desc_index = 0;
3440 ret->num_tls_desc = 0;
3441 ret->thumb_glue_size = 0;
3442 ret->arm_glue_size = 0;
3443 ret->bx_glue_size = 0;
3444 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
3445 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3446 ret->vfp11_erratum_glue_size = 0;
3447 ret->num_vfp11_fixes = 0;
3448 ret->fix_cortex_a8 = 0;
3449 ret->fix_arm1176 = 0;
3450 ret->bfd_of_glue_owner = NULL;
3451 ret->byteswap_code = 0;
3452 ret->target1_is_rel = 0;
3453 ret->target2_reloc = R_ARM_NONE;
3454 #ifdef FOUR_WORD_PLT
3455 ret->plt_header_size = 16;
3456 ret->plt_entry_size = 16;
3457 #else
3458 ret->plt_header_size = 20;
3459 ret->plt_entry_size = 12;
3460 #endif
3461 ret->fix_v4bx = 0;
3462 ret->use_blx = 0;
3463 ret->vxworks_p = 0;
3464 ret->symbian_p = 0;
3465 ret->nacl_p = 0;
3466 ret->use_rel = 1;
3467 ret->sym_cache.abfd = NULL;
3468 ret->obfd = abfd;
3469 ret->tls_ldm_got.refcount = 0;
3470 ret->stub_bfd = NULL;
3471 ret->add_stub_section = NULL;
3472 ret->layout_sections_again = NULL;
3473 ret->stub_group = NULL;
3474 ret->top_id = 0;
3475 ret->bfd_count = 0;
3476 ret->top_index = 0;
3477 ret->input_list = NULL;
3478
3479 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3480 sizeof (struct elf32_arm_stub_hash_entry)))
3481 {
3482 free (ret);
3483 return NULL;
3484 }
3485
3486 return &ret->root.root;
3487 }
3488
3489 /* Free the derived linker hash table. */
3490
3491 static void
3492 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
3493 {
3494 struct elf32_arm_link_hash_table *ret
3495 = (struct elf32_arm_link_hash_table *) hash;
3496
3497 bfd_hash_table_free (&ret->stub_hash_table);
3498 _bfd_generic_link_hash_table_free (hash);
3499 }
3500
3501 /* Determine if we're dealing with a Thumb-only architecture. */
3502
3503 static bfd_boolean
3504 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3505 {
3506 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3507 Tag_CPU_arch);
3508 int profile;
3509
3510 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
3511 return TRUE;
3512
3513 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3514 return FALSE;
3515
3516 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3517 Tag_CPU_arch_profile);
3518
3519 return profile == 'M';
3520 }
3521
3522 /* Determine if we're dealing with a Thumb-2 object. */
3523
3524 static bfd_boolean
3525 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3526 {
3527 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3528 Tag_CPU_arch);
3529 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3530 }
3531
3532 /* Determine what kind of NOPs are available. */
3533
3534 static bfd_boolean
3535 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3536 {
3537 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3538 Tag_CPU_arch);
3539 return arch == TAG_CPU_ARCH_V6T2
3540 || arch == TAG_CPU_ARCH_V6K
3541 || arch == TAG_CPU_ARCH_V7
3542 || arch == TAG_CPU_ARCH_V7E_M;
3543 }
3544
3545 static bfd_boolean
3546 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3547 {
3548 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3549 Tag_CPU_arch);
3550 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3551 || arch == TAG_CPU_ARCH_V7E_M);
3552 }
3553
3554 static bfd_boolean
3555 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3556 {
3557 switch (stub_type)
3558 {
3559 case arm_stub_long_branch_thumb_only:
3560 case arm_stub_long_branch_v4t_thumb_arm:
3561 case arm_stub_short_branch_v4t_thumb_arm:
3562 case arm_stub_long_branch_v4t_thumb_arm_pic:
3563 case arm_stub_long_branch_v4t_thumb_tls_pic:
3564 case arm_stub_long_branch_thumb_only_pic:
3565 return TRUE;
3566 case arm_stub_none:
3567 BFD_FAIL ();
3568 return FALSE;
3570 default:
3571 return FALSE;
3572 }
3573 }
3574
3575 /* Determine the type of stub needed, if any, for a call. */
3576
3577 static enum elf32_arm_stub_type
3578 arm_type_of_stub (struct bfd_link_info *info,
3579 asection *input_sec,
3580 const Elf_Internal_Rela *rel,
3581 unsigned char st_type,
3582 enum arm_st_branch_type *actual_branch_type,
3583 struct elf32_arm_link_hash_entry *hash,
3584 bfd_vma destination,
3585 asection *sym_sec,
3586 bfd *input_bfd,
3587 const char *name)
3588 {
3589 bfd_vma location;
3590 bfd_signed_vma branch_offset;
3591 unsigned int r_type;
3592 struct elf32_arm_link_hash_table * globals;
3593 int thumb2;
3594 int thumb_only;
3595 enum elf32_arm_stub_type stub_type = arm_stub_none;
3596 int use_plt = 0;
3597 enum arm_st_branch_type branch_type = *actual_branch_type;
3598 union gotplt_union *root_plt;
3599 struct arm_plt_info *arm_plt;
3600
3601 if (branch_type == ST_BRANCH_LONG)
3602 return stub_type;
3603
3604 globals = elf32_arm_hash_table (info);
3605 if (globals == NULL)
3606 return stub_type;
3607
3608 thumb_only = using_thumb_only (globals);
3609
3610 thumb2 = using_thumb2 (globals);
3611
3612 /* Determine where the call point is. */
3613 location = (input_sec->output_offset
3614 + input_sec->output_section->vma
3615 + rel->r_offset);
3616
3617 r_type = ELF32_R_TYPE (rel->r_info);
3618
3619 /* For TLS call relocs, it is the caller's responsibility to provide
3620 the address of the appropriate trampoline. */
3621 if (r_type != R_ARM_TLS_CALL
3622 && r_type != R_ARM_THM_TLS_CALL
3623 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3624 &root_plt, &arm_plt)
3625 && root_plt->offset != (bfd_vma) -1)
3626 {
3627 asection *splt;
3628
3629 if (hash == NULL || hash->is_iplt)
3630 splt = globals->root.iplt;
3631 else
3632 splt = globals->root.splt;
3633 if (splt != NULL)
3634 {
3635 use_plt = 1;
3636
3637 /* Note when dealing with PLT entries: the main PLT stub is in
3638 ARM mode, so if the branch is in Thumb mode, another
3639 Thumb->ARM stub will be inserted later just before the ARM
3640 PLT stub. We don't take this extra distance into account
3641 here, because if a long branch stub is needed, we'll add a
3642 Thumb->ARM one and branch directly to the ARM PLT entry,
3643 which avoids spreading offset corrections over several
3644 places. */
3645
3646 destination = (splt->output_section->vma
3647 + splt->output_offset
3648 + root_plt->offset);
3649 st_type = STT_FUNC;
3650 branch_type = ST_BRANCH_TO_ARM;
3651 }
3652 }
3653 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3654 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3655
3656 branch_offset = (bfd_signed_vma)(destination - location);
3657
3658 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3659 || r_type == R_ARM_THM_TLS_CALL)
3660 {
3661 /* Handle cases where:
3662 - this call goes too far (different Thumb/Thumb2 max
3663 distance)
3664 - it's a Thumb->Arm call and blx is not available, or it's a
3665 Thumb->Arm branch (not bl). A stub is needed in this case,
3666 but only if this call is not through a PLT entry. Indeed,
3667 PLT stubs handle mode switching already.
3668 */
3669 if ((!thumb2
3670 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3671 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3672 || (thumb2
3673 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3674 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3675 || (branch_type == ST_BRANCH_TO_ARM
3676 && (((r_type == R_ARM_THM_CALL
3677 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3678 || (r_type == R_ARM_THM_JUMP24))
3679 && !use_plt))
3680 {
3681 if (branch_type == ST_BRANCH_TO_THUMB)
3682 {
3683 /* Thumb to thumb. */
3684 if (!thumb_only)
3685 {
3686 stub_type = (info->shared | globals->pic_veneer)
3687 /* PIC stubs. */
3688 ? ((globals->use_blx
3689 && (r_type == R_ARM_THM_CALL))
3690 /* V5T and above. Stub starts with ARM code, so
3691 we must be able to switch mode before
3692 reaching it, which is only possible for 'bl'
3693 (i.e. the R_ARM_THM_CALL relocation). */
3694 ? arm_stub_long_branch_any_thumb_pic
3695 /* On V4T, use Thumb code only. */
3696 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3697
3698 /* non-PIC stubs. */
3699 : ((globals->use_blx
3700 && (r_type == R_ARM_THM_CALL))
3701 /* V5T and above. */
3702 ? arm_stub_long_branch_any_any
3703 /* V4T. */
3704 : arm_stub_long_branch_v4t_thumb_thumb);
3705 }
3706 else
3707 {
3708 stub_type = (info->shared | globals->pic_veneer)
3709 /* PIC stub. */
3710 ? arm_stub_long_branch_thumb_only_pic
3711 /* non-PIC stub. */
3712 : arm_stub_long_branch_thumb_only;
3713 }
3714 }
3715 else
3716 {
3717 /* Thumb to arm. */
3718 if (sym_sec != NULL
3719 && sym_sec->owner != NULL
3720 && !INTERWORK_FLAG (sym_sec->owner))
3721 {
3722 (*_bfd_error_handler)
3723 (_("%B(%s): warning: interworking not enabled.\n"
3724 " first occurrence: %B: Thumb call to ARM"),
3725 sym_sec->owner, input_bfd, name);
3726 }
3727
3728 stub_type =
3729 (info->shared | globals->pic_veneer)
3730 /* PIC stubs. */
3731 ? (r_type == R_ARM_THM_TLS_CALL
3732 /* TLS PIC stubs */
3733 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3734 : arm_stub_long_branch_v4t_thumb_tls_pic)
3735 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3736 /* V5T PIC and above. */
3737 ? arm_stub_long_branch_any_arm_pic
3738 /* V4T PIC stub. */
3739 : arm_stub_long_branch_v4t_thumb_arm_pic))
3740
3741 /* non-PIC stubs. */
3742 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3743 /* V5T and above. */
3744 ? arm_stub_long_branch_any_any
3745 /* V4T. */
3746 : arm_stub_long_branch_v4t_thumb_arm);
3747
3748 /* Handle v4t short branches. */
3749 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3750 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3751 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3752 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3753 }
3754 }
3755 }
3756 else if (r_type == R_ARM_CALL
3757 || r_type == R_ARM_JUMP24
3758 || r_type == R_ARM_PLT32
3759 || r_type == R_ARM_TLS_CALL)
3760 {
3761 if (branch_type == ST_BRANCH_TO_THUMB)
3762 {
3763 /* Arm to thumb. */
3764
3765 if (sym_sec != NULL
3766 && sym_sec->owner != NULL
3767 && !INTERWORK_FLAG (sym_sec->owner))
3768 {
3769 (*_bfd_error_handler)
3770 (_("%B(%s): warning: interworking not enabled.\n"
3771 " first occurrence: %B: ARM call to Thumb"),
3772 sym_sec->owner, input_bfd, name);
3773 }
3774
3775 /* We have an extra 2 bytes of reach because of
3776 the mode change (bit 24 (H) of the BLX encoding). */
3777 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3778 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3779 || (r_type == R_ARM_CALL && !globals->use_blx)
3780 || (r_type == R_ARM_JUMP24)
3781 || (r_type == R_ARM_PLT32))
3782 {
3783 stub_type = (info->shared | globals->pic_veneer)
3784 /* PIC stubs. */
3785 ? ((globals->use_blx)
3786 /* V5T and above. */
3787 ? arm_stub_long_branch_any_thumb_pic
3788 /* V4T stub. */
3789 : arm_stub_long_branch_v4t_arm_thumb_pic)
3790
3791 /* non-PIC stubs. */
3792 : ((globals->use_blx)
3793 /* V5T and above. */
3794 ? arm_stub_long_branch_any_any
3795 /* V4T. */
3796 : arm_stub_long_branch_v4t_arm_thumb);
3797 }
3798 }
3799 else
3800 {
3801 /* Arm to arm. */
3802 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3803 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3804 {
3805 stub_type =
3806 (info->shared | globals->pic_veneer)
3807 /* PIC stubs. */
3808 ? (r_type == R_ARM_TLS_CALL
3809 /* TLS PIC Stub */
3810 ? arm_stub_long_branch_any_tls_pic
3811 : arm_stub_long_branch_any_arm_pic)
3812 /* non-PIC stubs. */
3813 : arm_stub_long_branch_any_any;
3814 }
3815 }
3816 }
3817
3818 /* If a stub is needed, record the actual destination type. */
3819 if (stub_type != arm_stub_none)
3820 *actual_branch_type = branch_type;
3821
3822 return stub_type;
3823 }
3824
3825 /* Build a name for an entry in the stub hash table. */
3826
3827 static char *
3828 elf32_arm_stub_name (const asection *input_section,
3829 const asection *sym_sec,
3830 const struct elf32_arm_link_hash_entry *hash,
3831 const Elf_Internal_Rela *rel,
3832 enum elf32_arm_stub_type stub_type)
3833 {
3834 char *stub_name;
3835 bfd_size_type len;
3836
3837 if (hash)
3838 {
3839 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3840 stub_name = (char *) bfd_malloc (len);
3841 if (stub_name != NULL)
3842 sprintf (stub_name, "%08x_%s+%x_%d",
3843 input_section->id & 0xffffffff,
3844 hash->root.root.root.string,
3845 (int) rel->r_addend & 0xffffffff,
3846 (int) stub_type);
3847 }
3848 else
3849 {
3850 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3851 stub_name = (char *) bfd_malloc (len);
3852 if (stub_name != NULL)
3853 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3854 input_section->id & 0xffffffff,
3855 sym_sec->id & 0xffffffff,
3856 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
3857 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
3858 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3859 (int) rel->r_addend & 0xffffffff,
3860 (int) stub_type);
3861 }
3862
3863 return stub_name;
3864 }
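
/* For illustration (hypothetical values): a call to the global symbol
   "printf" from an input section with id 0x2a and a zero addend yields
   a name of the form "0000002a_printf+0_<stub_type>"; for a local
   symbol, the target section id and symbol index replace the symbol
   name, e.g. "0000002a_1f:3+0_<stub_type>".  */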
3865
3866 /* Look up an entry in the stub hash. Stub entries are cached because
3867 creating the stub name takes a bit of time. */
3868
3869 static struct elf32_arm_stub_hash_entry *
3870 elf32_arm_get_stub_entry (const asection *input_section,
3871 const asection *sym_sec,
3872 struct elf_link_hash_entry *hash,
3873 const Elf_Internal_Rela *rel,
3874 struct elf32_arm_link_hash_table *htab,
3875 enum elf32_arm_stub_type stub_type)
3876 {
3877 struct elf32_arm_stub_hash_entry *stub_entry;
3878 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3879 const asection *id_sec;
3880
3881 if ((input_section->flags & SEC_CODE) == 0)
3882 return NULL;
3883
3884 /* If this input section is part of a group of sections sharing one
3885 stub section, then use the id of the first section in the group.
3886 Stub names need to include a section id, as there may well be
3887 more than one stub used to reach, say, printf, and we need to
3888 distinguish between them. */
3889 id_sec = htab->stub_group[input_section->id].link_sec;
3890
3891 if (h != NULL && h->stub_cache != NULL
3892 && h->stub_cache->h == h
3893 && h->stub_cache->id_sec == id_sec
3894 && h->stub_cache->stub_type == stub_type)
3895 {
3896 stub_entry = h->stub_cache;
3897 }
3898 else
3899 {
3900 char *stub_name;
3901
3902 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3903 if (stub_name == NULL)
3904 return NULL;
3905
3906 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3907 stub_name, FALSE, FALSE);
3908 if (h != NULL)
3909 h->stub_cache = stub_entry;
3910
3911 free (stub_name);
3912 }
3913
3914 return stub_entry;
3915 }
3916
3917 /* Find or create a stub section. Returns a pointer to the stub section, and
3918 the section to which the stub section will be attached (in *LINK_SEC_P).
3919 LINK_SEC_P may be NULL. */
3920
3921 static asection *
3922 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3923 struct elf32_arm_link_hash_table *htab)
3924 {
3925 asection *link_sec;
3926 asection *stub_sec;
3927
3928 link_sec = htab->stub_group[section->id].link_sec;
3929 BFD_ASSERT (link_sec != NULL);
3930 stub_sec = htab->stub_group[section->id].stub_sec;
3931
3932 if (stub_sec == NULL)
3933 {
3934 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3935 if (stub_sec == NULL)
3936 {
3937 size_t namelen;
3938 bfd_size_type len;
3939 char *s_name;
3940
3941 namelen = strlen (link_sec->name);
3942 len = namelen + sizeof (STUB_SUFFIX);
3943 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3944 if (s_name == NULL)
3945 return NULL;
3946
3947 memcpy (s_name, link_sec->name, namelen);
3948 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3949 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3950 if (stub_sec == NULL)
3951 return NULL;
3952 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3953 }
3954 htab->stub_group[section->id].stub_sec = stub_sec;
3955 }
3956
3957 if (link_sec_p)
3958 *link_sec_p = link_sec;
3959
3960 return stub_sec;
3961 }
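
/* Illustrative note: the stub section created above takes its name from
   the group's link section with STUB_SUFFIX appended, so stubs grouped
   under a hypothetical input section ".text.hot" end up in a section
   named ".text.hot" STUB_SUFFIX.  */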
3962
3963 /* Add a new stub entry to the stub hash. Not all fields of the new
3964 stub entry are initialised. */
3965
3966 static struct elf32_arm_stub_hash_entry *
3967 elf32_arm_add_stub (const char *stub_name,
3968 asection *section,
3969 struct elf32_arm_link_hash_table *htab)
3970 {
3971 asection *link_sec;
3972 asection *stub_sec;
3973 struct elf32_arm_stub_hash_entry *stub_entry;
3974
3975 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3976 if (stub_sec == NULL)
3977 return NULL;
3978
3979 /* Enter this entry into the linker stub hash table. */
3980 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3981 TRUE, FALSE);
3982 if (stub_entry == NULL)
3983 {
3984 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3985 section->owner,
3986 stub_name);
3987 return NULL;
3988 }
3989
3990 stub_entry->stub_sec = stub_sec;
3991 stub_entry->stub_offset = 0;
3992 stub_entry->id_sec = link_sec;
3993
3994 return stub_entry;
3995 }
3996
3997 /* Store an Arm insn into an output section not processed by
3998 elf32_arm_write_section. */
3999
4000 static void
4001 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4002 bfd * output_bfd, bfd_vma val, void * ptr)
4003 {
4004 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4005 bfd_putl32 (val, ptr);
4006 else
4007 bfd_putb32 (val, ptr);
4008 }
4009
4010 /* Store a 16-bit Thumb insn into an output section not processed by
4011 elf32_arm_write_section. */
4012
4013 static void
4014 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4015 bfd * output_bfd, bfd_vma val, void * ptr)
4016 {
4017 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4018 bfd_putl16 (val, ptr);
4019 else
4020 bfd_putb16 (val, ptr);
4021 }
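
/* Illustrative behaviour of the two helpers above: when byteswap_code is
   clear they store instructions in the output BFD's own endianness; when
   byteswap_code is set they store them in the opposite endianness
   (byteswapped code), e.g. big-endian instructions in an otherwise
   little-endian image.  */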
4022
4023 /* If it's possible to change R_TYPE to a more efficient access
4024 model, return the new reloc type. */
4025
4026 static unsigned
4027 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4028 struct elf_link_hash_entry *h)
4029 {
4030 int is_local = (h == NULL);
4031
4032 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
4033 return r_type;
4034
4035 /* We do not support relaxations for Old TLS models. */
4036 switch (r_type)
4037 {
4038 case R_ARM_TLS_GOTDESC:
4039 case R_ARM_TLS_CALL:
4040 case R_ARM_THM_TLS_CALL:
4041 case R_ARM_TLS_DESCSEQ:
4042 case R_ARM_THM_TLS_DESCSEQ:
4043 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4044 }
4045
4046 return r_type;
4047 }
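
/* Illustrative example of the transition above: when linking an
   executable (info->shared clear), R_ARM_TLS_GOTDESC or R_ARM_TLS_CALL
   against a local symbol (h == NULL) comes back as R_ARM_TLS_LE32,
   while the same reloc against a global symbol becomes
   R_ARM_TLS_IE32.  */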
4048
4049 static bfd_reloc_status_type elf32_arm_final_link_relocate
4050 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4051 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4052 const char *, unsigned char, enum arm_st_branch_type,
4053 struct elf_link_hash_entry *, bfd_boolean *, char **);
4054
4055 static unsigned int
4056 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4057 {
4058 switch (stub_type)
4059 {
4060 case arm_stub_a8_veneer_b_cond:
4061 case arm_stub_a8_veneer_b:
4062 case arm_stub_a8_veneer_bl:
4063 return 2;
4064
4065 case arm_stub_long_branch_any_any:
4066 case arm_stub_long_branch_v4t_arm_thumb:
4067 case arm_stub_long_branch_thumb_only:
4068 case arm_stub_long_branch_v4t_thumb_thumb:
4069 case arm_stub_long_branch_v4t_thumb_arm:
4070 case arm_stub_short_branch_v4t_thumb_arm:
4071 case arm_stub_long_branch_any_arm_pic:
4072 case arm_stub_long_branch_any_thumb_pic:
4073 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4074 case arm_stub_long_branch_v4t_arm_thumb_pic:
4075 case arm_stub_long_branch_v4t_thumb_arm_pic:
4076 case arm_stub_long_branch_thumb_only_pic:
4077 case arm_stub_long_branch_any_tls_pic:
4078 case arm_stub_long_branch_v4t_thumb_tls_pic:
4079 case arm_stub_a8_veneer_blx:
4080 return 4;
4081
4082 default:
4083 abort (); /* Should be unreachable. */
4084 }
4085 }
4086
4087 static bfd_boolean
4088 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4089 void * in_arg)
4090 {
4091 #define MAXRELOCS 2
4092 struct elf32_arm_stub_hash_entry *stub_entry;
4093 struct elf32_arm_link_hash_table *globals;
4094 struct bfd_link_info *info;
4095 asection *stub_sec;
4096 bfd *stub_bfd;
4097 bfd_byte *loc;
4098 bfd_vma sym_value;
4099 int template_size;
4100 int size;
4101 const insn_sequence *template_sequence;
4102 int i;
4103 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4104 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4105 int nrelocs = 0;
4106
4107 /* Massage our args to the form they really have. */
4108 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4109 info = (struct bfd_link_info *) in_arg;
4110
4111 globals = elf32_arm_hash_table (info);
4112 if (globals == NULL)
4113 return FALSE;
4114
4115 stub_sec = stub_entry->stub_sec;
4116
4117 if ((globals->fix_cortex_a8 < 0)
4118 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4119 /* We have to do less-strictly-aligned fixes last. */
4120 return TRUE;
4121
4122 /* Make a note of the offset within the stubs for this entry. */
4123 stub_entry->stub_offset = stub_sec->size;
4124 loc = stub_sec->contents + stub_entry->stub_offset;
4125
4126 stub_bfd = stub_sec->owner;
4127
4128 /* This is the address of the stub destination. */
4129 sym_value = (stub_entry->target_value
4130 + stub_entry->target_section->output_offset
4131 + stub_entry->target_section->output_section->vma);
4132
4133 template_sequence = stub_entry->stub_template;
4134 template_size = stub_entry->stub_template_size;
4135
4136 size = 0;
4137 for (i = 0; i < template_size; i++)
4138 {
4139 switch (template_sequence[i].type)
4140 {
4141 case THUMB16_TYPE:
4142 {
4143 bfd_vma data = (bfd_vma) template_sequence[i].data;
4144 if (template_sequence[i].reloc_addend != 0)
4145 {
4146 /* We've borrowed the reloc_addend field to mean we should
4147 insert a condition code into this (Thumb-1 branch)
4148 instruction. See THUMB16_BCOND_INSN. */
4149 BFD_ASSERT ((data & 0xff00) == 0xd000);
4150 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4151 }
4152 bfd_put_16 (stub_bfd, data, loc + size);
4153 size += 2;
4154 }
4155 break;
4156
4157 case THUMB32_TYPE:
4158 bfd_put_16 (stub_bfd,
4159 (template_sequence[i].data >> 16) & 0xffff,
4160 loc + size);
4161 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4162 loc + size + 2);
4163 if (template_sequence[i].r_type != R_ARM_NONE)
4164 {
4165 stub_reloc_idx[nrelocs] = i;
4166 stub_reloc_offset[nrelocs++] = size;
4167 }
4168 size += 4;
4169 break;
4170
4171 case ARM_TYPE:
4172 bfd_put_32 (stub_bfd, template_sequence[i].data,
4173 loc + size);
4174 /* Handle cases where the target is encoded within the
4175 instruction. */
4176 if (template_sequence[i].r_type == R_ARM_JUMP24)
4177 {
4178 stub_reloc_idx[nrelocs] = i;
4179 stub_reloc_offset[nrelocs++] = size;
4180 }
4181 size += 4;
4182 break;
4183
4184 case DATA_TYPE:
4185 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4186 stub_reloc_idx[nrelocs] = i;
4187 stub_reloc_offset[nrelocs++] = size;
4188 size += 4;
4189 break;
4190
4191 default:
4192 BFD_FAIL ();
4193 return FALSE;
4194 }
4195 }
4196
4197 stub_sec->size += size;
4198
4199 /* Stub size has already been computed in arm_size_one_stub. Check
4200 consistency. */
4201 BFD_ASSERT (size == stub_entry->stub_size);
4202
4203 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4204 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4205 sym_value |= 1;
4206
4207 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4208 in each stub. */
4209 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4210
4211 for (i = 0; i < nrelocs; i++)
4212 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
4213 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
4214 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
4215 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
4216 {
4217 Elf_Internal_Rela rel;
4218 bfd_boolean unresolved_reloc;
4219 char *error_message;
4220 enum arm_st_branch_type branch_type
4221 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
4222 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
4223 bfd_vma points_to = sym_value + stub_entry->target_addend;
4224
4225 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4226 rel.r_info = ELF32_R_INFO (0,
4227 template_sequence[stub_reloc_idx[i]].r_type);
4228 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
4229
4230 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4231 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4232 template should refer back to the instruction after the original
4233 branch. */
4234 points_to = sym_value;
4235
4236 /* There may be unintended consequences if this is not true. */
4237 BFD_ASSERT (stub_entry->h == NULL);
4238
4239 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4240 properly. We should probably use this function unconditionally,
4241 rather than only for certain relocations listed in the enclosing
4242 conditional, for the sake of consistency. */
4243 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4244 (template_sequence[stub_reloc_idx[i]].r_type),
4245 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4246 points_to, info, stub_entry->target_section, "", STT_FUNC,
4247 branch_type, (struct elf_link_hash_entry *) stub_entry->h,
4248 &unresolved_reloc, &error_message);
4249 }
4250 else
4251 {
4252 Elf_Internal_Rela rel;
4253 bfd_boolean unresolved_reloc;
4254 char *error_message;
4255 bfd_vma points_to = sym_value + stub_entry->target_addend
4256 + template_sequence[stub_reloc_idx[i]].reloc_addend;
4257
4258 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4259 rel.r_info = ELF32_R_INFO (0,
4260 template_sequence[stub_reloc_idx[i]].r_type);
4261 rel.r_addend = 0;
4262
4263 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4264 (template_sequence[stub_reloc_idx[i]].r_type),
4265 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4266 points_to, info, stub_entry->target_section, "", STT_FUNC,
4267 stub_entry->branch_type,
4268 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4269 &error_message);
4270 }
4271
4272 return TRUE;
4273 #undef MAXRELOCS
4274 }
4275
4276 /* Calculate the template, template size and instruction size for a stub.
4277 Return value is the instruction size. */
4278
4279 static unsigned int
4280 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4281 const insn_sequence **stub_template,
4282 int *stub_template_size)
4283 {
4284 const insn_sequence *template_sequence = NULL;
4285 int template_size = 0, i;
4286 unsigned int size;
4287
4288 template_sequence = stub_definitions[stub_type].template_sequence;
4289 if (stub_template)
4290 *stub_template = template_sequence;
4291
4292 template_size = stub_definitions[stub_type].template_size;
4293 if (stub_template_size)
4294 *stub_template_size = template_size;
4295
4296 size = 0;
4297 for (i = 0; i < template_size; i++)
4298 {
4299 switch (template_sequence[i].type)
4300 {
4301 case THUMB16_TYPE:
4302 size += 2;
4303 break;
4304
4305 case ARM_TYPE:
4306 case THUMB32_TYPE:
4307 case DATA_TYPE:
4308 size += 4;
4309 break;
4310
4311 default:
4312 BFD_FAIL ();
4313 return 0;
4314 }
4315 }
4316
4317 return size;
4318 }
4319
4320 /* As above, but don't actually build the stub. Just bump offset so
4321 we know stub section sizes. */
4322
4323 static bfd_boolean
4324 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4325 void *in_arg ATTRIBUTE_UNUSED)
4326 {
4327 struct elf32_arm_stub_hash_entry *stub_entry;
4328 const insn_sequence *template_sequence;
4329 int template_size, size;
4330
4331 /* Massage our args to the form they really have. */
4332 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4333
4334 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
4335 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
4336
4337 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4338 &template_size);
4339
4340 stub_entry->stub_size = size;
4341 stub_entry->stub_template = template_sequence;
4342 stub_entry->stub_template_size = template_size;
4343
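/* Round the reserved size up to a multiple of 8 bytes, e.g. a 12-byte
   template reserves 16 bytes (illustrative figure).  */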
4344 size = (size + 7) & ~7;
4345 stub_entry->stub_sec->size += size;
4346
4347 return TRUE;
4348 }
4349
4350 /* External entry points for sizing and building linker stubs. */
4351
4352 /* Set up various things so that we can make a list of input sections
4353 for each output section included in the link. Returns -1 on error,
4354 0 when no stubs will be needed, and 1 on success. */
4355
4356 int
4357 elf32_arm_setup_section_lists (bfd *output_bfd,
4358 struct bfd_link_info *info)
4359 {
4360 bfd *input_bfd;
4361 unsigned int bfd_count;
4362 int top_id, top_index;
4363 asection *section;
4364 asection **input_list, **list;
4365 bfd_size_type amt;
4366 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4367
4368 if (htab == NULL)
4369 return 0;
4370 if (! is_elf_hash_table (htab))
4371 return 0;
4372
4373 /* Count the number of input BFDs and find the top input section id. */
4374 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4375 input_bfd != NULL;
4376 input_bfd = input_bfd->link_next)
4377 {
4378 bfd_count += 1;
4379 for (section = input_bfd->sections;
4380 section != NULL;
4381 section = section->next)
4382 {
4383 if (top_id < section->id)
4384 top_id = section->id;
4385 }
4386 }
4387 htab->bfd_count = bfd_count;
4388
4389 amt = sizeof (struct map_stub) * (top_id + 1);
4390 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4391 if (htab->stub_group == NULL)
4392 return -1;
4393 htab->top_id = top_id;
4394
4395 /* We can't use output_bfd->section_count here to find the top output
4396 section index as some sections may have been removed, and
4397 _bfd_strip_section_from_output doesn't renumber the indices. */
4398 for (section = output_bfd->sections, top_index = 0;
4399 section != NULL;
4400 section = section->next)
4401 {
4402 if (top_index < section->index)
4403 top_index = section->index;
4404 }
4405
4406 htab->top_index = top_index;
4407 amt = sizeof (asection *) * (top_index + 1);
4408 input_list = (asection **) bfd_malloc (amt);
4409 htab->input_list = input_list;
4410 if (input_list == NULL)
4411 return -1;
4412
4413 /* For sections we aren't interested in, mark their entries with a
4414 value we can check later. */
4415 list = input_list + top_index;
4416 do
4417 *list = bfd_abs_section_ptr;
4418 while (list-- != input_list);
4419
4420 for (section = output_bfd->sections;
4421 section != NULL;
4422 section = section->next)
4423 {
4424 if ((section->flags & SEC_CODE) != 0)
4425 input_list[section->index] = NULL;
4426 }
4427
4428 return 1;
4429 }
4430
4431 /* The linker repeatedly calls this function for each input section,
4432 in the order that input sections are linked into output sections.
4433 Build lists of input sections to determine groupings between which
4434 we may insert linker stubs. */
4435
4436 void
4437 elf32_arm_next_input_section (struct bfd_link_info *info,
4438 asection *isec)
4439 {
4440 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4441
4442 if (htab == NULL)
4443 return;
4444
4445 if (isec->output_section->index <= htab->top_index)
4446 {
4447 asection **list = htab->input_list + isec->output_section->index;
4448
4449 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4450 {
4451 /* Steal the link_sec pointer for our list. */
4452 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4453 /* This happens to make the list in reverse order,
4454 which we reverse later. */
4455 PREV_SEC (isec) = *list;
4456 *list = isec;
4457 }
4458 }
4459 }
4460
4461 /* See whether we can group stub sections together. Grouping stub
4462 sections may result in fewer stubs. More importantly, we need to
4463 put all .init* and .fini* stubs at the end of the .init or
4464 .fini output sections respectively, because glibc splits the
4465 _init and _fini functions into multiple parts. Putting a stub in
4466 the middle of a function is not a good idea. */
4467
4468 static void
4469 group_sections (struct elf32_arm_link_hash_table *htab,
4470 bfd_size_type stub_group_size,
4471 bfd_boolean stubs_always_after_branch)
4472 {
4473 asection **list = htab->input_list;
4474
4475 do
4476 {
4477 asection *tail = *list;
4478 asection *head;
4479
4480 if (tail == bfd_abs_section_ptr)
4481 continue;
4482
4483 /* Reverse the list: we must avoid placing stubs at the
4484 beginning of the section because the beginning of the text
4485 section may be required for an interrupt vector in bare metal
4486 code. */
4487 #define NEXT_SEC PREV_SEC
4488 head = NULL;
4489 while (tail != NULL)
4490 {
4491 /* Pop from tail. */
4492 asection *item = tail;
4493 tail = PREV_SEC (item);
4494
4495 /* Push on head. */
4496 NEXT_SEC (item) = head;
4497 head = item;
4498 }
4499
4500 while (head != NULL)
4501 {
4502 asection *curr;
4503 asection *next;
4504 bfd_vma stub_group_start = head->output_offset;
4505 bfd_vma end_of_next;
4506
4507 curr = head;
4508 while (NEXT_SEC (curr) != NULL)
4509 {
4510 next = NEXT_SEC (curr);
4511 end_of_next = next->output_offset + next->size;
4512 if (end_of_next - stub_group_start >= stub_group_size)
4513 /* End of NEXT is too far from start, so stop. */
4514 break;
4515 /* Add NEXT to the group. */
4516 curr = next;
4517 }
4518
4519 /* OK, the size from the start to the start of CURR is less
4520 than stub_group_size and thus can be handled by one stub
4521 section. (Or the head section is itself larger than
4522 stub_group_size, in which case we may be toast.)
4523 We should really be keeping track of the total size of
4524 stubs added here, as stubs contribute to the final output
4525 section size. */
4526 do
4527 {
4528 next = NEXT_SEC (head);
4529 /* Set up this stub group. */
4530 htab->stub_group[head->id].link_sec = curr;
4531 }
4532 while (head != curr && (head = next) != NULL);
4533
4534 /* But wait, there's more! Input sections up to stub_group_size
4535 bytes after the stub section can be handled by it too. */
4536 if (!stubs_always_after_branch)
4537 {
4538 stub_group_start = curr->output_offset + curr->size;
4539
4540 while (next != NULL)
4541 {
4542 end_of_next = next->output_offset + next->size;
4543 if (end_of_next - stub_group_start >= stub_group_size)
4544 /* End of NEXT is too far from stubs, so stop. */
4545 break;
4546 /* Add NEXT to the stub group. */
4547 head = next;
4548 next = NEXT_SEC (head);
4549 htab->stub_group[head->id].link_sec = curr;
4550 }
4551 }
4552 head = next;
4553 }
4554 }
4555 while (list++ != htab->input_list + htab->top_index);
4556
4557 free (htab->input_list);
4558 #undef PREV_SEC
4559 #undef NEXT_SEC
4560 }
4561
4562 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4563 erratum fix. */
4564
4565 static int
4566 a8_reloc_compare (const void *a, const void *b)
4567 {
4568 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4569 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4570
4571 if (ra->from < rb->from)
4572 return -1;
4573 else if (ra->from > rb->from)
4574 return 1;
4575 else
4576 return 0;
4577 }
4578
4579 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4580 const char *, char **);
4581
4582 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4583 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4584 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4585 otherwise. */
4586
4587 static bfd_boolean
4588 cortex_a8_erratum_scan (bfd *input_bfd,
4589 struct bfd_link_info *info,
4590 struct a8_erratum_fix **a8_fixes_p,
4591 unsigned int *num_a8_fixes_p,
4592 unsigned int *a8_fix_table_size_p,
4593 struct a8_erratum_reloc *a8_relocs,
4594 unsigned int num_a8_relocs,
4595 unsigned prev_num_a8_fixes,
4596 bfd_boolean *stub_changed_p)
4597 {
4598 asection *section;
4599 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4600 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4601 unsigned int num_a8_fixes = *num_a8_fixes_p;
4602 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4603
4604 if (htab == NULL)
4605 return FALSE;
4606
4607 for (section = input_bfd->sections;
4608 section != NULL;
4609 section = section->next)
4610 {
4611 bfd_byte *contents = NULL;
4612 struct _arm_elf_section_data *sec_data;
4613 unsigned int span;
4614 bfd_vma base_vma;
4615
4616 if (elf_section_type (section) != SHT_PROGBITS
4617 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4618 || (section->flags & SEC_EXCLUDE) != 0
4619 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4620 || (section->output_section == bfd_abs_section_ptr))
4621 continue;
4622
4623 base_vma = section->output_section->vma + section->output_offset;
4624
4625 if (elf_section_data (section)->this_hdr.contents != NULL)
4626 contents = elf_section_data (section)->this_hdr.contents;
4627 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4628 return TRUE;
4629
4630 sec_data = elf32_arm_section_data (section);
4631
4632 for (span = 0; span < sec_data->mapcount; span++)
4633 {
4634 unsigned int span_start = sec_data->map[span].vma;
4635 unsigned int span_end = (span == sec_data->mapcount - 1)
4636 ? section->size : sec_data->map[span + 1].vma;
4637 unsigned int i;
4638 char span_type = sec_data->map[span].type;
4639 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4640
4641 if (span_type != 't')
4642 continue;
4643
4644 /* Span is entirely within a single 4KB region: skip scanning. */
4645 if (((base_vma + span_start) & ~0xfff)
4646 == ((base_vma + span_end) & ~0xfff))
4647 continue;
4648
4649 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4650
4651 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4652 * The branch target is in the same 4KB region as the
4653 first half of the branch.
4654 * The instruction before the branch is a 32-bit
4655 length non-branch instruction. */
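/* Illustrative candidate: a 32-bit BL.W whose first halfword is the
   last halfword of a 4KB page (address ending in 0xffe), preceded by
   a 32-bit non-branch instruction, and whose target lies in the same
   4KB page as that first halfword, matches all of the conditions
   above and may need a veneer.  */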
4656 for (i = span_start; i < span_end;)
4657 {
4658 unsigned int insn = bfd_getl16 (&contents[i]);
4659 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4660 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4661
4662 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4663 insn_32bit = TRUE;
4664
4665 if (insn_32bit)
4666 {
4667 /* Load the rest of the insn (in manual-friendly order). */
4668 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4669
4670 /* Encoding T4: B<c>.W. */
4671 is_b = (insn & 0xf800d000) == 0xf0009000;
4672 /* Encoding T1: BL<c>.W. */
4673 is_bl = (insn & 0xf800d000) == 0xf000d000;
4674 /* Encoding T2: BLX<c>.W. */
4675 is_blx = (insn & 0xf800d000) == 0xf000c000;
4676 /* Encoding T3: B<c>.W (not permitted in IT block). */
4677 is_bcc = (insn & 0xf800d000) == 0xf0008000
4678 && (insn & 0x07f00000) != 0x03800000;
4679 }
4680
4681 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4682
4683 if (((base_vma + i) & 0xfff) == 0xffe
4684 && insn_32bit
4685 && is_32bit_branch
4686 && last_was_32bit
4687 && ! last_was_branch)
4688 {
4689 bfd_signed_vma offset = 0;
4690 bfd_boolean force_target_arm = FALSE;
4691 bfd_boolean force_target_thumb = FALSE;
4692 bfd_vma target;
4693 enum elf32_arm_stub_type stub_type = arm_stub_none;
4694 struct a8_erratum_reloc key, *found;
4695 bfd_boolean use_plt = FALSE;
4696
4697 key.from = base_vma + i;
4698 found = (struct a8_erratum_reloc *)
4699 bsearch (&key, a8_relocs, num_a8_relocs,
4700 sizeof (struct a8_erratum_reloc),
4701 &a8_reloc_compare);
4702
4703 if (found)
4704 {
4705 char *error_message = NULL;
4706 struct elf_link_hash_entry *entry;
4707
4708 /* We don't care about the error returned from this
4709 function, only whether there is glue or not. */
4710 entry = find_thumb_glue (info, found->sym_name,
4711 &error_message);
4712
4713 if (entry)
4714 found->non_a8_stub = TRUE;
4715
4716 /* Keep a simpler condition, for the sake of clarity. */
4717 if (htab->root.splt != NULL && found->hash != NULL
4718 && found->hash->root.plt.offset != (bfd_vma) -1)
4719 use_plt = TRUE;
4720
4721 if (found->r_type == R_ARM_THM_CALL)
4722 {
4723 if (found->branch_type == ST_BRANCH_TO_ARM
4724 || use_plt)
4725 force_target_arm = TRUE;
4726 else
4727 force_target_thumb = TRUE;
4728 }
4729 }
4730
4731 /* Check if we have an offending branch instruction. */
4732
4733 if (found && found->non_a8_stub)
4734 /* We've already made a stub for this instruction, e.g.
4735 it's a long branch or a Thumb->ARM stub. Assume that
4736 stub will suffice to work around the A8 erratum (see
4737 setting of always_after_branch above). */
4738 ;
4739 else if (is_bcc)
4740 {
4741 offset = (insn & 0x7ff) << 1;
4742 offset |= (insn & 0x3f0000) >> 4;
4743 offset |= (insn & 0x2000) ? 0x40000 : 0;
4744 offset |= (insn & 0x800) ? 0x80000 : 0;
4745 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4746 if (offset & 0x100000)
4747 offset |= ~ ((bfd_signed_vma) 0xfffff);
4748 stub_type = arm_stub_a8_veneer_b_cond;
4749 }
4750 else if (is_b || is_bl || is_blx)
4751 {
4752 int s = (insn & 0x4000000) != 0;
4753 int j1 = (insn & 0x2000) != 0;
4754 int j2 = (insn & 0x800) != 0;
4755 int i1 = !(j1 ^ s);
4756 int i2 = !(j2 ^ s);
4757
4758 offset = (insn & 0x7ff) << 1;
4759 offset |= (insn & 0x3ff0000) >> 4;
4760 offset |= i2 << 22;
4761 offset |= i1 << 23;
4762 offset |= s << 24;
4763 if (offset & 0x1000000)
4764 offset |= ~ ((bfd_signed_vma) 0xffffff);
4765
4766 if (is_blx)
4767 offset &= ~ ((bfd_signed_vma) 3);
4768
4769 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4770 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4771 }
4772
4773 if (stub_type != arm_stub_none)
4774 {
4775 bfd_vma pc_for_insn = base_vma + i + 4;
4776
4777 /* The original instruction is a BL, but the target is
4778 an ARM instruction. If we were not making a stub,
4779 the BL would have been converted to a BLX. Use the
4780 BLX stub instead in that case. */
4781 if (htab->use_blx && force_target_arm
4782 && stub_type == arm_stub_a8_veneer_bl)
4783 {
4784 stub_type = arm_stub_a8_veneer_blx;
4785 is_blx = TRUE;
4786 is_bl = FALSE;
4787 }
4788 /* Conversely, if the original instruction was
4789 BLX but the target is Thumb mode, use the BL
4790 stub. */
4791 else if (force_target_thumb
4792 && stub_type == arm_stub_a8_veneer_blx)
4793 {
4794 stub_type = arm_stub_a8_veneer_bl;
4795 is_blx = FALSE;
4796 is_bl = TRUE;
4797 }
4798
4799 if (is_blx)
4800 pc_for_insn &= ~ ((bfd_vma) 3);
4801
4802 /* If we found a relocation, use the proper destination,
4803 not the offset in the (unrelocated) instruction.
4804 Note this is always done if we switched the stub type
4805 above. */
4806 if (found)
4807 offset =
4808 (bfd_signed_vma) (found->destination - pc_for_insn);
4809
4810 /* If the stub will use a Thumb-mode branch to a
4811 PLT target, redirect it to the preceding Thumb
4812 entry point. */
4813 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
4814 offset -= PLT_THUMB_STUB_SIZE;
4815
4816 target = pc_for_insn + offset;
4817
4818 /* The BLX stub is ARM-mode code. Adjust the offset to
4819 take the different PC value (+8 instead of +4) into
4820 account. */
4821 if (stub_type == arm_stub_a8_veneer_blx)
4822 offset += 4;
4823
4824 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4825 {
4826 char *stub_name = NULL;
4827
4828 if (num_a8_fixes == a8_fix_table_size)
4829 {
4830 a8_fix_table_size *= 2;
4831 a8_fixes = (struct a8_erratum_fix *)
4832 bfd_realloc (a8_fixes,
4833 sizeof (struct a8_erratum_fix)
4834 * a8_fix_table_size);
4835 }
4836
4837 if (num_a8_fixes < prev_num_a8_fixes)
4838 {
4839 /* If we're doing a subsequent scan,
4840 check if we've found the same fix as
4841 before, and try to reuse the stub
4842 name. */
4843 stub_name = a8_fixes[num_a8_fixes].stub_name;
4844 if ((a8_fixes[num_a8_fixes].section != section)
4845 || (a8_fixes[num_a8_fixes].offset != i))
4846 {
4847 free (stub_name);
4848 stub_name = NULL;
4849 *stub_changed_p = TRUE;
4850 }
4851 }
4852
4853 if (!stub_name)
4854 {
4855 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4856 if (stub_name != NULL)
4857 sprintf (stub_name, "%x:%x", section->id, i);
4858 }
4859
4860 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4861 a8_fixes[num_a8_fixes].section = section;
4862 a8_fixes[num_a8_fixes].offset = i;
4863 a8_fixes[num_a8_fixes].addend = offset;
4864 a8_fixes[num_a8_fixes].orig_insn = insn;
4865 a8_fixes[num_a8_fixes].stub_name = stub_name;
4866 a8_fixes[num_a8_fixes].stub_type = stub_type;
4867 a8_fixes[num_a8_fixes].branch_type =
4868 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
4869
4870 num_a8_fixes++;
4871 }
4872 }
4873 }
4874
4875 i += insn_32bit ? 4 : 2;
4876 last_was_32bit = insn_32bit;
4877 last_was_branch = is_32bit_branch;
4878 }
4879 }
4880
4881 if (elf_section_data (section)->this_hdr.contents == NULL)
4882 free (contents);
4883 }
4884
4885 *a8_fixes_p = a8_fixes;
4886 *num_a8_fixes_p = num_a8_fixes;
4887 *a8_fix_table_size_p = a8_fix_table_size;
4888
4889 return FALSE;
4890 }
4891
4892 /* Determine and set the size of the stub section for a final link.
4893
4894 The basic idea here is to examine all the relocations looking for
4895 PC-relative calls to a target that is unreachable with a "bl"
4896 instruction. */
4897
4898 bfd_boolean
4899 elf32_arm_size_stubs (bfd *output_bfd,
4900 bfd *stub_bfd,
4901 struct bfd_link_info *info,
4902 bfd_signed_vma group_size,
4903 asection * (*add_stub_section) (const char *, asection *),
4904 void (*layout_sections_again) (void))
4905 {
4906 bfd_size_type stub_group_size;
4907 bfd_boolean stubs_always_after_branch;
4908 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4909 struct a8_erratum_fix *a8_fixes = NULL;
4910 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4911 struct a8_erratum_reloc *a8_relocs = NULL;
4912 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4913
4914 if (htab == NULL)
4915 return FALSE;
4916
4917 if (htab->fix_cortex_a8)
4918 {
4919 a8_fixes = (struct a8_erratum_fix *)
4920 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4921 a8_relocs = (struct a8_erratum_reloc *)
4922 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4923 }
4924
4925 /* Propagate mach to stub bfd, because it may not have been
4926 finalized when we created stub_bfd. */
4927 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4928 bfd_get_mach (output_bfd));
4929
4930 /* Stash our params away. */
4931 htab->stub_bfd = stub_bfd;
4932 htab->add_stub_section = add_stub_section;
4933 htab->layout_sections_again = layout_sections_again;
4934 stubs_always_after_branch = group_size < 0;
4935
4936 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4937 as the first half of a 32-bit branch straddling two 4K pages. This is a
4938 crude way of enforcing that. */
4939 if (htab->fix_cortex_a8)
4940 stubs_always_after_branch = 1;
4941
4942 if (group_size < 0)
4943 stub_group_size = -group_size;
4944 else
4945 stub_group_size = group_size;
4946
4947 if (stub_group_size == 1)
4948 {
4949 /* Default values. */
4950 /* The Thumb branch range of +-4MB has to be used as the default
4951 maximum group size (a given section can contain both ARM and
4952 Thumb code, so the worst case has to be taken into account).
4953
4954 This value is 24K less than that, which allows for 2025
4955 12-byte stubs. If we exceed that, then we will fail to link.
4956 The user will have to relink with an explicit group size
4957 option. */
4958 stub_group_size = 4170000;
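/* Illustrative arithmetic: 4MB is 4194304 bytes; 4194304 - 4170000
   = 24304 bytes, i.e. room for roughly 2025 twelve-byte stubs before
   the Thumb branch range is exhausted.  */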
4959 }
4960
4961 group_sections (htab, stub_group_size, stubs_always_after_branch);
4962
4963 /* If we're applying the Cortex-A8 fix, we need to determine the
4964 program header size now, because we cannot change it later --
4965 that could alter section placements. Notice the A8 erratum fix
4966 ends up requiring the section addresses to remain unchanged
4967 modulo the page size. That's something we cannot represent
4968 inside BFD, and we don't want to force the section alignment to
4969 be the page size. */
4970 if (htab->fix_cortex_a8)
4971 (*htab->layout_sections_again) ();
4972
4973 while (1)
4974 {
4975 bfd *input_bfd;
4976 unsigned int bfd_indx;
4977 asection *stub_sec;
4978 bfd_boolean stub_changed = FALSE;
4979 unsigned prev_num_a8_fixes = num_a8_fixes;
4980
4981 num_a8_fixes = 0;
4982 for (input_bfd = info->input_bfds, bfd_indx = 0;
4983 input_bfd != NULL;
4984 input_bfd = input_bfd->link_next, bfd_indx++)
4985 {
4986 Elf_Internal_Shdr *symtab_hdr;
4987 asection *section;
4988 Elf_Internal_Sym *local_syms = NULL;
4989
4990 if (!is_arm_elf (input_bfd))
4991 continue;
4992
4993 num_a8_relocs = 0;
4994
4995 /* We'll need the symbol table in a second. */
4996 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4997 if (symtab_hdr->sh_info == 0)
4998 continue;
4999
5000 /* Walk over each section attached to the input bfd. */
5001 for (section = input_bfd->sections;
5002 section != NULL;
5003 section = section->next)
5004 {
5005 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5006
5007 /* If there aren't any relocs, then there's nothing more
5008 to do. */
5009 if ((section->flags & SEC_RELOC) == 0
5010 || section->reloc_count == 0
5011 || (section->flags & SEC_CODE) == 0)
5012 continue;
5013
5014 /* If this section is a link-once section that will be
5015 discarded, then don't create any stubs. */
5016 if (section->output_section == NULL
5017 || section->output_section->owner != output_bfd)
5018 continue;
5019
5020 /* Get the relocs. */
5021 internal_relocs
5022 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5023 NULL, info->keep_memory);
5024 if (internal_relocs == NULL)
5025 goto error_ret_free_local;
5026
5027 /* Now examine each relocation. */
5028 irela = internal_relocs;
5029 irelaend = irela + section->reloc_count;
5030 for (; irela < irelaend; irela++)
5031 {
5032 unsigned int r_type, r_indx;
5033 enum elf32_arm_stub_type stub_type;
5034 struct elf32_arm_stub_hash_entry *stub_entry;
5035 asection *sym_sec;
5036 bfd_vma sym_value;
5037 bfd_vma destination;
5038 struct elf32_arm_link_hash_entry *hash;
5039 const char *sym_name;
5040 char *stub_name;
5041 const asection *id_sec;
5042 unsigned char st_type;
5043 enum arm_st_branch_type branch_type;
5044 bfd_boolean created_stub = FALSE;
5045
5046 r_type = ELF32_R_TYPE (irela->r_info);
5047 r_indx = ELF32_R_SYM (irela->r_info);
5048
5049 if (r_type >= (unsigned int) R_ARM_max)
5050 {
5051 bfd_set_error (bfd_error_bad_value);
5052 error_ret_free_internal:
5053 if (elf_section_data (section)->relocs == NULL)
5054 free (internal_relocs);
5055 goto error_ret_free_local;
5056 }
5057
5058 hash = NULL;
5059 if (r_indx >= symtab_hdr->sh_info)
5060 hash = elf32_arm_hash_entry
5061 (elf_sym_hashes (input_bfd)
5062 [r_indx - symtab_hdr->sh_info]);
5063
5064 /* Only look for stubs on branch instructions, or on a
5065 non-relaxed TLSCALL.  */
5066 if ((r_type != (unsigned int) R_ARM_CALL)
5067 && (r_type != (unsigned int) R_ARM_THM_CALL)
5068 && (r_type != (unsigned int) R_ARM_JUMP24)
5069 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5070 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5071 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5072 && (r_type != (unsigned int) R_ARM_PLT32)
5073 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5074 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5075 && r_type == elf32_arm_tls_transition
5076 (info, r_type, &hash->root)
5077 && ((hash ? hash->tls_type
5078 : (elf32_arm_local_got_tls_type
5079 (input_bfd)[r_indx]))
5080 & GOT_TLS_GDESC) != 0))
5081 continue;
5082
5083 /* Now determine the call target, its name, value,
5084 section. */
5085 sym_sec = NULL;
5086 sym_value = 0;
5087 destination = 0;
5088 sym_name = NULL;
5089
5090 if (r_type == (unsigned int) R_ARM_TLS_CALL
5091 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5092 {
5093 /* A non-relaxed TLS call.  The target is the
5094 plt-resident trampoline and has nothing to do
5095 with the symbol.  */
5096 BFD_ASSERT (htab->tls_trampoline > 0);
5097 sym_sec = htab->root.splt;
5098 sym_value = htab->tls_trampoline;
5099 hash = 0;
5100 st_type = STT_FUNC;
5101 branch_type = ST_BRANCH_TO_ARM;
5102 }
5103 else if (!hash)
5104 {
5105 /* It's a local symbol. */
5106 Elf_Internal_Sym *sym;
5107
5108 if (local_syms == NULL)
5109 {
5110 local_syms
5111 = (Elf_Internal_Sym *) symtab_hdr->contents;
5112 if (local_syms == NULL)
5113 local_syms
5114 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5115 symtab_hdr->sh_info, 0,
5116 NULL, NULL, NULL);
5117 if (local_syms == NULL)
5118 goto error_ret_free_internal;
5119 }
5120
5121 sym = local_syms + r_indx;
5122 if (sym->st_shndx == SHN_UNDEF)
5123 sym_sec = bfd_und_section_ptr;
5124 else if (sym->st_shndx == SHN_ABS)
5125 sym_sec = bfd_abs_section_ptr;
5126 else if (sym->st_shndx == SHN_COMMON)
5127 sym_sec = bfd_com_section_ptr;
5128 else
5129 sym_sec =
5130 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5131
5132 if (!sym_sec)
5133 /* This is an undefined symbol. It can never
5134 be resolved. */
5135 continue;
5136
5137 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5138 sym_value = sym->st_value;
5139 destination = (sym_value + irela->r_addend
5140 + sym_sec->output_offset
5141 + sym_sec->output_section->vma);
5142 st_type = ELF_ST_TYPE (sym->st_info);
5143 branch_type = ARM_SYM_BRANCH_TYPE (sym);
5144 sym_name
5145 = bfd_elf_string_from_elf_section (input_bfd,
5146 symtab_hdr->sh_link,
5147 sym->st_name);
5148 }
5149 else
5150 {
5151 /* It's an external symbol. */
5152 while (hash->root.root.type == bfd_link_hash_indirect
5153 || hash->root.root.type == bfd_link_hash_warning)
5154 hash = ((struct elf32_arm_link_hash_entry *)
5155 hash->root.root.u.i.link);
5156
5157 if (hash->root.root.type == bfd_link_hash_defined
5158 || hash->root.root.type == bfd_link_hash_defweak)
5159 {
5160 struct elf32_arm_link_hash_table *globals
5161 = elf32_arm_hash_table (info);
5162
5163 sym_sec = hash->root.root.u.def.section;
5164 sym_value = hash->root.root.u.def.value;
5165
5166 /* For a destination in a shared library,
5167 use the PLT stub as target address to
5168 decide whether a branch stub is
5169 needed. */
5170 if (globals != NULL
5171 && globals->root.splt != NULL
5172 && hash != NULL
5173 && hash->root.plt.offset != (bfd_vma) -1)
5174 {
5175 sym_sec = globals->root.splt;
5176 sym_value = hash->root.plt.offset;
5177 if (sym_sec->output_section != NULL)
5178 destination = (sym_value
5179 + sym_sec->output_offset
5180 + sym_sec->output_section->vma);
5181 }
5182 else if (sym_sec->output_section != NULL)
5183 destination = (sym_value + irela->r_addend
5184 + sym_sec->output_offset
5185 + sym_sec->output_section->vma);
5186 }
5187 else if ((hash->root.root.type == bfd_link_hash_undefined)
5188 || (hash->root.root.type == bfd_link_hash_undefweak))
5189 {
5190 /* For a shared library, use the PLT stub as
5191 the target address when deciding whether a long
5192 branch stub is needed.
5193 Undefined symbols in absolute code cannot be handled.  */
5194 struct elf32_arm_link_hash_table *globals =
5195 elf32_arm_hash_table (info);
5196
5197 if (globals != NULL
5198 && globals->root.splt != NULL
5199 && hash != NULL
5200 && hash->root.plt.offset != (bfd_vma) -1)
5201 {
5202 sym_sec = globals->root.splt;
5203 sym_value = hash->root.plt.offset;
5204 if (sym_sec->output_section != NULL)
5205 destination = (sym_value
5206 + sym_sec->output_offset
5207 + sym_sec->output_section->vma);
5208 }
5209 else
5210 continue;
5211 }
5212 else
5213 {
5214 bfd_set_error (bfd_error_bad_value);
5215 goto error_ret_free_internal;
5216 }
5217 st_type = hash->root.type;
5218 branch_type = hash->root.target_internal;
5219 sym_name = hash->root.root.root.string;
5220 }
5221
5222 do
5223 {
5224 /* Determine what (if any) linker stub is needed. */
5225 stub_type = arm_type_of_stub (info, section, irela,
5226 st_type, &branch_type,
5227 hash, destination, sym_sec,
5228 input_bfd, sym_name);
5229 if (stub_type == arm_stub_none)
5230 break;
5231
5232 /* Support for grouping stub sections. */
5233 id_sec = htab->stub_group[section->id].link_sec;
5234
5235 /* Get the name of this stub. */
5236 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
5237 irela, stub_type);
5238 if (!stub_name)
5239 goto error_ret_free_internal;
5240
5241 /* We've either created a stub for this reloc already,
5242 or we are about to. */
5243 created_stub = TRUE;
5244
5245 stub_entry = arm_stub_hash_lookup
5246 (&htab->stub_hash_table, stub_name,
5247 FALSE, FALSE);
5248 if (stub_entry != NULL)
5249 {
5250 /* The proper stub has already been created. */
5251 free (stub_name);
5252 stub_entry->target_value = sym_value;
5253 break;
5254 }
5255
5256 stub_entry = elf32_arm_add_stub (stub_name, section,
5257 htab);
5258 if (stub_entry == NULL)
5259 {
5260 free (stub_name);
5261 goto error_ret_free_internal;
5262 }
5263
5264 stub_entry->target_value = sym_value;
5265 stub_entry->target_section = sym_sec;
5266 stub_entry->stub_type = stub_type;
5267 stub_entry->h = hash;
5268 stub_entry->branch_type = branch_type;
5269
5270 if (sym_name == NULL)
5271 sym_name = "unnamed";
5272 stub_entry->output_name = (char *)
5273 bfd_alloc (htab->stub_bfd,
5274 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5275 + strlen (sym_name));
5276 if (stub_entry->output_name == NULL)
5277 {
5278 free (stub_name);
5279 goto error_ret_free_internal;
5280 }
5281
5282 /* For historical reasons, use the existing names for
5283 ARM-to-Thumb and Thumb-to-ARM stubs. */
5284 if ((r_type == (unsigned int) R_ARM_THM_CALL
5285 || r_type == (unsigned int) R_ARM_THM_JUMP24)
5286 && branch_type == ST_BRANCH_TO_ARM)
5287 sprintf (stub_entry->output_name,
5288 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5289 else if ((r_type == (unsigned int) R_ARM_CALL
5290 || r_type == (unsigned int) R_ARM_JUMP24)
5291 && branch_type == ST_BRANCH_TO_THUMB)
5292 sprintf (stub_entry->output_name,
5293 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5294 else
5295 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
5296 sym_name);
5297
5298 stub_changed = TRUE;
5299 }
5300 while (0);
5301
5302 /* Look for relocations which might trigger Cortex-A8
5303 erratum. */
5304 if (htab->fix_cortex_a8
5305 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5306 || r_type == (unsigned int) R_ARM_THM_JUMP19
5307 || r_type == (unsigned int) R_ARM_THM_CALL
5308 || r_type == (unsigned int) R_ARM_THM_XPC22))
5309 {
5310 bfd_vma from = section->output_section->vma
5311 + section->output_offset
5312 + irela->r_offset;
5313
5314 if ((from & 0xfff) == 0xffe)
5315 {
5316 /* Found a candidate. Note that we have not checked
5317 here that the destination is within 4K: if we did
5318 (and skipped creating an entry in a8_relocs) we
5319 could not tell during the later scan that a branch
5320 should have been relocated. */
5321 if (num_a8_relocs == a8_reloc_table_size)
5322 {
5323 a8_reloc_table_size *= 2;
5324 a8_relocs = (struct a8_erratum_reloc *)
5325 bfd_realloc (a8_relocs,
5326 sizeof (struct a8_erratum_reloc)
5327 * a8_reloc_table_size);
5328 }
5329
5330 a8_relocs[num_a8_relocs].from = from;
5331 a8_relocs[num_a8_relocs].destination = destination;
5332 a8_relocs[num_a8_relocs].r_type = r_type;
5333 a8_relocs[num_a8_relocs].branch_type = branch_type;
5334 a8_relocs[num_a8_relocs].sym_name = sym_name;
5335 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5336 a8_relocs[num_a8_relocs].hash = hash;
5337
5338 num_a8_relocs++;
5339 }
5340 }
5341 }
5342
5343 /* We're done with the internal relocs, free them. */
5344 if (elf_section_data (section)->relocs == NULL)
5345 free (internal_relocs);
5346 }
5347
5348 if (htab->fix_cortex_a8)
5349 {
5350 /* Sort relocs which might apply to Cortex-A8 erratum. */
5351 qsort (a8_relocs, num_a8_relocs,
5352 sizeof (struct a8_erratum_reloc),
5353 &a8_reloc_compare);
5354
5355 /* Scan for branches which might trigger Cortex-A8 erratum. */
5356 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5357 &num_a8_fixes, &a8_fix_table_size,
5358 a8_relocs, num_a8_relocs,
5359 prev_num_a8_fixes, &stub_changed)
5360 != 0)
5361 goto error_ret_free_local;
5362 }
5363 }
5364
5365 if (prev_num_a8_fixes != num_a8_fixes)
5366 stub_changed = TRUE;
5367
5368 if (!stub_changed)
5369 break;
5370
5371 /* OK, we've added some stubs. Find out the new size of the
5372 stub sections. */
5373 for (stub_sec = htab->stub_bfd->sections;
5374 stub_sec != NULL;
5375 stub_sec = stub_sec->next)
5376 {
5377 /* Ignore non-stub sections. */
5378 if (!strstr (stub_sec->name, STUB_SUFFIX))
5379 continue;
5380
5381 stub_sec->size = 0;
5382 }
5383
5384 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5385
5386 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5387 if (htab->fix_cortex_a8)
5388 for (i = 0; i < num_a8_fixes; i++)
5389 {
5390 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5391 a8_fixes[i].section, htab);
5392
5393 if (stub_sec == NULL)
5394 goto error_ret_free_local;
5395
5396 stub_sec->size
5397 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5398 NULL);
5399 }
5400
5401
5402 /* Ask the linker to do its stuff. */
5403 (*htab->layout_sections_again) ();
5404 }
5405
5406 /* Add stubs for Cortex-A8 erratum fixes now. */
5407 if (htab->fix_cortex_a8)
5408 {
5409 for (i = 0; i < num_a8_fixes; i++)
5410 {
5411 struct elf32_arm_stub_hash_entry *stub_entry;
5412 char *stub_name = a8_fixes[i].stub_name;
5413 asection *section = a8_fixes[i].section;
5414 unsigned int section_id = a8_fixes[i].section->id;
5415 asection *link_sec = htab->stub_group[section_id].link_sec;
5416 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5417 const insn_sequence *template_sequence;
5418 int template_size, size = 0;
5419
5420 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5421 TRUE, FALSE);
5422 if (stub_entry == NULL)
5423 {
5424 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
5425 section->owner,
5426 stub_name);
5427 return FALSE;
5428 }
5429
5430 stub_entry->stub_sec = stub_sec;
5431 stub_entry->stub_offset = 0;
5432 stub_entry->id_sec = link_sec;
5433 stub_entry->stub_type = a8_fixes[i].stub_type;
5434 stub_entry->target_section = a8_fixes[i].section;
5435 stub_entry->target_value = a8_fixes[i].offset;
5436 stub_entry->target_addend = a8_fixes[i].addend;
5437 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5438 stub_entry->branch_type = a8_fixes[i].branch_type;
5439
5440 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5441 &template_sequence,
5442 &template_size);
5443
5444 stub_entry->stub_size = size;
5445 stub_entry->stub_template = template_sequence;
5446 stub_entry->stub_template_size = template_size;
5447 }
5448
5449 /* Stash the Cortex-A8 erratum fix array for use later in
5450 elf32_arm_write_section(). */
5451 htab->a8_erratum_fixes = a8_fixes;
5452 htab->num_a8_erratum_fixes = num_a8_fixes;
5453 }
5454 else
5455 {
5456 htab->a8_erratum_fixes = NULL;
5457 htab->num_a8_erratum_fixes = 0;
5458 }
5459 return TRUE;
5460
5461 error_ret_free_local:
5462 return FALSE;
5463 }
5464
5465 /* Build all the stubs associated with the current output file. The
5466 stubs are kept in a hash table attached to the main linker hash
5467 table. We also set up the .plt entries for statically linked PIC
5468 functions here. This function is called via arm_elf_finish in the
5469 linker. */
5470
5471 bfd_boolean
5472 elf32_arm_build_stubs (struct bfd_link_info *info)
5473 {
5474 asection *stub_sec;
5475 struct bfd_hash_table *table;
5476 struct elf32_arm_link_hash_table *htab;
5477
5478 htab = elf32_arm_hash_table (info);
5479 if (htab == NULL)
5480 return FALSE;
5481
5482 for (stub_sec = htab->stub_bfd->sections;
5483 stub_sec != NULL;
5484 stub_sec = stub_sec->next)
5485 {
5486 bfd_size_type size;
5487
5488 /* Ignore non-stub sections. */
5489 if (!strstr (stub_sec->name, STUB_SUFFIX))
5490 continue;
5491
5492 /* Allocate memory to hold the linker stubs. */
5493 size = stub_sec->size;
5494 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5495 if (stub_sec->contents == NULL && size != 0)
5496 return FALSE;
5497 stub_sec->size = 0;
5498 }
5499
5500 /* Build the stubs as directed by the stub hash table. */
5501 table = &htab->stub_hash_table;
5502 bfd_hash_traverse (table, arm_build_one_stub, info);
5503 if (htab->fix_cortex_a8)
5504 {
5505 /* Place the cortex a8 stubs last. */
5506 htab->fix_cortex_a8 = -1;
5507 bfd_hash_traverse (table, arm_build_one_stub, info);
5508 }
5509
5510 return TRUE;
5511 }
5512
5513 /* Locate the Thumb encoded calling stub for NAME. */
5514
5515 static struct elf_link_hash_entry *
5516 find_thumb_glue (struct bfd_link_info *link_info,
5517 const char *name,
5518 char **error_message)
5519 {
5520 char *tmp_name;
5521 struct elf_link_hash_entry *hash;
5522 struct elf32_arm_link_hash_table *hash_table;
5523
5524 /* We need a pointer to the armelf specific hash table. */
5525 hash_table = elf32_arm_hash_table (link_info);
5526 if (hash_table == NULL)
5527 return NULL;
5528
5529 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5530 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5531
5532 BFD_ASSERT (tmp_name);
5533
5534 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5535
5536 hash = elf_link_hash_lookup
5537 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5538
5539 if (hash == NULL
5540 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5541 tmp_name, name) == -1)
5542 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5543
5544 free (tmp_name);
5545
5546 return hash;
5547 }
5548
5549 /* Locate the ARM encoded calling stub for NAME. */
5550
5551 static struct elf_link_hash_entry *
5552 find_arm_glue (struct bfd_link_info *link_info,
5553 const char *name,
5554 char **error_message)
5555 {
5556 char *tmp_name;
5557 struct elf_link_hash_entry *myh;
5558 struct elf32_arm_link_hash_table *hash_table;
5559
5560 /* We need a pointer to the elfarm specific hash table. */
5561 hash_table = elf32_arm_hash_table (link_info);
5562 if (hash_table == NULL)
5563 return NULL;
5564
5565 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5566 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5567
5568 BFD_ASSERT (tmp_name);
5569
5570 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5571
5572 myh = elf_link_hash_lookup
5573 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5574
5575 if (myh == NULL
5576 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5577 tmp_name, name) == -1)
5578 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5579
5580 free (tmp_name);
5581
5582 return myh;
5583 }
5584
5585 /* ARM->Thumb glue (static images):
5586
5587 .arm
5588 __func_from_arm:
5589 ldr r12, __func_addr
5590 bx r12
5591 __func_addr:
5592 .word func @ behave as if you saw an ARM_32 reloc.
5593
5594 (v5t static images)
5595 .arm
5596 __func_from_arm:
5597 ldr pc, __func_addr
5598 __func_addr:
5599 .word func @ behave as if you saw an ARM_32 reloc.
5600
5601 (relocatable images)
5602 .arm
5603 __func_from_arm:
5604 ldr r12, __func_offset
5605 add r12, r12, pc
5606 bx r12
5607 __func_offset:
5608 .word func - . */
5609
5610 #define ARM2THUMB_STATIC_GLUE_SIZE 12
5611 static const insn32 a2t1_ldr_insn = 0xe59fc000;
5612 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
5613 static const insn32 a2t3_func_addr_insn = 0x00000001;
5614
5615 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
5616 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5617 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5618
5619 #define ARM2THUMB_PIC_GLUE_SIZE 16
5620 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5621 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5622 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
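/* Decoded (standard ARM encodings), the glue constants above are:
     a2t1_ldr_insn          ldr   r12, [pc]       ; load __func_addr
     a2t2_bx_r12_insn       bx    r12
     a2t3_func_addr_insn    address word; the low bit is ORed into the
                            target so that it is marked as Thumb code
     a2t1v5_ldr_insn        ldr   pc, [pc, #-4]   ; load __func_addr into pc
     a2t2v5_func_addr_insn  as a2t3_func_addr_insn
     a2t1p_ldr_insn         ldr   r12, [pc, #4]   ; load __func_offset
     a2t2p_add_pc_insn      add   r12, r12, pc
     a2t3p_bx_r12_insn      bx    r12  */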
5623
5624 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
5625
5626 .thumb .thumb
5627 .align 2 .align 2
5628 __func_from_thumb: __func_from_thumb:
5629 bx pc push {r6, lr}
5630 nop ldr r6, __func_addr
5631 .arm mov lr, pc
5632 b func bx r6
5633 .arm
5634 ;; back_to_thumb
5635 ldmia r13! {r6, lr}
5636 bx lr
5637 __func_addr:
5638 .word func */
5639
5640 #define THUMB2ARM_GLUE_SIZE 8
5641 static const insn16 t2a1_bx_pc_insn = 0x4778;
5642 static const insn16 t2a2_noop_insn = 0x46c0;
5643 static const insn32 t2a3_b_insn = 0xea000000;
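/* Decoded: t2a1_bx_pc_insn is the Thumb "bx pc", t2a2_noop_insn is
   "mov r8, r8" (the traditional Thumb nop), and t2a3_b_insn is an ARM
   "b" whose 24-bit offset field is filled in when the stub is built.  */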
5644
5645 #define VFP11_ERRATUM_VENEER_SIZE 8
5646
5647 #define ARM_BX_VENEER_SIZE 12
5648 static const insn32 armbx1_tst_insn = 0xe3100001;
5649 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5650 static const insn32 armbx3_bx_insn = 0xe12fff10;
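/* Decoded, the veneer built from these instructions is:
       tst   rN, #1
       moveq pc, rN
       bx    rN
   with rN patched in by elf32_arm_bx_glue.  */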
5651
5652 #ifndef ELFARM_NABI_C_INCLUDED
5653 static void
5654 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5655 {
5656 asection * s;
5657 bfd_byte * contents;
5658
5659 if (size == 0)
5660 {
5661 /* Do not include empty glue sections in the output. */
5662 if (abfd != NULL)
5663 {
5664 s = bfd_get_linker_section (abfd, name);
5665 if (s != NULL)
5666 s->flags |= SEC_EXCLUDE;
5667 }
5668 return;
5669 }
5670
5671 BFD_ASSERT (abfd != NULL);
5672
5673 s = bfd_get_linker_section (abfd, name);
5674 BFD_ASSERT (s != NULL);
5675
5676 contents = (bfd_byte *) bfd_alloc (abfd, size);
5677
5678 BFD_ASSERT (s->size == size);
5679 s->contents = contents;
5680 }
5681
5682 bfd_boolean
5683 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5684 {
5685 struct elf32_arm_link_hash_table * globals;
5686
5687 globals = elf32_arm_hash_table (info);
5688 BFD_ASSERT (globals != NULL);
5689
5690 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5691 globals->arm_glue_size,
5692 ARM2THUMB_GLUE_SECTION_NAME);
5693
5694 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5695 globals->thumb_glue_size,
5696 THUMB2ARM_GLUE_SECTION_NAME);
5697
5698 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5699 globals->vfp11_erratum_glue_size,
5700 VFP11_ERRATUM_VENEER_SECTION_NAME);
5701
5702 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5703 globals->bx_glue_size,
5704 ARM_BX_GLUE_SECTION_NAME);
5705
5706 return TRUE;
5707 }
5708
5709 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5710 Returns the symbol identifying the stub. */
5711
5712 static struct elf_link_hash_entry *
5713 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5714 struct elf_link_hash_entry * h)
5715 {
5716 const char * name = h->root.root.string;
5717 asection * s;
5718 char * tmp_name;
5719 struct elf_link_hash_entry * myh;
5720 struct bfd_link_hash_entry * bh;
5721 struct elf32_arm_link_hash_table * globals;
5722 bfd_vma val;
5723 bfd_size_type size;
5724
5725 globals = elf32_arm_hash_table (link_info);
5726 BFD_ASSERT (globals != NULL);
5727 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5728
5729 s = bfd_get_linker_section
5730 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5731
5732 BFD_ASSERT (s != NULL);
5733
5734 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5735 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5736
5737 BFD_ASSERT (tmp_name);
5738
5739 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5740
5741 myh = elf_link_hash_lookup
5742 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5743
5744 if (myh != NULL)
5745 {
5746 /* We've already seen this guy. */
5747 free (tmp_name);
5748 return myh;
5749 }
5750
5751 /* The only trick here is using globals->arm_glue_size as the value.
5752 Even though the section isn't allocated yet, this is where we will be
5753 putting it. The +1 on the value marks that the stub has not been
5754 output yet - not that it is a Thumb function. */
5755 bh = NULL;
5756 val = globals->arm_glue_size + 1;
5757 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5758 tmp_name, BSF_GLOBAL, s, val,
5759 NULL, TRUE, FALSE, &bh);
5760
5761 myh = (struct elf_link_hash_entry *) bh;
5762 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5763 myh->forced_local = 1;
5764
5765 free (tmp_name);
5766
5767 if (link_info->shared || globals->root.is_relocatable_executable
5768 || globals->pic_veneer)
5769 size = ARM2THUMB_PIC_GLUE_SIZE;
5770 else if (globals->use_blx)
5771 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5772 else
5773 size = ARM2THUMB_STATIC_GLUE_SIZE;
5774
5775 s->size += size;
5776 globals->arm_glue_size += size;
5777
5778 return myh;
5779 }
5780
5781 /* Allocate space for ARMv4 BX veneers. */
5782
5783 static void
5784 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5785 {
5786 asection * s;
5787 struct elf32_arm_link_hash_table *globals;
5788 char *tmp_name;
5789 struct elf_link_hash_entry *myh;
5790 struct bfd_link_hash_entry *bh;
5791 bfd_vma val;
5792
5793 /* BX PC does not need a veneer. */
5794 if (reg == 15)
5795 return;
5796
5797 globals = elf32_arm_hash_table (link_info);
5798 BFD_ASSERT (globals != NULL);
5799 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5800
5801 /* Check if this veneer has already been allocated. */
5802 if (globals->bx_glue_offset[reg])
5803 return;
5804
5805 s = bfd_get_linker_section
5806 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5807
5808 BFD_ASSERT (s != NULL);
5809
5810 /* Add symbol for veneer. */
5811 tmp_name = (char *)
5812 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5813
5814 BFD_ASSERT (tmp_name);
5815
5816 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5817
5818 myh = elf_link_hash_lookup
5819 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5820
5821 BFD_ASSERT (myh == NULL);
5822
5823 bh = NULL;
5824 val = globals->bx_glue_size;
5825 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5826 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5827 NULL, TRUE, FALSE, &bh);
5828
5829 myh = (struct elf_link_hash_entry *) bh;
5830 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5831 myh->forced_local = 1;
5832
5833 s->size += ARM_BX_VENEER_SIZE;
5834 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5835 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5836 }
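/* Note that the low two bits of bx_glue_offset[reg] are flags: bit 1
   (set above) records that the veneer has been allocated, and bit 0 is
   set by elf32_arm_bx_glue once the veneer contents have been written.
   The real section offset is the value with both bits masked off.  */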
5837
5838
5839 /* Add an entry to the code/data map for section SEC. */
5840
5841 static void
5842 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5843 {
5844 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5845 unsigned int newidx;
5846
5847 if (sec_data->map == NULL)
5848 {
5849 sec_data->map = (elf32_arm_section_map *)
5850 bfd_malloc (sizeof (elf32_arm_section_map));
5851 sec_data->mapcount = 0;
5852 sec_data->mapsize = 1;
5853 }
5854
5855 newidx = sec_data->mapcount++;
5856
5857 if (sec_data->mapcount > sec_data->mapsize)
5858 {
5859 sec_data->mapsize *= 2;
5860 sec_data->map = (elf32_arm_section_map *)
5861 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5862 * sizeof (elf32_arm_section_map));
5863 }
5864
5865 if (sec_data->map)
5866 {
5867 sec_data->map[newidx].vma = vma;
5868 sec_data->map[newidx].type = type;
5869 }
5870 }
5871
5872
5873 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5874 veneers are handled for now. */
5875
5876 static bfd_vma
5877 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5878 elf32_vfp11_erratum_list *branch,
5879 bfd *branch_bfd,
5880 asection *branch_sec,
5881 unsigned int offset)
5882 {
5883 asection *s;
5884 struct elf32_arm_link_hash_table *hash_table;
5885 char *tmp_name;
5886 struct elf_link_hash_entry *myh;
5887 struct bfd_link_hash_entry *bh;
5888 bfd_vma val;
5889 struct _arm_elf_section_data *sec_data;
5890 elf32_vfp11_erratum_list *newerr;
5891
5892 hash_table = elf32_arm_hash_table (link_info);
5893 BFD_ASSERT (hash_table != NULL);
5894 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5895
5896 s = bfd_get_linker_section
5897 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5898
5899 BFD_ASSERT (s != NULL);
5900
5901 sec_data = elf32_arm_section_data (s);
5902
5903 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5904 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5905
5906 BFD_ASSERT (tmp_name);
5907
5908 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5909 hash_table->num_vfp11_fixes);
5910
5911 myh = elf_link_hash_lookup
5912 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5913
5914 BFD_ASSERT (myh == NULL);
5915
5916 bh = NULL;
5917 val = hash_table->vfp11_erratum_glue_size;
5918 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5919 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5920 NULL, TRUE, FALSE, &bh);
5921
5922 myh = (struct elf_link_hash_entry *) bh;
5923 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5924 myh->forced_local = 1;
5925
5926 /* Link veneer back to calling location. */
5927 sec_data->erratumcount += 1;
5928 newerr = (elf32_vfp11_erratum_list *)
5929 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5930
5931 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5932 newerr->vma = -1;
5933 newerr->u.v.branch = branch;
5934 newerr->u.v.id = hash_table->num_vfp11_fixes;
5935 branch->u.b.veneer = newerr;
5936
5937 newerr->next = sec_data->erratumlist;
5938 sec_data->erratumlist = newerr;
5939
5940 /* A symbol for the return from the veneer. */
5941 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5942 hash_table->num_vfp11_fixes);
5943
5944 myh = elf_link_hash_lookup
5945 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5946
5947 if (myh != NULL)
5948 abort ();
5949
5950 bh = NULL;
5951 val = offset + 4;
5952 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5953 branch_sec, val, NULL, TRUE, FALSE, &bh);
5954
5955 myh = (struct elf_link_hash_entry *) bh;
5956 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5957 myh->forced_local = 1;
5958
5959 free (tmp_name);
5960
5961 /* Generate a mapping symbol for the veneer section, and explicitly add an
5962 entry for that symbol to the code/data map for the section. */
5963 if (hash_table->vfp11_erratum_glue_size == 0)
5964 {
5965 bh = NULL;
5966 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5967 ever requires this erratum fix. */
5968 _bfd_generic_link_add_one_symbol (link_info,
5969 hash_table->bfd_of_glue_owner, "$a",
5970 BSF_LOCAL, s, 0, NULL,
5971 TRUE, FALSE, &bh);
5972
5973 myh = (struct elf_link_hash_entry *) bh;
5974 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5975 myh->forced_local = 1;
5976
5977 /* The elf32_arm_init_maps function only cares about symbols from input
5978 BFDs. We must make a note of this generated mapping symbol
5979 ourselves so that code byteswapping works properly in
5980 elf32_arm_write_section. */
5981 elf32_arm_section_map_add (s, 'a', 0);
5982 }
5983
5984 s->size += VFP11_ERRATUM_VENEER_SIZE;
5985 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5986 hash_table->num_vfp11_fixes++;
5987
5988 /* The offset of the veneer. */
5989 return val;
5990 }
5991
5992 #define ARM_GLUE_SECTION_FLAGS \
5993 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5994 | SEC_READONLY | SEC_LINKER_CREATED)
5995
5996 /* Create a fake section for use by the ARM backend of the linker. */
5997
5998 static bfd_boolean
5999 arm_make_glue_section (bfd * abfd, const char * name)
6000 {
6001 asection * sec;
6002
6003 sec = bfd_get_linker_section (abfd, name);
6004 if (sec != NULL)
6005 /* Already made. */
6006 return TRUE;
6007
6008 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6009
6010 if (sec == NULL
6011 || !bfd_set_section_alignment (abfd, sec, 2))
6012 return FALSE;
6013
6014 /* Set the gc mark to prevent the section from being removed by garbage
6015 collection, despite the fact that no relocs refer to this section. */
6016 sec->gc_mark = 1;
6017
6018 return TRUE;
6019 }
6020
6021 /* Add the glue sections to ABFD. This function is called from the
6022 linker scripts in ld/emultempl/{armelf}.em. */
6023
6024 bfd_boolean
6025 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6026 struct bfd_link_info *info)
6027 {
6028 /* If we are only performing a partial
6029 link do not bother adding the glue. */
6030 if (info->relocatable)
6031 return TRUE;
6032
6033 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6034 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6035 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6036 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6037 }
6038
6039 /* Select a BFD to be used to hold the sections used by the glue code.
6040 This function is called from the linker scripts in ld/emultempl/
6041 {armelf/pe}.em. */
6042
6043 bfd_boolean
6044 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6045 {
6046 struct elf32_arm_link_hash_table *globals;
6047
6048 /* If we are only performing a partial link
6049 do not bother getting a bfd to hold the glue. */
6050 if (info->relocatable)
6051 return TRUE;
6052
6053 /* Make sure we don't attach the glue sections to a dynamic object. */
6054 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6055
6056 globals = elf32_arm_hash_table (info);
6057 BFD_ASSERT (globals != NULL);
6058
6059 if (globals->bfd_of_glue_owner != NULL)
6060 return TRUE;
6061
6062 /* Save the bfd for later use. */
6063 globals->bfd_of_glue_owner = abfd;
6064
6065 return TRUE;
6066 }
6067
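/* Decide whether BLX may be used for interworking branches.  When the
   ARM1176 workaround is requested, BLX is only enabled for v6T2 and for
   architectures newer than v6K, presumably to keep BLX away from cores
   that may be affected by the ARM1176 BLX erratum; otherwise any
   architecture newer than v4T may use it.  */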
6068 static void
6069 check_use_blx (struct elf32_arm_link_hash_table *globals)
6070 {
6071 int cpu_arch;
6072
6073 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6074 Tag_CPU_arch);
6075
6076 if (globals->fix_arm1176)
6077 {
6078 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6079 globals->use_blx = 1;
6080 }
6081 else
6082 {
6083 if (cpu_arch > TAG_CPU_ARCH_V4T)
6084 globals->use_blx = 1;
6085 }
6086 }
6087
6088 bfd_boolean
6089 bfd_elf32_arm_process_before_allocation (bfd *abfd,
6090 struct bfd_link_info *link_info)
6091 {
6092 Elf_Internal_Shdr *symtab_hdr;
6093 Elf_Internal_Rela *internal_relocs = NULL;
6094 Elf_Internal_Rela *irel, *irelend;
6095 bfd_byte *contents = NULL;
6096
6097 asection *sec;
6098 struct elf32_arm_link_hash_table *globals;
6099
6100 /* If we are only performing a partial link do not bother
6101 to construct any glue. */
6102 if (link_info->relocatable)
6103 return TRUE;
6104
6105 /* Here we have a bfd that is to be included on the link. We have a
6106 hook to do reloc rummaging, before section sizes are nailed down. */
6107 globals = elf32_arm_hash_table (link_info);
6108 BFD_ASSERT (globals != NULL);
6109
6110 check_use_blx (globals);
6111
6112 if (globals->byteswap_code && !bfd_big_endian (abfd))
6113 {
6114 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6115 abfd);
6116 return FALSE;
6117 }
6118
6119 /* PR 5398: If we have not decided to include any loadable sections in
6120 the output then we will not have a glue owner bfd. This is OK, it
6121 just means that there is nothing else for us to do here. */
6122 if (globals->bfd_of_glue_owner == NULL)
6123 return TRUE;
6124
6125 /* Rummage around all the relocs and map the glue vectors. */
6126 sec = abfd->sections;
6127
6128 if (sec == NULL)
6129 return TRUE;
6130
6131 for (; sec != NULL; sec = sec->next)
6132 {
6133 if (sec->reloc_count == 0)
6134 continue;
6135
6136 if ((sec->flags & SEC_EXCLUDE) != 0)
6137 continue;
6138
6139 symtab_hdr = & elf_symtab_hdr (abfd);
6140
6141 /* Load the relocs. */
6142 internal_relocs
6143 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
6144
6145 if (internal_relocs == NULL)
6146 goto error_return;
6147
6148 irelend = internal_relocs + sec->reloc_count;
6149 for (irel = internal_relocs; irel < irelend; irel++)
6150 {
6151 long r_type;
6152 unsigned long r_index;
6153
6154 struct elf_link_hash_entry *h;
6155
6156 r_type = ELF32_R_TYPE (irel->r_info);
6157 r_index = ELF32_R_SYM (irel->r_info);
6158
6159 /* These are the only relocation types we care about. */
6160 if ( r_type != R_ARM_PC24
6161 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6162 continue;
6163
6164 /* Get the section contents if we haven't done so already. */
6165 if (contents == NULL)
6166 {
6167 /* Get cached copy if it exists. */
6168 if (elf_section_data (sec)->this_hdr.contents != NULL)
6169 contents = elf_section_data (sec)->this_hdr.contents;
6170 else
6171 {
6172 /* Go get them off disk. */
6173 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6174 goto error_return;
6175 }
6176 }
6177
6178 if (r_type == R_ARM_V4BX)
6179 {
6180 int reg;
6181
6182 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6183 record_arm_bx_glue (link_info, reg);
6184 continue;
6185 }
6186
6187 /* If the relocation is not against a symbol it cannot concern us. */
6188 h = NULL;
6189
6190 /* We don't care about local symbols. */
6191 if (r_index < symtab_hdr->sh_info)
6192 continue;
6193
6194 /* This is an external symbol. */
6195 r_index -= symtab_hdr->sh_info;
6196 h = (struct elf_link_hash_entry *)
6197 elf_sym_hashes (abfd)[r_index];
6198
6199 /* If the relocation is against a static symbol it must be within
6200 the current section and so cannot be a cross ARM/Thumb relocation. */
6201 if (h == NULL)
6202 continue;
6203
6204 /* If the call will go through a PLT entry then we do not need
6205 glue. */
6206 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6207 continue;
6208
6209 switch (r_type)
6210 {
6211 case R_ARM_PC24:
6212 /* This one is a call from arm code. We need to look up
6213 the target of the call. If it is a thumb target, we
6214 insert glue. */
6215 if (h->target_internal == ST_BRANCH_TO_THUMB)
6216 record_arm_to_thumb_glue (link_info, h);
6217 break;
6218
6219 default:
6220 abort ();
6221 }
6222 }
6223
6224 if (contents != NULL
6225 && elf_section_data (sec)->this_hdr.contents != contents)
6226 free (contents);
6227 contents = NULL;
6228
6229 if (internal_relocs != NULL
6230 && elf_section_data (sec)->relocs != internal_relocs)
6231 free (internal_relocs);
6232 internal_relocs = NULL;
6233 }
6234
6235 return TRUE;
6236
6237 error_return:
6238 if (contents != NULL
6239 && elf_section_data (sec)->this_hdr.contents != contents)
6240 free (contents);
6241 if (internal_relocs != NULL
6242 && elf_section_data (sec)->relocs != internal_relocs)
6243 free (internal_relocs);
6244
6245 return FALSE;
6246 }
6247 #endif
6248
6249
6250 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6251
6252 void
6253 bfd_elf32_arm_init_maps (bfd *abfd)
6254 {
6255 Elf_Internal_Sym *isymbuf;
6256 Elf_Internal_Shdr *hdr;
6257 unsigned int i, localsyms;
6258
6259 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6260 if (! is_arm_elf (abfd))
6261 return;
6262
6263 if ((abfd->flags & DYNAMIC) != 0)
6264 return;
6265
6266 hdr = & elf_symtab_hdr (abfd);
6267 localsyms = hdr->sh_info;
6268
6269 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6270 should contain the number of local symbols, which should come before any
6271 global symbols. Mapping symbols are always local. */
6272 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6273 NULL);
6274
6275 /* No internal symbols read? Skip this BFD. */
6276 if (isymbuf == NULL)
6277 return;
6278
6279 for (i = 0; i < localsyms; i++)
6280 {
6281 Elf_Internal_Sym *isym = &isymbuf[i];
6282 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6283 const char *name;
6284
6285 if (sec != NULL
6286 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6287 {
6288 name = bfd_elf_string_from_elf_section (abfd,
6289 hdr->sh_link, isym->st_name);
6290
6291 if (bfd_is_arm_special_symbol_name (name,
6292 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6293 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6294 }
6295 }
6296 }
6297
6298
6299 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6300 say what they wanted. */
6301
6302 void
6303 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6304 {
6305 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6306 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6307
6308 if (globals == NULL)
6309 return;
6310
6311 if (globals->fix_cortex_a8 == -1)
6312 {
6313 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6314 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6315 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6316 || out_attr[Tag_CPU_arch_profile].i == 0))
6317 globals->fix_cortex_a8 = 1;
6318 else
6319 globals->fix_cortex_a8 = 0;
6320 }
6321 }
6322
6323
6324 void
6325 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6326 {
6327 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6328 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6329
6330 if (globals == NULL)
6331 return;
6332 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6333 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6334 {
6335 switch (globals->vfp11_fix)
6336 {
6337 case BFD_ARM_VFP11_FIX_DEFAULT:
6338 case BFD_ARM_VFP11_FIX_NONE:
6339 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6340 break;
6341
6342 default:
6343 /* Give a warning, but do as the user requests anyway. */
6344 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6345 "workaround is not necessary for target architecture"), obfd);
6346 }
6347 }
6348 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6349 /* For earlier architectures, we might need the workaround, but do not
6350 enable it by default. If users are running with broken hardware, they
6351 must enable the erratum fix explicitly. */
6352 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6353 }
6354
6355
6356 enum bfd_arm_vfp11_pipe
6357 {
6358 VFP11_FMAC,
6359 VFP11_LS,
6360 VFP11_DS,
6361 VFP11_BAD
6362 };
6363
6364 /* Return a VFP register number. This is encoded as RX:X for single-precision
6365 registers, or X:RX for double-precision registers, where RX is the group of
6366 four bits in the instruction encoding and X is the single extension bit.
6367 RX and X fields are specified using their lowest (starting) bit. The return
6368 value is:
6369
6370 0...31: single-precision registers s0...s31
6371 32...63: double-precision registers d0...d31.
6372
6373 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6374 encounter VFP3 instructions, so we allow the full range for DP registers. */
6375
6376 static unsigned int
6377 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6378 unsigned int x)
6379 {
6380 if (is_double)
6381 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6382 else
6383 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6384 }
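/* For example, with the RX field holding 0b0101 and the X bit set, a
   single-precision operand decodes to (5 << 1) | 1 = 11, i.e. s11,
   while a double-precision operand decodes to (5 | (1 << 4)) + 32 = 53,
   i.e. d21.  */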
6385
6386 /* Set bits in *WMASK according to a register number REG as encoded by
6387 bfd_arm_vfp11_regno(). Ignore d16-d31. */
6388
6389 static void
6390 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
6391 {
6392 if (reg < 32)
6393 *wmask |= 1 << reg;
6394 else if (reg < 48)
6395 *wmask |= 3 << ((reg - 32) * 2);
6396 }
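/* For example, s11 (reg 11) sets bit 11; d3 (reg 35) sets bits 6 and 7,
   covering both of its single-precision halves; d20 (reg 52) is ignored
   because it lies outside the d0-d15 range tracked here.  */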
6397
6398 /* Return TRUE if WMASK overwrites anything in REGS. */
6399
6400 static bfd_boolean
6401 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6402 {
6403 int i;
6404
6405 for (i = 0; i < numregs; i++)
6406 {
6407 unsigned int reg = regs[i];
6408
6409 if (reg < 32 && (wmask & (1 << reg)) != 0)
6410 return TRUE;
6411
6412 reg -= 32;
6413
6414 if (reg >= 16)
6415 continue;
6416
6417 if ((wmask & (3 << (reg * 2))) != 0)
6418 return TRUE;
6419 }
6420
6421 return FALSE;
6422 }
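/* For example, if WMASK has bits 6 and 7 set (d3 was written) and REGS
   contains 7 (s7 was an input operand), an antidependency is reported,
   since s7 is one half of d3.  */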
6423
6424 /* In this function, we're interested in two things: finding input registers
6425 for VFP data-processing instructions, and finding the set of registers which
6426 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6427 hold the written set, so FLDM etc. are easy to deal with (we're only
6428 interested in 32 SP registers or 16 DP registers, due to the VFP version
6429 implemented by the chip in question). DP registers are marked by setting
6430 both SP registers in the write mask. */
6431
6432 static enum bfd_arm_vfp11_pipe
6433 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
6434 int *numregs)
6435 {
6436 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
6437 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
6438
6439 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
6440 {
6441 unsigned int pqrs;
6442 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6443 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6444
6445 pqrs = ((insn & 0x00800000) >> 20)
6446 | ((insn & 0x00300000) >> 19)
6447 | ((insn & 0x00000040) >> 6);
6448
6449 switch (pqrs)
6450 {
6451 case 0: /* fmac[sd]. */
6452 case 1: /* fnmac[sd]. */
6453 case 2: /* fmsc[sd]. */
6454 case 3: /* fnmsc[sd]. */
6455 vpipe = VFP11_FMAC;
6456 bfd_arm_vfp11_write_mask (destmask, fd);
6457 regs[0] = fd;
6458 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6459 regs[2] = fm;
6460 *numregs = 3;
6461 break;
6462
6463 case 4: /* fmul[sd]. */
6464 case 5: /* fnmul[sd]. */
6465 case 6: /* fadd[sd]. */
6466 case 7: /* fsub[sd]. */
6467 vpipe = VFP11_FMAC;
6468 goto vfp_binop;
6469
6470 case 8: /* fdiv[sd]. */
6471 vpipe = VFP11_DS;
6472 vfp_binop:
6473 bfd_arm_vfp11_write_mask (destmask, fd);
6474 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6475 regs[1] = fm;
6476 *numregs = 2;
6477 break;
6478
6479 case 15: /* extended opcode. */
6480 {
6481 unsigned int extn = ((insn >> 15) & 0x1e)
6482 | ((insn >> 7) & 1);
6483
6484 switch (extn)
6485 {
6486 case 0: /* fcpy[sd]. */
6487 case 1: /* fabs[sd]. */
6488 case 2: /* fneg[sd]. */
6489 case 8: /* fcmp[sd]. */
6490 case 9: /* fcmpe[sd]. */
6491 case 10: /* fcmpz[sd]. */
6492 case 11: /* fcmpez[sd]. */
6493 case 16: /* fuito[sd]. */
6494 case 17: /* fsito[sd]. */
6495 case 24: /* ftoui[sd]. */
6496 case 25: /* ftouiz[sd]. */
6497 case 26: /* ftosi[sd]. */
6498 case 27: /* ftosiz[sd]. */
6499 /* These instructions will not bounce due to underflow. */
6500 *numregs = 0;
6501 vpipe = VFP11_FMAC;
6502 break;
6503
6504 case 3: /* fsqrt[sd]. */
6505 /* fsqrt cannot underflow, but it can (perhaps) overwrite
6506 registers to cause the erratum in previous instructions. */
6507 bfd_arm_vfp11_write_mask (destmask, fd);
6508 vpipe = VFP11_DS;
6509 break;
6510
6511 case 15: /* fcvt{ds,sd}. */
6512 {
6513 int rnum = 0;
6514
6515 bfd_arm_vfp11_write_mask (destmask, fd);
6516
6517 /* Only FCVTSD can underflow. */
6518 if ((insn & 0x100) != 0)
6519 regs[rnum++] = fm;
6520
6521 *numregs = rnum;
6522
6523 vpipe = VFP11_FMAC;
6524 }
6525 break;
6526
6527 default:
6528 return VFP11_BAD;
6529 }
6530 }
6531 break;
6532
6533 default:
6534 return VFP11_BAD;
6535 }
6536 }
6537 /* Two-register transfer. */
6538 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
6539 {
6540 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6541
6542 if ((insn & 0x100000) == 0)
6543 {
6544 if (is_double)
6545 bfd_arm_vfp11_write_mask (destmask, fm);
6546 else
6547 {
6548 bfd_arm_vfp11_write_mask (destmask, fm);
6549 bfd_arm_vfp11_write_mask (destmask, fm + 1);
6550 }
6551 }
6552
6553 vpipe = VFP11_LS;
6554 }
6555 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
6556 {
6557 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6558 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
6559
6560 switch (puw)
6561 {
6562 case 0: /* Two-reg transfer. We should catch these above. */
6563 abort ();
6564
6565 case 2: /* fldm[sdx]. */
6566 case 3:
6567 case 5:
6568 {
6569 unsigned int i, offset = insn & 0xff;
6570
6571 if (is_double)
6572 offset >>= 1;
6573
6574 for (i = fd; i < fd + offset; i++)
6575 bfd_arm_vfp11_write_mask (destmask, i);
6576 }
6577 break;
6578
6579 case 4: /* fld[sd]. */
6580 case 6:
6581 bfd_arm_vfp11_write_mask (destmask, fd);
6582 break;
6583
6584 default:
6585 return VFP11_BAD;
6586 }
6587
6588 vpipe = VFP11_LS;
6589 }
6590 /* Single-register transfer. Note L==0. */
6591 else if ((insn & 0x0f100e10) == 0x0e000a10)
6592 {
6593 unsigned int opcode = (insn >> 21) & 7;
6594 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
6595
6596 switch (opcode)
6597 {
6598 case 0: /* fmsr/fmdlr. */
6599 case 1: /* fmdhr. */
6600 /* Mark fmdhr and fmdlr as writing to the whole of the DP
6601 destination register. I don't know if this is exactly right,
6602 but it is the conservative choice. */
6603 bfd_arm_vfp11_write_mask (destmask, fn);
6604 break;
6605
6606 case 7: /* fmxr. */
6607 break;
6608 }
6609
6610 vpipe = VFP11_LS;
6611 }
6612
6613 return vpipe;
6614 }
6615
6616
6617 static int elf32_arm_compare_mapping (const void * a, const void * b);
6618
6619
6620 /* Look for potentially-troublesome code sequences which might trigger the
6621 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6622 (available from ARM) for details of the erratum. A short version is
6623 described in ld.texinfo. */
6624
6625 bfd_boolean
6626 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6627 {
6628 asection *sec;
6629 bfd_byte *contents = NULL;
6630 int state = 0;
6631 int regs[3], numregs = 0;
6632 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6633 int use_vector;
6634
6635 if (globals == NULL)
6636 return FALSE;
6637 use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6638 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6639 The states transition as follows:
6640
6641 0 -> 1 (vector) or 0 -> 2 (scalar)
6642 A VFP FMAC-pipeline instruction has been seen. Fill
6643 regs[0]..regs[numregs-1] with its input operands. Remember this
6644 instruction in 'first_fmac'.
6645
6646 1 -> 2
6647 Any instruction, except for a VFP instruction which overwrites
6648 regs[*].
6649
6650 1 -> 3 [ -> 0 ] or
6651 2 -> 3 [ -> 0 ]
6652 A VFP instruction has been seen which overwrites any of regs[*].
6653 We must make a veneer! Reset state to 0 before examining next
6654 instruction.
6655
6656 2 -> 0
6657 If we fail to match anything in state 2, reset to state 0 and reset
6658 the instruction pointer to the instruction after 'first_fmac'.
6659
6660 If the VFP11 vector mode is in use, there must be at least two unrelated
6661 instructions between anti-dependent VFP11 instructions to properly avoid
6662 triggering the erratum, hence the use of the extra state 1. */
6663
6664 /* If we are only performing a partial link do not bother
6665 to construct any glue. */
6666 if (link_info->relocatable)
6667 return TRUE;
6668
6669 /* Skip if this bfd does not correspond to an ELF image. */
6670 if (! is_arm_elf (abfd))
6671 return TRUE;
6672
6673 /* We should have chosen a fix type by the time we get here. */
6674 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6675
6676 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6677 return TRUE;
6678
6679 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6680 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6681 return TRUE;
6682
6683 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6684 {
6685 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6686 struct _arm_elf_section_data *sec_data;
6687
6688 /* If we don't have executable progbits, we're not interested in this
6689 section. Also skip if section is to be excluded. */
6690 if (elf_section_type (sec) != SHT_PROGBITS
6691 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6692 || (sec->flags & SEC_EXCLUDE) != 0
6693 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
6694 || sec->output_section == bfd_abs_section_ptr
6695 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6696 continue;
6697
6698 sec_data = elf32_arm_section_data (sec);
6699
6700 if (sec_data->mapcount == 0)
6701 continue;
6702
6703 if (elf_section_data (sec)->this_hdr.contents != NULL)
6704 contents = elf_section_data (sec)->this_hdr.contents;
6705 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6706 goto error_return;
6707
6708 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6709 elf32_arm_compare_mapping);
6710
6711 for (span = 0; span < sec_data->mapcount; span++)
6712 {
6713 unsigned int span_start = sec_data->map[span].vma;
6714 unsigned int span_end = (span == sec_data->mapcount - 1)
6715 ? sec->size : sec_data->map[span + 1].vma;
6716 char span_type = sec_data->map[span].type;
6717
6718 /* FIXME: Only ARM mode is supported at present. We may need to
6719 support Thumb-2 mode also at some point. */
6720 if (span_type != 'a')
6721 continue;
6722
6723 for (i = span_start; i < span_end;)
6724 {
6725 unsigned int next_i = i + 4;
6726 unsigned int insn = bfd_big_endian (abfd)
6727 ? (contents[i] << 24)
6728 | (contents[i + 1] << 16)
6729 | (contents[i + 2] << 8)
6730 | contents[i + 3]
6731 : (contents[i + 3] << 24)
6732 | (contents[i + 2] << 16)
6733 | (contents[i + 1] << 8)
6734 | contents[i];
6735 unsigned int writemask = 0;
6736 enum bfd_arm_vfp11_pipe vpipe;
6737
6738 switch (state)
6739 {
6740 case 0:
6741 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6742 &numregs);
6743 /* I'm assuming the VFP11 erratum can trigger with denorm
6744 operands on either the FMAC or the DS pipeline. This might
6745 lead to slightly overenthusiastic veneer insertion. */
6746 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6747 {
6748 state = use_vector ? 1 : 2;
6749 first_fmac = i;
6750 veneer_of_insn = insn;
6751 }
6752 break;
6753
6754 case 1:
6755 {
6756 int other_regs[3], other_numregs;
6757 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6758 other_regs,
6759 &other_numregs);
6760 if (vpipe != VFP11_BAD
6761 && bfd_arm_vfp11_antidependency (writemask, regs,
6762 numregs))
6763 state = 3;
6764 else
6765 state = 2;
6766 }
6767 break;
6768
6769 case 2:
6770 {
6771 int other_regs[3], other_numregs;
6772 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6773 other_regs,
6774 &other_numregs);
6775 if (vpipe != VFP11_BAD
6776 && bfd_arm_vfp11_antidependency (writemask, regs,
6777 numregs))
6778 state = 3;
6779 else
6780 {
6781 state = 0;
6782 next_i = first_fmac + 4;
6783 }
6784 }
6785 break;
6786
6787 case 3:
6788 abort (); /* Should be unreachable. */
6789 }
6790
6791 if (state == 3)
6792 {
6793 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6794 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6795
6796 elf32_arm_section_data (sec)->erratumcount += 1;
6797
6798 newerr->u.b.vfp_insn = veneer_of_insn;
6799
6800 switch (span_type)
6801 {
6802 case 'a':
6803 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6804 break;
6805
6806 default:
6807 abort ();
6808 }
6809
6810 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6811 first_fmac);
6812
6813 newerr->vma = -1;
6814
6815 newerr->next = sec_data->erratumlist;
6816 sec_data->erratumlist = newerr;
6817
6818 state = 0;
6819 }
6820
6821 i = next_i;
6822 }
6823 }
6824
6825 if (contents != NULL
6826 && elf_section_data (sec)->this_hdr.contents != contents)
6827 free (contents);
6828 contents = NULL;
6829 }
6830
6831 return TRUE;
6832
6833 error_return:
6834 if (contents != NULL
6835 && elf_section_data (sec)->this_hdr.contents != contents)
6836 free (contents);
6837
6838 return FALSE;
6839 }
6840
6841 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6842 after sections have been laid out, using specially-named symbols. */
6843
6844 void
6845 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6846 struct bfd_link_info *link_info)
6847 {
6848 asection *sec;
6849 struct elf32_arm_link_hash_table *globals;
6850 char *tmp_name;
6851
6852 if (link_info->relocatable)
6853 return;
6854
6855 /* Skip if this bfd does not correspond to an ELF image. */
6856 if (! is_arm_elf (abfd))
6857 return;
6858
6859 globals = elf32_arm_hash_table (link_info);
6860 if (globals == NULL)
6861 return;
6862
6863 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6864 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6865
6866 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6867 {
6868 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6869 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6870
6871 for (; errnode != NULL; errnode = errnode->next)
6872 {
6873 struct elf_link_hash_entry *myh;
6874 bfd_vma vma;
6875
6876 switch (errnode->type)
6877 {
6878 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6879 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6880 /* Find veneer symbol. */
6881 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6882 errnode->u.b.veneer->u.v.id);
6883
6884 myh = elf_link_hash_lookup
6885 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6886
6887 if (myh == NULL)
6888 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6889 "`%s'"), abfd, tmp_name);
6890
6891 vma = myh->root.u.def.section->output_section->vma
6892 + myh->root.u.def.section->output_offset
6893 + myh->root.u.def.value;
6894
6895 errnode->u.b.veneer->vma = vma;
6896 break;
6897
6898 case VFP11_ERRATUM_ARM_VENEER:
6899 case VFP11_ERRATUM_THUMB_VENEER:
6900 /* Find return location. */
6901 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6902 errnode->u.v.id);
6903
6904 myh = elf_link_hash_lookup
6905 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6906
6907 if (myh == NULL)
6908 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6909 "`%s'"), abfd, tmp_name);
6910
6911 vma = myh->root.u.def.section->output_section->vma
6912 + myh->root.u.def.section->output_offset
6913 + myh->root.u.def.value;
6914
6915 errnode->u.v.branch->vma = vma;
6916 break;
6917
6918 default:
6919 abort ();
6920 }
6921 }
6922 }
6923
6924 free (tmp_name);
6925 }
6926
6927
6928 /* Set target relocation values needed during linking. */
6929
6930 void
6931 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6932 struct bfd_link_info *link_info,
6933 int target1_is_rel,
6934 char * target2_type,
6935 int fix_v4bx,
6936 int use_blx,
6937 bfd_arm_vfp11_fix vfp11_fix,
6938 int no_enum_warn, int no_wchar_warn,
6939 int pic_veneer, int fix_cortex_a8,
6940 int fix_arm1176)
6941 {
6942 struct elf32_arm_link_hash_table *globals;
6943
6944 globals = elf32_arm_hash_table (link_info);
6945 if (globals == NULL)
6946 return;
6947
6948 globals->target1_is_rel = target1_is_rel;
6949 if (strcmp (target2_type, "rel") == 0)
6950 globals->target2_reloc = R_ARM_REL32;
6951 else if (strcmp (target2_type, "abs") == 0)
6952 globals->target2_reloc = R_ARM_ABS32;
6953 else if (strcmp (target2_type, "got-rel") == 0)
6954 globals->target2_reloc = R_ARM_GOT_PREL;
6955 else
6956 {
6957 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6958 target2_type);
6959 }
6960 globals->fix_v4bx = fix_v4bx;
6961 globals->use_blx |= use_blx;
6962 globals->vfp11_fix = vfp11_fix;
6963 globals->pic_veneer = pic_veneer;
6964 globals->fix_cortex_a8 = fix_cortex_a8;
6965 globals->fix_arm1176 = fix_arm1176;
6966
6967 BFD_ASSERT (is_arm_elf (output_bfd));
6968 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6969 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6970 }
6971
6972 /* Replace the target offset of a Thumb bl or b.w instruction. */
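/* The masks used below follow the Thumb-2 BL/B.W encoding: the upper
   halfword holds the sign bit S (bit 10) and imm10 (bits 9-0); the
   lower halfword holds J1 (bit 13), J2 (bit 11) and imm11 (bits 10-0),
   with J1 = (NOT I1) XOR S and J2 = (NOT I2) XOR S, where I1 and I2 are
   bits 23 and 22 of the branch offset.  */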
6973
6974 static void
6975 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6976 {
6977 bfd_vma upper;
6978 bfd_vma lower;
6979 int reloc_sign;
6980
6981 BFD_ASSERT ((offset & 1) == 0);
6982
6983 upper = bfd_get_16 (abfd, insn);
6984 lower = bfd_get_16 (abfd, insn + 2);
6985 reloc_sign = (offset < 0) ? 1 : 0;
6986 upper = (upper & ~(bfd_vma) 0x7ff)
6987 | ((offset >> 12) & 0x3ff)
6988 | (reloc_sign << 10);
6989 lower = (lower & ~(bfd_vma) 0x2fff)
6990 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6991 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6992 | ((offset >> 1) & 0x7ff);
6993 bfd_put_16 (abfd, upper, insn);
6994 bfd_put_16 (abfd, lower, insn + 2);
6995 }
6996
6997 /* Thumb code calling an ARM function. */
6998
6999 static int
7000 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
7001 const char * name,
7002 bfd * input_bfd,
7003 bfd * output_bfd,
7004 asection * input_section,
7005 bfd_byte * hit_data,
7006 asection * sym_sec,
7007 bfd_vma offset,
7008 bfd_signed_vma addend,
7009 bfd_vma val,
7010 char **error_message)
7011 {
7012 asection * s = 0;
7013 bfd_vma my_offset;
7014 long int ret_offset;
7015 struct elf_link_hash_entry * myh;
7016 struct elf32_arm_link_hash_table * globals;
7017
7018 myh = find_thumb_glue (info, name, error_message);
7019 if (myh == NULL)
7020 return FALSE;
7021
7022 globals = elf32_arm_hash_table (info);
7023 BFD_ASSERT (globals != NULL);
7024 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7025
7026 my_offset = myh->root.u.def.value;
7027
7028 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7029 THUMB2ARM_GLUE_SECTION_NAME);
7030
7031 BFD_ASSERT (s != NULL);
7032 BFD_ASSERT (s->contents != NULL);
7033 BFD_ASSERT (s->output_section != NULL);
7034
7035 if ((my_offset & 0x01) == 0x01)
7036 {
7037 if (sym_sec != NULL
7038 && sym_sec->owner != NULL
7039 && !INTERWORK_FLAG (sym_sec->owner))
7040 {
7041 (*_bfd_error_handler)
7042 (_("%B(%s): warning: interworking not enabled.\n"
7043 " first occurrence: %B: Thumb call to ARM"),
7044 sym_sec->owner, input_bfd, name);
7045
7046 return FALSE;
7047 }
7048
7049 --my_offset;
7050 myh->root.u.def.value = my_offset;
7051
7052 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
7053 s->contents + my_offset);
7054
7055 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
7056 s->contents + my_offset + 2);
7057
7058 ret_offset =
7059 /* Address of destination of the stub. */
7060 ((bfd_signed_vma) val)
7061 - ((bfd_signed_vma)
7062 /* Offset from the start of the current section
7063 to the start of the stubs. */
7064 (s->output_offset
7065 /* Offset of the start of this stub from the start of the stubs. */
7066 + my_offset
7067 /* Address of the start of the current section. */
7068 + s->output_section->vma)
7069 /* The branch instruction is 4 bytes into the stub. */
7070 + 4
7071 /* ARM branches work from the pc of the instruction + 8. */
7072 + 8);
7073
7074 put_arm_insn (globals, output_bfd,
7075 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
7076 s->contents + my_offset + 4);
7077 }
7078
7079 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
7080
7081 /* Now go back and fix up the original BL insn to point to here. */
7082 ret_offset =
7083 /* Address of where the stub is located. */
7084 (s->output_section->vma + s->output_offset + my_offset)
7085 /* Address of where the BL is located. */
7086 - (input_section->output_section->vma + input_section->output_offset
7087 + offset)
7088 /* Addend in the relocation. */
7089 - addend
7090 /* Biasing for PC-relative addressing. */
7091 - 8;
7092
7093 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
7094
7095 return TRUE;
7096 }
7097
7098 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7099
7100 static struct elf_link_hash_entry *
7101 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
7102 const char * name,
7103 bfd * input_bfd,
7104 bfd * output_bfd,
7105 asection * sym_sec,
7106 bfd_vma val,
7107 asection * s,
7108 char ** error_message)
7109 {
7110 bfd_vma my_offset;
7111 long int ret_offset;
7112 struct elf_link_hash_entry * myh;
7113 struct elf32_arm_link_hash_table * globals;
7114
7115 myh = find_arm_glue (info, name, error_message);
7116 if (myh == NULL)
7117 return NULL;
7118
7119 globals = elf32_arm_hash_table (info);
7120 BFD_ASSERT (globals != NULL);
7121 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7122
7123 my_offset = myh->root.u.def.value;
7124
7125 if ((my_offset & 0x01) == 0x01)
7126 {
7127 if (sym_sec != NULL
7128 && sym_sec->owner != NULL
7129 && !INTERWORK_FLAG (sym_sec->owner))
7130 {
7131 (*_bfd_error_handler)
7132 (_("%B(%s): warning: interworking not enabled.\n"
7133 " first occurrence: %B: arm call to thumb"),
7134 sym_sec->owner, input_bfd, name);
7135 }
7136
7137 --my_offset;
7138 myh->root.u.def.value = my_offset;
7139
7140 if (info->shared || globals->root.is_relocatable_executable
7141 || globals->pic_veneer)
7142 {
7143 /* For relocatable objects we can't use absolute addresses,
7144 so construct the address from a relative offset. */
7145 /* TODO: If the offset is small it's probably worth
7146 constructing the address with adds. */
7147 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
7148 s->contents + my_offset);
7149 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
7150 s->contents + my_offset + 4);
7151 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
7152 s->contents + my_offset + 8);
7153 /* Adjust the offset by 4 for the position of the add,
7154 and 8 for the pipeline offset. */
7155 ret_offset = (val - (s->output_offset
7156 + s->output_section->vma
7157 + my_offset + 12))
7158 | 1;
7159 bfd_put_32 (output_bfd, ret_offset,
7160 s->contents + my_offset + 12);
7161 }
7162 else if (globals->use_blx)
7163 {
7164 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
7165 s->contents + my_offset);
7166
7167 /* It's a thumb address. Add the low order bit. */
7168 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
7169 s->contents + my_offset + 4);
7170 }
7171 else
7172 {
7173 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
7174 s->contents + my_offset);
7175
7176 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
7177 s->contents + my_offset + 4);
7178
7179 /* It's a thumb address. Add the low order bit. */
7180 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
7181 s->contents + my_offset + 8);
7182
7183 my_offset += 12;
7184 }
7185 }
7186
7187 BFD_ASSERT (my_offset <= globals->arm_glue_size);
7188
7189 return myh;
7190 }
7191
7192 /* Arm code calling a Thumb function. */
7193
7194 static int
7195 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
7196 const char * name,
7197 bfd * input_bfd,
7198 bfd * output_bfd,
7199 asection * input_section,
7200 bfd_byte * hit_data,
7201 asection * sym_sec,
7202 bfd_vma offset,
7203 bfd_signed_vma addend,
7204 bfd_vma val,
7205 char **error_message)
7206 {
7207 unsigned long int tmp;
7208 bfd_vma my_offset;
7209 asection * s;
7210 long int ret_offset;
7211 struct elf_link_hash_entry * myh;
7212 struct elf32_arm_link_hash_table * globals;
7213
7214 globals = elf32_arm_hash_table (info);
7215 BFD_ASSERT (globals != NULL);
7216 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7217
7218 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7219 ARM2THUMB_GLUE_SECTION_NAME);
7220 BFD_ASSERT (s != NULL);
7221 BFD_ASSERT (s->contents != NULL);
7222 BFD_ASSERT (s->output_section != NULL);
7223
7224 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
7225 sym_sec, val, s, error_message);
7226 if (!myh)
7227 return FALSE;
7228
7229 my_offset = myh->root.u.def.value;
7230 tmp = bfd_get_32 (input_bfd, hit_data);
7231 tmp = tmp & 0xFF000000;
7232
7233 /* Somehow these are both 4 too far (the ARM pipeline makes PC read as the instruction address plus 8), so subtract 8. */
7234 ret_offset = (s->output_offset
7235 + my_offset
7236 + s->output_section->vma
7237 - (input_section->output_offset
7238 + input_section->output_section->vma
7239 + offset + addend)
7240 - 8);
7241
7242 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
7243
7244 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
7245
7246 return TRUE;
7247 }
7248
7249 /* Populate Arm stub for an exported Thumb function. */
7250
7251 static bfd_boolean
7252 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
7253 {
7254 struct bfd_link_info * info = (struct bfd_link_info *) inf;
7255 asection * s;
7256 struct elf_link_hash_entry * myh;
7257 struct elf32_arm_link_hash_entry *eh;
7258 struct elf32_arm_link_hash_table * globals;
7259 asection *sec;
7260 bfd_vma val;
7261 char *error_message;
7262
7263 eh = elf32_arm_hash_entry (h);
7264 /* Allocate stubs for exported Thumb functions on v4t. */
7265 if (eh->export_glue == NULL)
7266 return TRUE;
7267
7268 globals = elf32_arm_hash_table (info);
7269 BFD_ASSERT (globals != NULL);
7270 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7271
7272 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7273 ARM2THUMB_GLUE_SECTION_NAME);
7274 BFD_ASSERT (s != NULL);
7275 BFD_ASSERT (s->contents != NULL);
7276 BFD_ASSERT (s->output_section != NULL);
7277
7278 sec = eh->export_glue->root.u.def.section;
7279
7280 BFD_ASSERT (sec->output_section != NULL);
7281
7282 val = eh->export_glue->root.u.def.value + sec->output_offset
7283 + sec->output_section->vma;
7284
7285 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
7286 h->root.u.def.section->owner,
7287 globals->obfd, sec, val, s,
7288 &error_message);
7289 BFD_ASSERT (myh);
7290 return TRUE;
7291 }
7292
7293 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
7294
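/* A sketch of the veneer this routine is assumed to emit (the armbx*
   instruction constants are defined elsewhere in this file):

       tst   rN, #1       @ Thumb destination?
       moveq pc, rN       @ plain ARM destination: ordinary branch
       bx    rN           @ Thumb destination: interworking branch

   REG selects rN; one veneer is shared by every redirected BX rN.  */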
7295 static bfd_vma
7296 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
7297 {
7298 bfd_byte *p;
7299 bfd_vma glue_addr;
7300 asection *s;
7301 struct elf32_arm_link_hash_table *globals;
7302
7303 globals = elf32_arm_hash_table (info);
7304 BFD_ASSERT (globals != NULL);
7305 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7306
7307 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7308 ARM_BX_GLUE_SECTION_NAME);
7309 BFD_ASSERT (s != NULL);
7310 BFD_ASSERT (s->contents != NULL);
7311 BFD_ASSERT (s->output_section != NULL);
7312
7313 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
7314
7315 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
7316
7317 if ((globals->bx_glue_offset[reg] & 1) == 0)
7318 {
7319 p = s->contents + glue_addr;
7320 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
7321 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
7322 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
7323 globals->bx_glue_offset[reg] |= 1;
7324 }
7325
7326 return glue_addr + s->output_section->vma + s->output_offset;
7327 }
7328
7329 /* Generate Arm stubs for exported Thumb symbols. */
7330 static void
7331 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
7332 struct bfd_link_info *link_info)
7333 {
7334 struct elf32_arm_link_hash_table * globals;
7335
7336 if (link_info == NULL)
7337 /* Ignore this if we are not called by the ELF backend linker. */
7338 return;
7339
7340 globals = elf32_arm_hash_table (link_info);
7341 if (globals == NULL)
7342 return;
7343
7344 /* If blx is available then exported Thumb symbols are OK and there is
7345 nothing to do. */
7346 if (globals->use_blx)
7347 return;
7348
7349 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
7350 link_info);
7351 }
7352
7353 /* Reserve space for COUNT dynamic relocations in relocation section
7354 SRELOC. */
7355
7356 static void
7357 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
7358 bfd_size_type count)
7359 {
7360 struct elf32_arm_link_hash_table *htab;
7361
7362 htab = elf32_arm_hash_table (info);
7363 BFD_ASSERT (htab->root.dynamic_sections_created);
7364 if (sreloc == NULL)
7365 abort ();
7366 sreloc->size += RELOC_SIZE (htab) * count;
7367 }
7368
7369 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
7370 dynamic, the relocations should go in SRELOC, otherwise they should
7371 go in the special .rel.iplt section. */
7372
7373 static void
7374 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
7375 bfd_size_type count)
7376 {
7377 struct elf32_arm_link_hash_table *htab;
7378
7379 htab = elf32_arm_hash_table (info);
7380 if (!htab->root.dynamic_sections_created)
7381 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
7382 else
7383 {
7384 BFD_ASSERT (sreloc != NULL);
7385 sreloc->size += RELOC_SIZE (htab) * count;
7386 }
7387 }
7388
7389 /* Add relocation REL to the end of relocation section SRELOC. */
7390
7391 static void
7392 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
7393 asection *sreloc, Elf_Internal_Rela *rel)
7394 {
7395 bfd_byte *loc;
7396 struct elf32_arm_link_hash_table *htab;
7397
7398 htab = elf32_arm_hash_table (info);
7399 if (!htab->root.dynamic_sections_created
7400 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
7401 sreloc = htab->root.irelplt;
7402 if (sreloc == NULL)
7403 abort ();
7404 loc = sreloc->contents;
7405 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
7406 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
7407 abort ();
7408 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
7409 }
7410
7411 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
7412 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
7413 to .plt. */
7414
7415 static void
7416 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
7417 bfd_boolean is_iplt_entry,
7418 union gotplt_union *root_plt,
7419 struct arm_plt_info *arm_plt)
7420 {
7421 struct elf32_arm_link_hash_table *htab;
7422 asection *splt;
7423 asection *sgotplt;
7424
7425 htab = elf32_arm_hash_table (info);
7426
7427 if (is_iplt_entry)
7428 {
7429 splt = htab->root.iplt;
7430 sgotplt = htab->root.igotplt;
7431
7432 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
7433 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
7434 }
7435 else
7436 {
7437 splt = htab->root.splt;
7438 sgotplt = htab->root.sgotplt;
7439
7440 /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt. */
7441 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
7442
7443 /* If this is the first .plt entry, make room for the special
7444 first entry. */
7445 if (splt->size == 0)
7446 splt->size += htab->plt_header_size;
7447 }
7448
7449 /* Allocate the PLT entry itself, including any leading Thumb stub. */
7450 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7451 splt->size += PLT_THUMB_STUB_SIZE;
7452 root_plt->offset = splt->size;
7453 splt->size += htab->plt_entry_size;
7454
7455 if (!htab->symbian_p)
7456 {
7457 /* We also need to make an entry in the .got.plt section, which
7458 will be placed in the .got section by the linker script. */
7459 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
7460 sgotplt->size += 4;
7461 }
7462 }
7463
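/* Helpers for scattering a 16-bit value into the MOVW/MOVT immediate
   fields: imm12 occupies bits [11:0] and imm4 bits [19:16] of the
   instruction word.  For example (illustrative, derived from the code
   below), arm_movw_immediate (0x1234) yields 0x00010234.  */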
7464 static bfd_vma
7465 arm_movw_immediate (bfd_vma value)
7466 {
7467 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
7468 }
7469
7470 static bfd_vma
7471 arm_movt_immediate (bfd_vma value)
7472 {
7473 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
7474 }
7475
7476 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
7477 the entry lives in .iplt and resolves to (*SYM_VALUE)().
7478 Otherwise, DYNINDX is the index of the symbol in the dynamic
7479 symbol table and SYM_VALUE is undefined.
7480
7481 ROOT_PLT points to the offset of the PLT entry from the start of its
7482 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
7483 bookkeeping information. */
7484
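/* For the default (non-Symbian, non-VxWorks, non-NaCl) targets the code
   below is assumed to emit the classic ARM PLT slot:

       add ip, pc, #G0     @ bits [27:20] of the GOT displacement
       add ip, ip, #G1     @ bits [19:12]
       ldr pc, [ip, #G2]!  @ bits [11:0], loads the GOT entry

   which is why the displacement is split with the masks 0x0ff00000,
   0x000ff000 and 0x00000fff further down (with FOUR_WORD_PLT a fourth
   word is also written).  */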
7485 static void
7486 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
7487 union gotplt_union *root_plt,
7488 struct arm_plt_info *arm_plt,
7489 int dynindx, bfd_vma sym_value)
7490 {
7491 struct elf32_arm_link_hash_table *htab;
7492 asection *sgot;
7493 asection *splt;
7494 asection *srel;
7495 bfd_byte *loc;
7496 bfd_vma plt_index;
7497 Elf_Internal_Rela rel;
7498 bfd_vma plt_header_size;
7499 bfd_vma got_header_size;
7500
7501 htab = elf32_arm_hash_table (info);
7502
7503 /* Pick the appropriate sections and sizes. */
7504 if (dynindx == -1)
7505 {
7506 splt = htab->root.iplt;
7507 sgot = htab->root.igotplt;
7508 srel = htab->root.irelplt;
7509
7510 /* There are no reserved entries in .igot.plt, and no special
7511 first entry in .iplt. */
7512 got_header_size = 0;
7513 plt_header_size = 0;
7514 }
7515 else
7516 {
7517 splt = htab->root.splt;
7518 sgot = htab->root.sgotplt;
7519 srel = htab->root.srelplt;
7520
7521 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
7522 plt_header_size = htab->plt_header_size;
7523 }
7524 BFD_ASSERT (splt != NULL && srel != NULL);
7525
7526 /* Fill in the entry in the procedure linkage table. */
7527 if (htab->symbian_p)
7528 {
7529 BFD_ASSERT (dynindx >= 0);
7530 put_arm_insn (htab, output_bfd,
7531 elf32_arm_symbian_plt_entry[0],
7532 splt->contents + root_plt->offset);
7533 bfd_put_32 (output_bfd,
7534 elf32_arm_symbian_plt_entry[1],
7535 splt->contents + root_plt->offset + 4);
7536
7537 /* Fill in the entry in the .rel.plt section. */
7538 rel.r_offset = (splt->output_section->vma
7539 + splt->output_offset
7540 + root_plt->offset + 4);
7541 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
7542
7543 /* Get the index in the procedure linkage table which
7544 corresponds to this symbol. This is the index of this symbol
7545 in all the symbols for which we are making plt entries. The
7546 first entry in the procedure linkage table is reserved. */
7547 plt_index = ((root_plt->offset - plt_header_size)
7548 / htab->plt_entry_size);
7549 }
7550 else
7551 {
7552 bfd_vma got_offset, got_address, plt_address;
7553 bfd_vma got_displacement, initial_got_entry;
7554 bfd_byte * ptr;
7555
7556 BFD_ASSERT (sgot != NULL);
7557
7558 /* Get the offset into the .(i)got.plt table of the entry that
7559 corresponds to this function. */
7560 got_offset = (arm_plt->got_offset & -2);
7561
7562 /* Get the index in the procedure linkage table which
7563 corresponds to this symbol. This is the index of this symbol
7564 in all the symbols for which we are making plt entries.
7565 After the reserved .got.plt entries, all symbols appear in
7566 the same order as in .plt. */
7567 plt_index = (got_offset - got_header_size) / 4;
7568
7569 /* Calculate the address of the GOT entry. */
7570 got_address = (sgot->output_section->vma
7571 + sgot->output_offset
7572 + got_offset);
7573
7574 /* ...and the address of the PLT entry. */
7575 plt_address = (splt->output_section->vma
7576 + splt->output_offset
7577 + root_plt->offset);
7578
7579 ptr = splt->contents + root_plt->offset;
7580 if (htab->vxworks_p && info->shared)
7581 {
7582 unsigned int i;
7583 bfd_vma val;
7584
7585 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7586 {
7587 val = elf32_arm_vxworks_shared_plt_entry[i];
7588 if (i == 2)
7589 val |= got_address - sgot->output_section->vma;
7590 if (i == 5)
7591 val |= plt_index * RELOC_SIZE (htab);
7592 if (i == 2 || i == 5)
7593 bfd_put_32 (output_bfd, val, ptr);
7594 else
7595 put_arm_insn (htab, output_bfd, val, ptr);
7596 }
7597 }
7598 else if (htab->vxworks_p)
7599 {
7600 unsigned int i;
7601 bfd_vma val;
7602
7603 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7604 {
7605 val = elf32_arm_vxworks_exec_plt_entry[i];
7606 if (i == 2)
7607 val |= got_address;
7608 if (i == 4)
7609 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
7610 if (i == 5)
7611 val |= plt_index * RELOC_SIZE (htab);
7612 if (i == 2 || i == 5)
7613 bfd_put_32 (output_bfd, val, ptr);
7614 else
7615 put_arm_insn (htab, output_bfd, val, ptr);
7616 }
7617
7618 loc = (htab->srelplt2->contents
7619 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
7620
7621 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
7622 referencing the GOT for this PLT entry. */
7623 rel.r_offset = plt_address + 8;
7624 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
7625 rel.r_addend = got_offset;
7626 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7627 loc += RELOC_SIZE (htab);
7628
7629 /* Create the R_ARM_ABS32 relocation referencing the
7630 beginning of the PLT for this GOT entry. */
7631 rel.r_offset = got_address;
7632 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
7633 rel.r_addend = 0;
7634 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7635 }
7636 else if (htab->nacl_p)
7637 {
7638 /* Calculate the displacement between the PLT slot and the
7639 common tail that's part of the special initial PLT slot. */
7640 int32_t tail_displacement
7641 = ((splt->output_section->vma + splt->output_offset
7642 + ARM_NACL_PLT_TAIL_OFFSET)
7643 - (plt_address + htab->plt_entry_size + 4));
7644 BFD_ASSERT ((tail_displacement & 3) == 0);
7645 tail_displacement >>= 2;
7646
7647 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
7648 || (-tail_displacement & 0xff000000) == 0);
7649
7650 /* Calculate the displacement between the PLT slot and the entry
7651 in the GOT. The offset accounts for the value produced by
7652 adding to pc in the penultimate instruction of the PLT stub. */
7653 got_displacement = (got_address
7654 - (plt_address + htab->plt_entry_size));
7655
7656 /* NaCl does not support interworking at all. */
7657 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
7658
7659 put_arm_insn (htab, output_bfd,
7660 elf32_arm_nacl_plt_entry[0]
7661 | arm_movw_immediate (got_displacement),
7662 ptr + 0);
7663 put_arm_insn (htab, output_bfd,
7664 elf32_arm_nacl_plt_entry[1]
7665 | arm_movt_immediate (got_displacement),
7666 ptr + 4);
7667 put_arm_insn (htab, output_bfd,
7668 elf32_arm_nacl_plt_entry[2],
7669 ptr + 8);
7670 put_arm_insn (htab, output_bfd,
7671 elf32_arm_nacl_plt_entry[3]
7672 | (tail_displacement & 0x00ffffff),
7673 ptr + 12);
7674 }
7675 else
7676 {
7677 /* Calculate the displacement between the PLT slot and the
7678 entry in the GOT. The eight-byte offset accounts for the
7679 value produced by adding to pc in the first instruction
7680 of the PLT stub. */
7681 got_displacement = got_address - (plt_address + 8);
7682
7683 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
7684
7685 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7686 {
7687 put_thumb_insn (htab, output_bfd,
7688 elf32_arm_plt_thumb_stub[0], ptr - 4);
7689 put_thumb_insn (htab, output_bfd,
7690 elf32_arm_plt_thumb_stub[1], ptr - 2);
7691 }
7692
7693 put_arm_insn (htab, output_bfd,
7694 elf32_arm_plt_entry[0]
7695 | ((got_displacement & 0x0ff00000) >> 20),
7696 ptr + 0);
7697 put_arm_insn (htab, output_bfd,
7698 elf32_arm_plt_entry[1]
7699 | ((got_displacement & 0x000ff000) >> 12),
7700 ptr + 4);
7701 put_arm_insn (htab, output_bfd,
7702 elf32_arm_plt_entry[2]
7703 | (got_displacement & 0x00000fff),
7704 ptr + 8);
7705 #ifdef FOUR_WORD_PLT
7706 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
7707 #endif
7708 }
7709
7710 /* Fill in the entry in the .rel(a).(i)plt section. */
7711 rel.r_offset = got_address;
7712 rel.r_addend = 0;
7713 if (dynindx == -1)
7714 {
7715 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
7716 The dynamic linker or static executable then calls SYM_VALUE
7717 to determine the correct run-time value of the .igot.plt entry. */
7718 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
7719 initial_got_entry = sym_value;
7720 }
7721 else
7722 {
7723 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
7724 initial_got_entry = (splt->output_section->vma
7725 + splt->output_offset);
7726 }
7727
7728 /* Fill in the entry in the global offset table. */
7729 bfd_put_32 (output_bfd, initial_got_entry,
7730 sgot->contents + got_offset);
7731 }
7732
7733 loc = srel->contents + plt_index * RELOC_SIZE (htab);
7734 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7735 }
7736
7737 /* Some relocations map to different relocations depending on the
7738 target. Return the real relocation. */
7739
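/* As a reminder (the behaviour is configured outside this function):
   R_ARM_TARGET1 acts as R_ARM_ABS32 or R_ARM_REL32 depending on the
   --target1-abs / --target1-rel linker options, and R_ARM_TARGET2 is
   chosen per platform or via --target2=<abs|rel|got-rel>.  */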
7740 static int
7741 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
7742 int r_type)
7743 {
7744 switch (r_type)
7745 {
7746 case R_ARM_TARGET1:
7747 if (globals->target1_is_rel)
7748 return R_ARM_REL32;
7749 else
7750 return R_ARM_ABS32;
7751
7752 case R_ARM_TARGET2:
7753 return globals->target2_reloc;
7754
7755 default:
7756 return r_type;
7757 }
7758 }
7759
7760 /* Return the base VMA address which should be subtracted from real addresses
7761 when resolving @dtpoff relocation.
7762 This is PT_TLS segment p_vaddr. */
7763
7764 static bfd_vma
7765 dtpoff_base (struct bfd_link_info *info)
7766 {
7767 /* If tls_sec is NULL, we should have signalled an error already. */
7768 if (elf_hash_table (info)->tls_sec == NULL)
7769 return 0;
7770 return elf_hash_table (info)->tls_sec->vma;
7771 }
7772
7773 /* Return the relocation value for @tpoff relocation
7774 if STT_TLS virtual address is ADDRESS. */
7775
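/* ARM uses TLS variant 1: the thread pointer addresses a TCB_SIZE-byte
   (8 on ARM) thread control block followed by the TLS block aligned to
   the segment's alignment, hence the align_power adjustment below
   (a summary of the ABI, not taken from this file).  */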
7776 static bfd_vma
7777 tpoff (struct bfd_link_info *info, bfd_vma address)
7778 {
7779 struct elf_link_hash_table *htab = elf_hash_table (info);
7780 bfd_vma base;
7781
7782 /* If tls_sec is NULL, we should have signalled an error already. */
7783 if (htab->tls_sec == NULL)
7784 return 0;
7785 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
7786 return address - htab->tls_sec->vma + base;
7787 }
7788
7789 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
7790 VALUE is the relocation value. */
7791
7792 static bfd_reloc_status_type
7793 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
7794 {
7795 if (value > 0xfff)
7796 return bfd_reloc_overflow;
7797
7798 value |= bfd_get_32 (abfd, data) & 0xfffff000;
7799 bfd_put_32 (abfd, value, data);
7800 return bfd_reloc_ok;
7801 }
7802
7803 /* Handle TLS relaxations. Relaxing is possible for symbols that use
7804 R_ARM_TLS_GOTDESC, R_ARM_{,THM_}TLS_CALL or
7805 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
7806
7807 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
7808 is to then call final_link_relocate. Return other values in the
7809 case of error.
7810
7811 FIXME: When --emit-relocs is in effect, we'll emit relocs describing
7812 the pre-relaxed code. It would be nice if the relocs were updated
7813 to match the optimization. */
7814
7815 static bfd_reloc_status_type
7816 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
7817 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
7818 Elf_Internal_Rela *rel, unsigned long is_local)
7819 {
7820 unsigned long insn;
7821
7822 switch (ELF32_R_TYPE (rel->r_info))
7823 {
7824 default:
7825 return bfd_reloc_notsupported;
7826
7827 case R_ARM_TLS_GOTDESC:
7828 if (is_local)
7829 insn = 0;
7830 else
7831 {
7832 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7833 if (insn & 1)
7834 insn -= 5; /* THUMB */
7835 else
7836 insn -= 8; /* ARM */
7837 }
7838 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7839 return bfd_reloc_continue;
7840
7841 case R_ARM_THM_TLS_DESCSEQ:
7842 /* Thumb insn. */
7843 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
7844 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
7845 {
7846 if (is_local)
7847 /* nop */
7848 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7849 }
7850 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
7851 {
7852 if (is_local)
7853 /* nop */
7854 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7855 else
7856 /* ldr rx,[ry] */
7857 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
7858 }
7859 else if ((insn & 0xff87) == 0x4780) /* blx rx */
7860 {
7861 if (is_local)
7862 /* nop */
7863 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7864 else
7865 /* mov r0, rx */
7866 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
7867 contents + rel->r_offset);
7868 }
7869 else
7870 {
7871 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
7872 /* It's a 32 bit instruction, fetch the rest of it for
7873 error generation. */
7874 insn = (insn << 16)
7875 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
7876 (*_bfd_error_handler)
7877 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
7878 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7879 return bfd_reloc_notsupported;
7880 }
7881 break;
7882
7883 case R_ARM_TLS_DESCSEQ:
7884 /* arm insn. */
7885 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7886 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
7887 {
7888 if (is_local)
7889 /* mov rx, ry */
7890 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
7891 contents + rel->r_offset);
7892 }
7893 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
7894 {
7895 if (is_local)
7896 /* nop */
7897 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7898 else
7899 /* ldr rx,[ry] */
7900 bfd_put_32 (input_bfd, insn & 0xfffff000,
7901 contents + rel->r_offset);
7902 }
7903 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
7904 {
7905 if (is_local)
7906 /* nop */
7907 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7908 else
7909 /* mov r0, rx */
7910 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
7911 contents + rel->r_offset);
7912 }
7913 else
7914 {
7915 (*_bfd_error_handler)
7916 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
7917 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7918 return bfd_reloc_notsupported;
7919 }
7920 break;
7921
7922 case R_ARM_TLS_CALL:
7923 /* GD->IE relaxation, turn the instruction into 'nop' or
7924 'ldr r0, [pc,r0]' */
7925 insn = is_local ? 0xe1a00000 : 0xe79f0000;
7926 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7927 break;
7928
7929 case R_ARM_THM_TLS_CALL:
7930 /* GD->IE relaxation */
7931 if (!is_local)
7932 /* add r0,pc; ldr r0, [r0] */
7933 insn = 0x44786800;
7934 else if (arch_has_thumb2_nop (globals))
7935 /* nop.w */
7936 insn = 0xf3af8000;
7937 else
7938 /* nop; nop */
7939 insn = 0xbf00bf00;
7940
7941 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
7942 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
7943 break;
7944 }
7945 return bfd_reloc_ok;
7946 }
7947
7948 /* For a given value of n, calculate the value of G_n as required to
7949 deal with group relocations. We return it in the form of an
7950 encoded constant-and-rotation, together with the final residual. If n is
7951 specified as less than zero, then final_residual is filled with the
7952 input value and no further action is performed. */
7953
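/* Illustrative example (not part of the original sources): with
   VALUE == 0x12345678 and N == 0 the loop selects shift == 22, so
   g_0 == 0x12000000, the encoded form is 0x548 (constant 0x48,
   rotation field 5, i.e. rotate right by 10) and *FINAL_RESIDUAL is
   left as 0x00345678.  */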
7954 static bfd_vma
7955 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
7956 {
7957 int current_n;
7958 bfd_vma g_n;
7959 bfd_vma encoded_g_n = 0;
7960 bfd_vma residual = value; /* Also known as Y_n. */
7961
7962 for (current_n = 0; current_n <= n; current_n++)
7963 {
7964 int shift;
7965
7966 /* Calculate which part of the value to mask. */
7967 if (residual == 0)
7968 shift = 0;
7969 else
7970 {
7971 int msb;
7972
7973 /* Determine the most significant bit in the residual and
7974 align the resulting value to a 2-bit boundary. */
7975 for (msb = 30; msb >= 0; msb -= 2)
7976 if (residual & (3 << msb))
7977 break;
7978
7979 /* The desired shift is now (msb - 6), or zero, whichever
7980 is the greater. */
7981 shift = msb - 6;
7982 if (shift < 0)
7983 shift = 0;
7984 }
7985
7986 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
7987 g_n = residual & (0xff << shift);
7988 encoded_g_n = (g_n >> shift)
7989 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
7990
7991 /* Calculate the residual for the next time around. */
7992 residual &= ~g_n;
7993 }
7994
7995 *final_residual = residual;
7996
7997 return encoded_g_n;
7998 }
7999
8000 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
8001 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
8002
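/* Bits [24:21] of an ARM data-processing instruction hold the opcode:
   ADD is 0b0100 (only bit 23 set within the 0x1e00000 mask) and SUB is
   0b0010 (only bit 22 set).  */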
8003 static int
8004 identify_add_or_sub (bfd_vma insn)
8005 {
8006 int opcode = insn & 0x1e00000;
8007
8008 if (opcode == 1 << 23) /* ADD */
8009 return 1;
8010
8011 if (opcode == 1 << 22) /* SUB */
8012 return -1;
8013
8014 return 0;
8015 }
8016
8017 /* Perform a relocation as part of a final link. */
8018
8019 static bfd_reloc_status_type
8020 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8021 bfd * input_bfd,
8022 bfd * output_bfd,
8023 asection * input_section,
8024 bfd_byte * contents,
8025 Elf_Internal_Rela * rel,
8026 bfd_vma value,
8027 struct bfd_link_info * info,
8028 asection * sym_sec,
8029 const char * sym_name,
8030 unsigned char st_type,
8031 enum arm_st_branch_type branch_type,
8032 struct elf_link_hash_entry * h,
8033 bfd_boolean * unresolved_reloc_p,
8034 char ** error_message)
8035 {
8036 unsigned long r_type = howto->type;
8037 unsigned long r_symndx;
8038 bfd_byte * hit_data = contents + rel->r_offset;
8039 bfd_vma * local_got_offsets;
8040 bfd_vma * local_tlsdesc_gotents;
8041 asection * sgot;
8042 asection * splt;
8043 asection * sreloc = NULL;
8044 asection * srelgot;
8045 bfd_vma addend;
8046 bfd_signed_vma signed_addend;
8047 unsigned char dynreloc_st_type;
8048 bfd_vma dynreloc_value;
8049 struct elf32_arm_link_hash_table * globals;
8050 struct elf32_arm_link_hash_entry *eh;
8051 union gotplt_union *root_plt;
8052 struct arm_plt_info *arm_plt;
8053 bfd_vma plt_offset;
8054 bfd_vma gotplt_offset;
8055 bfd_boolean has_iplt_entry;
8056
8057 globals = elf32_arm_hash_table (info);
8058 if (globals == NULL)
8059 return bfd_reloc_notsupported;
8060
8061 BFD_ASSERT (is_arm_elf (input_bfd));
8062
8063 /* Some relocation types map to different relocations depending on the
8064 target. We pick the right one here. */
8065 r_type = arm_real_reloc_type (globals, r_type);
8066
8067 /* It is possible to have linker relaxations on some TLS access
8068 models. Update our information here. */
8069 r_type = elf32_arm_tls_transition (info, r_type, h);
8070
8071 if (r_type != howto->type)
8072 howto = elf32_arm_howto_from_type (r_type);
8073
8074 /* If the start address has been set, then set the EF_ARM_HASENTRY
8075 flag. Setting this more than once is redundant, but the cost is
8076 not too high, and it keeps the code simple.
8077
8078 The test is done here, rather than somewhere else, because the
8079 start address is only set just before the final link commences.
8080
8081 Note - if the user deliberately sets a start address of 0, the
8082 flag will not be set. */
8083 if (bfd_get_start_address (output_bfd) != 0)
8084 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
8085
8086 eh = (struct elf32_arm_link_hash_entry *) h;
8087 sgot = globals->root.sgot;
8088 local_got_offsets = elf_local_got_offsets (input_bfd);
8089 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8090
8091 if (globals->root.dynamic_sections_created)
8092 srelgot = globals->root.srelgot;
8093 else
8094 srelgot = NULL;
8095
8096 r_symndx = ELF32_R_SYM (rel->r_info);
8097
8098 if (globals->use_rel)
8099 {
8100 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8101
8102 if (addend & ((howto->src_mask + 1) >> 1))
8103 {
8104 signed_addend = -1;
8105 signed_addend &= ~ howto->src_mask;
8106 signed_addend |= addend;
8107 }
8108 else
8109 signed_addend = addend;
8110 }
8111 else
8112 addend = signed_addend = rel->r_addend;
8113
8114 /* Record the symbol information that should be used in dynamic
8115 relocations. */
8116 dynreloc_st_type = st_type;
8117 dynreloc_value = value;
8118 if (branch_type == ST_BRANCH_TO_THUMB)
8119 dynreloc_value |= 1;
8120
8121 /* Find out whether the symbol has a PLT. Set ST_TYPE, BRANCH_TYPE and
8122 VALUE appropriately for relocations that we resolve at link time. */
8123 has_iplt_entry = FALSE;
8124 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8125 && root_plt->offset != (bfd_vma) -1)
8126 {
8127 plt_offset = root_plt->offset;
8128 gotplt_offset = arm_plt->got_offset;
8129
8130 if (h == NULL || eh->is_iplt)
8131 {
8132 has_iplt_entry = TRUE;
8133 splt = globals->root.iplt;
8134
8135 /* Populate .iplt entries here, because not all of them will
8136 be seen by finish_dynamic_symbol. The lower bit is set if
8137 we have already populated the entry. */
8138 if (plt_offset & 1)
8139 plt_offset--;
8140 else
8141 {
8142 elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
8143 -1, dynreloc_value);
8144 root_plt->offset |= 1;
8145 }
8146
8147 /* Static relocations always resolve to the .iplt entry. */
8148 st_type = STT_FUNC;
8149 value = (splt->output_section->vma
8150 + splt->output_offset
8151 + plt_offset);
8152 branch_type = ST_BRANCH_TO_ARM;
8153
8154 /* If there are non-call relocations that resolve to the .iplt
8155 entry, then all dynamic ones must too. */
8156 if (arm_plt->noncall_refcount != 0)
8157 {
8158 dynreloc_st_type = st_type;
8159 dynreloc_value = value;
8160 }
8161 }
8162 else
8163 /* We populate the .plt entry in finish_dynamic_symbol. */
8164 splt = globals->root.splt;
8165 }
8166 else
8167 {
8168 splt = NULL;
8169 plt_offset = (bfd_vma) -1;
8170 gotplt_offset = (bfd_vma) -1;
8171 }
8172
8173 switch (r_type)
8174 {
8175 case R_ARM_NONE:
8176 /* We don't need to find a value for this symbol. It's just a
8177 marker. */
8178 *unresolved_reloc_p = FALSE;
8179 return bfd_reloc_ok;
8180
8181 case R_ARM_ABS12:
8182 if (!globals->vxworks_p)
8183 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8184
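/* Fall through: on VxWorks, R_ARM_ABS12 is handled like the other data
   relocations below.  */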
8185 case R_ARM_PC24:
8186 case R_ARM_ABS32:
8187 case R_ARM_ABS32_NOI:
8188 case R_ARM_REL32:
8189 case R_ARM_REL32_NOI:
8190 case R_ARM_CALL:
8191 case R_ARM_JUMP24:
8192 case R_ARM_XPC25:
8193 case R_ARM_PREL31:
8194 case R_ARM_PLT32:
8195 /* Handle relocations which should use the PLT entry. ABS32/REL32
8196 will use the symbol's value, which may point to a PLT entry, but we
8197 don't need to handle that here. If we created a PLT entry, all
8198 branches in this object should go to it, except if the PLT is too
8199 far away, in which case a long branch stub should be inserted. */
8200 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
8201 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
8202 && r_type != R_ARM_CALL
8203 && r_type != R_ARM_JUMP24
8204 && r_type != R_ARM_PLT32)
8205 && plt_offset != (bfd_vma) -1)
8206 {
8207 /* If we've created a .plt section, and assigned a PLT entry
8208 to this function, it must either be a STT_GNU_IFUNC reference
8209 or not be known to bind locally. In other cases, we should
8210 have cleared the PLT entry by now. */
8211 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8212
8213 value = (splt->output_section->vma
8214 + splt->output_offset
8215 + plt_offset);
8216 *unresolved_reloc_p = FALSE;
8217 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8218 contents, rel->r_offset, value,
8219 rel->r_addend);
8220 }
8221
8222 /* When generating a shared object or relocatable executable, these
8223 relocations are copied into the output file to be resolved at
8224 run time. */
8225 if ((info->shared || globals->root.is_relocatable_executable)
8226 && (input_section->flags & SEC_ALLOC)
8227 && !(globals->vxworks_p
8228 && strcmp (input_section->output_section->name,
8229 ".tls_vars") == 0)
8230 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
8231 || !SYMBOL_CALLS_LOCAL (info, h))
8232 && (!strstr (input_section->name, STUB_SUFFIX))
8233 && (h == NULL
8234 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8235 || h->root.type != bfd_link_hash_undefweak)
8236 && r_type != R_ARM_PC24
8237 && r_type != R_ARM_CALL
8238 && r_type != R_ARM_JUMP24
8239 && r_type != R_ARM_PREL31
8240 && r_type != R_ARM_PLT32)
8241 {
8242 Elf_Internal_Rela outrel;
8243 bfd_boolean skip, relocate;
8244
8245 *unresolved_reloc_p = FALSE;
8246
8247 if (sreloc == NULL && globals->root.dynamic_sections_created)
8248 {
8249 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
8250 ! globals->use_rel);
8251
8252 if (sreloc == NULL)
8253 return bfd_reloc_notsupported;
8254 }
8255
8256 skip = FALSE;
8257 relocate = FALSE;
8258
8259 outrel.r_addend = addend;
8260 outrel.r_offset =
8261 _bfd_elf_section_offset (output_bfd, info, input_section,
8262 rel->r_offset);
8263 if (outrel.r_offset == (bfd_vma) -1)
8264 skip = TRUE;
8265 else if (outrel.r_offset == (bfd_vma) -2)
8266 skip = TRUE, relocate = TRUE;
8267 outrel.r_offset += (input_section->output_section->vma
8268 + input_section->output_offset);
8269
8270 if (skip)
8271 memset (&outrel, 0, sizeof outrel);
8272 else if (h != NULL
8273 && h->dynindx != -1
8274 && (!info->shared
8275 || !info->symbolic
8276 || !h->def_regular))
8277 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
8278 else
8279 {
8280 int symbol;
8281
8282 /* This symbol is local, or marked to become local. */
8283 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
8284 if (globals->symbian_p)
8285 {
8286 asection *osec;
8287
8288 /* On Symbian OS, the data segment and text segment
8289 can be relocated independently. Therefore, we
8290 must indicate the segment to which this
8291 relocation is relative. The BPABI allows us to
8292 use any symbol in the right segment; we just use
8293 the section symbol as it is convenient. (We
8294 cannot use the symbol given by "h" directly as it
8295 will not appear in the dynamic symbol table.)
8296
8297 Note that the dynamic linker ignores the section
8298 symbol value, so we don't subtract osec->vma
8299 from the emitted reloc addend. */
8300 if (sym_sec)
8301 osec = sym_sec->output_section;
8302 else
8303 osec = input_section->output_section;
8304 symbol = elf_section_data (osec)->dynindx;
8305 if (symbol == 0)
8306 {
8307 struct elf_link_hash_table *htab = elf_hash_table (info);
8308
8309 if ((osec->flags & SEC_READONLY) == 0
8310 && htab->data_index_section != NULL)
8311 osec = htab->data_index_section;
8312 else
8313 osec = htab->text_index_section;
8314 symbol = elf_section_data (osec)->dynindx;
8315 }
8316 BFD_ASSERT (symbol != 0);
8317 }
8318 else
8319 /* On SVR4-ish systems, the dynamic loader cannot
8320 relocate the text and data segments independently,
8321 so the symbol does not matter. */
8322 symbol = 0;
8323 if (dynreloc_st_type == STT_GNU_IFUNC)
8324 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
8325 to the .iplt entry. Instead, every non-call reference
8326 must use an R_ARM_IRELATIVE relocation to obtain the
8327 correct run-time address. */
8328 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
8329 else
8330 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
8331 if (globals->use_rel)
8332 relocate = TRUE;
8333 else
8334 outrel.r_addend += dynreloc_value;
8335 }
8336
8337 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
8338
8339 /* If this reloc is against an external symbol, we do not want to
8340 fiddle with the addend. Otherwise, we need to include the symbol
8341 value so that it becomes an addend for the dynamic reloc. */
8342 if (! relocate)
8343 return bfd_reloc_ok;
8344
8345 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8346 contents, rel->r_offset,
8347 dynreloc_value, (bfd_vma) 0);
8348 }
8349 else switch (r_type)
8350 {
8351 case R_ARM_ABS12:
8352 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8353
8354 case R_ARM_XPC25: /* Arm BLX instruction. */
8355 case R_ARM_CALL:
8356 case R_ARM_JUMP24:
8357 case R_ARM_PC24: /* Arm B/BL instruction. */
8358 case R_ARM_PLT32:
8359 {
8360 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
8361
8362 if (r_type == R_ARM_XPC25)
8363 {
8364 /* Check for Arm calling Arm function. */
8365 /* FIXME: Should we translate the instruction into a BL
8366 instruction instead ? */
8367 if (branch_type != ST_BRANCH_TO_THUMB)
8368 (*_bfd_error_handler)
8369 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
8370 input_bfd,
8371 h ? h->root.root.string : "(local)");
8372 }
8373 else if (r_type == R_ARM_PC24)
8374 {
8375 /* Check for Arm calling Thumb function. */
8376 if (branch_type == ST_BRANCH_TO_THUMB)
8377 {
8378 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
8379 output_bfd, input_section,
8380 hit_data, sym_sec, rel->r_offset,
8381 signed_addend, value,
8382 error_message))
8383 return bfd_reloc_ok;
8384 else
8385 return bfd_reloc_dangerous;
8386 }
8387 }
8388
8389 /* Check if a stub has to be inserted because the
8390 destination is too far or we are changing mode. */
8391 if ( r_type == R_ARM_CALL
8392 || r_type == R_ARM_JUMP24
8393 || r_type == R_ARM_PLT32)
8394 {
8395 enum elf32_arm_stub_type stub_type = arm_stub_none;
8396 struct elf32_arm_link_hash_entry *hash;
8397
8398 hash = (struct elf32_arm_link_hash_entry *) h;
8399 stub_type = arm_type_of_stub (info, input_section, rel,
8400 st_type, &branch_type,
8401 hash, value, sym_sec,
8402 input_bfd, sym_name);
8403
8404 if (stub_type != arm_stub_none)
8405 {
8406 /* The target is out of reach, so redirect the
8407 branch to the local stub for this function. */
8408 stub_entry = elf32_arm_get_stub_entry (input_section,
8409 sym_sec, h,
8410 rel, globals,
8411 stub_type);
8412 {
8413 if (stub_entry != NULL)
8414 value = (stub_entry->stub_offset
8415 + stub_entry->stub_sec->output_offset
8416 + stub_entry->stub_sec->output_section->vma);
8417
8418 if (plt_offset != (bfd_vma) -1)
8419 *unresolved_reloc_p = FALSE;
8420 }
8421 }
8422 else
8423 {
8424 /* If the call goes through a PLT entry, make sure to
8425 check distance to the right destination address. */
8426 if (plt_offset != (bfd_vma) -1)
8427 {
8428 value = (splt->output_section->vma
8429 + splt->output_offset
8430 + plt_offset);
8431 *unresolved_reloc_p = FALSE;
8432 /* The PLT entry is in ARM mode, regardless of the
8433 target function. */
8434 branch_type = ST_BRANCH_TO_ARM;
8435 }
8436 }
8437 }
8438
8439 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
8440 where:
8441 S is the address of the symbol in the relocation.
8442 P is address of the instruction being relocated.
8443 A is the addend (extracted from the instruction) in bytes.
8444
8445 S is held in 'value'.
8446 P is the base address of the section containing the
8447 instruction plus the offset of the reloc into that
8448 section, ie:
8449 (input_section->output_section->vma +
8450 input_section->output_offset +
8451 rel->r_offset).
8452 A is the addend, converted into bytes, ie:
8453 (signed_addend * 4)
8454
8455 Note: None of these operations have knowledge of the pipeline
8456 size of the processor, thus it is up to the assembler to
8457 encode this information into the addend. */
8458 value -= (input_section->output_section->vma
8459 + input_section->output_offset);
8460 value -= rel->r_offset;
8461 if (globals->use_rel)
8462 value += (signed_addend << howto->size);
8463 else
8464 /* RELA addends do not have to be adjusted by howto->size. */
8465 value += signed_addend;
8466
8467 signed_addend = value;
8468 signed_addend >>= howto->rightshift;
8469
8470 /* A branch to an undefined weak symbol is turned into a jump to
8471 the next instruction unless a PLT entry will be created.
8472 Do the same for local undefined symbols (but not for STN_UNDEF).
8473 The jump to the next instruction is optimized as a NOP depending
8474 on the architecture. */
8475 if (h ? (h->root.type == bfd_link_hash_undefweak
8476 && plt_offset == (bfd_vma) -1)
8477 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
8478 {
8479 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
8480
8481 if (arch_has_arm_nop (globals))
8482 value |= 0x0320f000;
8483 else
8484 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
8485 }
8486 else
8487 {
8488 /* Perform a signed range check. */
8489 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
8490 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
8491 return bfd_reloc_overflow;
8492
8493 addend = (value & 2);
8494
8495 value = (signed_addend & howto->dst_mask)
8496 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
8497
8498 if (r_type == R_ARM_CALL)
8499 {
8500 /* Set the H bit in the BLX instruction. */
8501 if (branch_type == ST_BRANCH_TO_THUMB)
8502 {
8503 if (addend)
8504 value |= (1 << 24);
8505 else
8506 value &= ~(bfd_vma)(1 << 24);
8507 }
8508
8509 /* Select the correct instruction (BL or BLX). */
8510 /* Only if we are not handling a BL to a stub. In this
8511 case, mode switching is performed by the stub. */
8512 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
8513 value |= (1 << 28);
8514 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
8515 {
8516 value &= ~(bfd_vma)(1 << 28);
8517 value |= (1 << 24);
8518 }
8519 }
8520 }
8521 }
8522 break;
8523
8524 case R_ARM_ABS32:
8525 value += addend;
8526 if (branch_type == ST_BRANCH_TO_THUMB)
8527 value |= 1;
8528 break;
8529
8530 case R_ARM_ABS32_NOI:
8531 value += addend;
8532 break;
8533
8534 case R_ARM_REL32:
8535 value += addend;
8536 if (branch_type == ST_BRANCH_TO_THUMB)
8537 value |= 1;
8538 value -= (input_section->output_section->vma
8539 + input_section->output_offset + rel->r_offset);
8540 break;
8541
8542 case R_ARM_REL32_NOI:
8543 value += addend;
8544 value -= (input_section->output_section->vma
8545 + input_section->output_offset + rel->r_offset);
8546 break;
8547
8548 case R_ARM_PREL31:
8549 value -= (input_section->output_section->vma
8550 + input_section->output_offset + rel->r_offset);
8551 value += signed_addend;
8552 if (! h || h->root.type != bfd_link_hash_undefweak)
8553 {
8554 /* Check for overflow. */
8555 if ((value ^ (value >> 1)) & (1 << 30))
8556 return bfd_reloc_overflow;
8557 }
8558 value &= 0x7fffffff;
8559 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
8560 if (branch_type == ST_BRANCH_TO_THUMB)
8561 value |= 1;
8562 break;
8563 }
8564
8565 bfd_put_32 (input_bfd, value, hit_data);
8566 return bfd_reloc_ok;
8567
8568 case R_ARM_ABS8:
8569 value += addend;
8570
8571 /* There is no way to tell whether the user intended to use a signed or
8572 unsigned addend. When checking for overflow we accept either,
8573 as specified by the AAELF. */
8574 if ((long) value > 0xff || (long) value < -0x80)
8575 return bfd_reloc_overflow;
8576
8577 bfd_put_8 (input_bfd, value, hit_data);
8578 return bfd_reloc_ok;
8579
8580 case R_ARM_ABS16:
8581 value += addend;
8582
8583 /* See comment for R_ARM_ABS8. */
8584 if ((long) value > 0xffff || (long) value < -0x8000)
8585 return bfd_reloc_overflow;
8586
8587 bfd_put_16 (input_bfd, value, hit_data);
8588 return bfd_reloc_ok;
8589
8590 case R_ARM_THM_ABS5:
8591 /* Support ldr and str instructions for the thumb. */
8592 if (globals->use_rel)
8593 {
8594 /* Need to refetch addend. */
8595 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8596 /* ??? Need to determine shift amount from operand size. */
8597 addend >>= howto->rightshift;
8598 }
8599 value += addend;
8600
8601 /* ??? Isn't value unsigned? */
8602 if ((long) value > 0x1f || (long) value < -0x10)
8603 return bfd_reloc_overflow;
8604
8605 /* ??? Value needs to be properly shifted into place first. */
8606 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
8607 bfd_put_16 (input_bfd, value, hit_data);
8608 return bfd_reloc_ok;
8609
8610 case R_ARM_THM_ALU_PREL_11_0:
8611 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
8612 {
8613 bfd_vma insn;
8614 bfd_signed_vma relocation;
8615
8616 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8617 | bfd_get_16 (input_bfd, hit_data + 2);
8618
8619 if (globals->use_rel)
8620 {
8621 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
8622 | ((insn & (1 << 26)) >> 15);
8623 if (insn & 0xf00000)
8624 signed_addend = -signed_addend;
8625 }
8626
8627 relocation = value + signed_addend;
8628 relocation -= Pa (input_section->output_section->vma
8629 + input_section->output_offset
8630 + rel->r_offset);
8631
8632 value = abs (relocation);
8633
8634 if (value >= 0x1000)
8635 return bfd_reloc_overflow;
8636
8637 insn = (insn & 0xfb0f8f00) | (value & 0xff)
8638 | ((value & 0x700) << 4)
8639 | ((value & 0x800) << 15);
8640 if (relocation < 0)
8641 insn |= 0xa00000;
8642
8643 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8644 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8645
8646 return bfd_reloc_ok;
8647 }
8648
8649 case R_ARM_THM_PC8:
8650 /* PR 10073: This reloc is not generated by the GNU toolchain,
8651 but it is supported for compatibility with third party libraries
8652 generated by other compilers, specifically the ARM/IAR. */
8653 {
8654 bfd_vma insn;
8655 bfd_signed_vma relocation;
8656
8657 insn = bfd_get_16 (input_bfd, hit_data);
8658
8659 if (globals->use_rel)
8660 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
8661
8662 relocation = value + addend;
8663 relocation -= Pa (input_section->output_section->vma
8664 + input_section->output_offset
8665 + rel->r_offset);
8666
8667 value = abs (relocation);
8668
8669 /* We do not check for overflow of this reloc. Although strictly
8670 speaking this is incorrect, it appears to be necessary in order
8671 to work with IAR generated relocs. Since GCC and GAS do not
8672 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
8673 a problem for them. */
8674 value &= 0x3fc;
8675
8676 insn = (insn & 0xff00) | (value >> 2);
8677
8678 bfd_put_16 (input_bfd, insn, hit_data);
8679
8680 return bfd_reloc_ok;
8681 }
8682
8683 case R_ARM_THM_PC12:
8684 /* Corresponds to: ldr.w reg, [pc, #offset]. */
8685 {
8686 bfd_vma insn;
8687 bfd_signed_vma relocation;
8688
8689 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8690 | bfd_get_16 (input_bfd, hit_data + 2);
8691
8692 if (globals->use_rel)
8693 {
8694 signed_addend = insn & 0xfff;
8695 if (!(insn & (1 << 23)))
8696 signed_addend = -signed_addend;
8697 }
8698
8699 relocation = value + signed_addend;
8700 relocation -= Pa (input_section->output_section->vma
8701 + input_section->output_offset
8702 + rel->r_offset);
8703
8704 value = abs (relocation);
8705
8706 if (value >= 0x1000)
8707 return bfd_reloc_overflow;
8708
8709 insn = (insn & 0xff7ff000) | value;
8710 if (relocation >= 0)
8711 insn |= (1 << 23);
8712
8713 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8714 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8715
8716 return bfd_reloc_ok;
8717 }
8718
8719 case R_ARM_THM_XPC22:
8720 case R_ARM_THM_CALL:
8721 case R_ARM_THM_JUMP24:
8722 /* Thumb BL (branch long instruction). */
8723 {
8724 bfd_vma relocation;
8725 bfd_vma reloc_sign;
8726 bfd_boolean overflow = FALSE;
8727 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8728 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8729 bfd_signed_vma reloc_signed_max;
8730 bfd_signed_vma reloc_signed_min;
8731 bfd_vma check;
8732 bfd_signed_vma signed_check;
8733 int bitsize;
8734 const int thumb2 = using_thumb2 (globals);
8735
8736 /* A branch to an undefined weak symbol is turned into a jump to
8737 the next instruction unless a PLT entry will be created.
8738 The jump to the next instruction is optimized as a NOP.W for
8739 Thumb-2 enabled architectures. */
8740 if (h && h->root.type == bfd_link_hash_undefweak
8741 && plt_offset == (bfd_vma) -1)
8742 {
8743 if (arch_has_thumb2_nop (globals))
8744 {
8745 bfd_put_16 (input_bfd, 0xf3af, hit_data);
8746 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
8747 }
8748 else
8749 {
8750 bfd_put_16 (input_bfd, 0xe000, hit_data);
8751 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
8752 }
8753 return bfd_reloc_ok;
8754 }
8755
8756 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
8757 with Thumb-1) involving the J1 and J2 bits. */
8758 if (globals->use_rel)
8759 {
8760 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
8761 bfd_vma upper = upper_insn & 0x3ff;
8762 bfd_vma lower = lower_insn & 0x7ff;
8763 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
8764 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
8765 bfd_vma i1 = j1 ^ s ? 0 : 1;
8766 bfd_vma i2 = j2 ^ s ? 0 : 1;
8767
8768 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
8769 /* Sign extend. */
8770 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
8771
8772 signed_addend = addend;
8773 }
8774
8775 if (r_type == R_ARM_THM_XPC22)
8776 {
8777 /* Check for Thumb to Thumb call. */
8778 /* FIXME: Should we translate the instruction into a BL
8779 instruction instead ? */
8780 if (branch_type == ST_BRANCH_TO_THUMB)
8781 (*_bfd_error_handler)
8782 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
8783 input_bfd,
8784 h ? h->root.root.string : "(local)");
8785 }
8786 else
8787 {
8788 /* If it is not a call to Thumb, assume call to Arm.
8789 If it is a call relative to a section name, then it is not a
8790 function call at all, but rather a long jump. Calls through
8791 the PLT do not require stubs. */
8792 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
8793 {
8794 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8795 {
8796 /* Convert BL to BLX. */
8797 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8798 }
8799 else if (( r_type != R_ARM_THM_CALL)
8800 && (r_type != R_ARM_THM_JUMP24))
8801 {
8802 if (elf32_thumb_to_arm_stub
8803 (info, sym_name, input_bfd, output_bfd, input_section,
8804 hit_data, sym_sec, rel->r_offset, signed_addend, value,
8805 error_message))
8806 return bfd_reloc_ok;
8807 else
8808 return bfd_reloc_dangerous;
8809 }
8810 }
8811 else if (branch_type == ST_BRANCH_TO_THUMB
8812 && globals->use_blx
8813 && r_type == R_ARM_THM_CALL)
8814 {
8815 /* Make sure this is a BL. */
8816 lower_insn |= 0x1800;
8817 }
8818 }
8819
8820 enum elf32_arm_stub_type stub_type = arm_stub_none;
8821 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
8822 {
8823 /* Check if a stub has to be inserted because the destination
8824 is too far. */
8825 struct elf32_arm_stub_hash_entry *stub_entry;
8826 struct elf32_arm_link_hash_entry *hash;
8827
8828 hash = (struct elf32_arm_link_hash_entry *) h;
8829
8830 stub_type = arm_type_of_stub (info, input_section, rel,
8831 st_type, &branch_type,
8832 hash, value, sym_sec,
8833 input_bfd, sym_name);
8834
8835 if (stub_type != arm_stub_none)
8836 {
8837 /* The target is out of reach or we are changing modes, so
8838 redirect the branch to the local stub for this
8839 function. */
8840 stub_entry = elf32_arm_get_stub_entry (input_section,
8841 sym_sec, h,
8842 rel, globals,
8843 stub_type);
8844 if (stub_entry != NULL)
8845 {
8846 value = (stub_entry->stub_offset
8847 + stub_entry->stub_sec->output_offset
8848 + stub_entry->stub_sec->output_section->vma);
8849
8850 if (plt_offset != (bfd_vma) -1)
8851 *unresolved_reloc_p = FALSE;
8852 }
8853
8854 /* If this call becomes a call to Arm, force BLX. */
8855 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
8856 {
8857 if ((stub_entry
8858 && !arm_stub_is_thumb (stub_entry->stub_type))
8859 || branch_type != ST_BRANCH_TO_THUMB)
8860 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8861 }
8862 }
8863 }
8864
8865 /* Handle calls via the PLT. */
8866 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
8867 {
8868 value = (splt->output_section->vma
8869 + splt->output_offset
8870 + plt_offset);
8871
8872 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8873 {
8874 /* If the Thumb BLX instruction is available, convert
8875 the BL to a BLX instruction to call the ARM-mode
8876 PLT entry. */
8877 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8878 branch_type = ST_BRANCH_TO_ARM;
8879 }
8880 else
8881 {
8882 /* Target the Thumb stub before the ARM PLT entry. */
8883 value -= PLT_THUMB_STUB_SIZE;
8884 branch_type = ST_BRANCH_TO_THUMB;
8885 }
8886 *unresolved_reloc_p = FALSE;
8887 }
8888
8889 relocation = value + signed_addend;
8890
8891 relocation -= (input_section->output_section->vma
8892 + input_section->output_offset
8893 + rel->r_offset);
8894
8895 check = relocation >> howto->rightshift;
8896
8897 /* If this is a signed value, the rightshift just dropped
8898 leading 1 bits (assuming two's complement). */
8899 if ((bfd_signed_vma) relocation >= 0)
8900 signed_check = check;
8901 else
8902 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
8903
8904 /* Calculate the permissible maximum and minimum values for
8905 this relocation according to whether we're relocating for
8906 Thumb-2 or not. */
8907 bitsize = howto->bitsize;
8908 if (!thumb2)
8909 bitsize -= 2;
8910 reloc_signed_max = (1 << (bitsize - 1)) - 1;
8911 reloc_signed_min = ~reloc_signed_max;
8912
8913 /* Assumes two's complement. */
8914 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8915 overflow = TRUE;
8916
8917 if ((lower_insn & 0x5000) == 0x4000)
8918 /* For a BLX instruction, make sure that the relocation is rounded up
8919 to a word boundary. This follows the semantics of the instruction
8920 which specifies that bit 1 of the target address will come from bit
8921 1 of the base address. */
8922 relocation = (relocation + 2) & ~ 3;
8923
8924 /* Put RELOCATION back into the insn. Assumes two's complement.
8925 We use the Thumb-2 encoding, which is safe even if dealing with
8926 a Thumb-1 instruction by virtue of our overflow check above. */
8927 reloc_sign = (signed_check < 0) ? 1 : 0;
8928 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
8929 | ((relocation >> 12) & 0x3ff)
8930 | (reloc_sign << 10);
8931 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
8932 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
8933 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
8934 | ((relocation >> 1) & 0x7ff);
8935
8936 /* Put the relocated value back in the object file: */
8937 bfd_put_16 (input_bfd, upper_insn, hit_data);
8938 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
8939
8940 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
8941 }
8942 break;
8943
8944 case R_ARM_THM_JUMP19:
8945 /* Thumb32 conditional branch instruction. */
8946 {
8947 bfd_vma relocation;
8948 bfd_boolean overflow = FALSE;
8949 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8950 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8951 bfd_signed_vma reloc_signed_max = 0xffffe;
8952 bfd_signed_vma reloc_signed_min = -0x100000;
8953 bfd_signed_vma signed_check;
8954
8955 /* Need to refetch the addend, reconstruct the top three bits,
8956 and squish the two 11 bit pieces together. */
8957 if (globals->use_rel)
8958 {
8959 bfd_vma S = (upper_insn & 0x0400) >> 10;
8960 bfd_vma upper = (upper_insn & 0x003f);
8961 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
8962 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
8963 bfd_vma lower = (lower_insn & 0x07ff);
8964
8965 upper |= J1 << 6;
8966 upper |= J2 << 7;
8967 upper |= (!S) << 8;
8968 upper -= 0x0100; /* Sign extend. */
8969
8970 addend = (upper << 12) | (lower << 1);
8971 signed_addend = addend;
8972 }
8973
8974 /* Handle calls via the PLT. */
8975 if (plt_offset != (bfd_vma) -1)
8976 {
8977 value = (splt->output_section->vma
8978 + splt->output_offset
8979 + plt_offset);
8980 /* Target the Thumb stub before the ARM PLT entry. */
8981 value -= PLT_THUMB_STUB_SIZE;
8982 *unresolved_reloc_p = FALSE;
8983 }
8984
8985 /* ??? Should handle interworking? GCC might someday try to
8986 use this for tail calls. */
8987
8988 relocation = value + signed_addend;
8989 relocation -= (input_section->output_section->vma
8990 + input_section->output_offset
8991 + rel->r_offset);
8992 signed_check = (bfd_signed_vma) relocation;
8993
8994 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8995 overflow = TRUE;
8996
8997 /* Put RELOCATION back into the insn. */
8998 {
8999 bfd_vma S = (relocation & 0x00100000) >> 20;
9000 bfd_vma J2 = (relocation & 0x00080000) >> 19;
9001 bfd_vma J1 = (relocation & 0x00040000) >> 18;
9002 bfd_vma hi = (relocation & 0x0003f000) >> 12;
9003 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
9004
9005 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
9006 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9007 }
9008
9009 /* Put the relocated value back in the object file: */
9010 bfd_put_16 (input_bfd, upper_insn, hit_data);
9011 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9012
9013 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9014 }
9015
9016 case R_ARM_THM_JUMP11:
9017 case R_ARM_THM_JUMP8:
9018 case R_ARM_THM_JUMP6:
9019 /* Thumb B (branch) instruction. */
9020 {
9021 bfd_signed_vma relocation;
9022 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9023 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9024 bfd_signed_vma signed_check;
9025
9026 /* CBZ cannot jump backward. */
9027 if (r_type == R_ARM_THM_JUMP6)
9028 reloc_signed_min = 0;
9029
9030 if (globals->use_rel)
9031 {
9032 /* Need to refetch addend. */
9033 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9034 if (addend & ((howto->src_mask + 1) >> 1))
9035 {
9036 signed_addend = -1;
9037 signed_addend &= ~ howto->src_mask;
9038 signed_addend |= addend;
9039 }
9040 else
9041 signed_addend = addend;
9042 /* The value in the insn has been right shifted. We need to
9043 undo this, so that we can perform the address calculation
9044 in terms of bytes. */
9045 signed_addend <<= howto->rightshift;
9046 }
9047 relocation = value + signed_addend;
9048
9049 relocation -= (input_section->output_section->vma
9050 + input_section->output_offset
9051 + rel->r_offset);
9052
9053 relocation >>= howto->rightshift;
9054 signed_check = relocation;
9055
9056 if (r_type == R_ARM_THM_JUMP6)
9057 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9058 else
9059 relocation &= howto->dst_mask;
9060 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9061
9062 bfd_put_16 (input_bfd, relocation, hit_data);
9063
9064 /* Assumes two's complement. */
9065 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9066 return bfd_reloc_overflow;
9067
9068 return bfd_reloc_ok;
9069 }
9070
9071 case R_ARM_ALU_PCREL7_0:
9072 case R_ARM_ALU_PCREL15_8:
9073 case R_ARM_ALU_PCREL23_15:
9074 {
9075 bfd_vma insn;
9076 bfd_vma relocation;
9077
9078 insn = bfd_get_32 (input_bfd, hit_data);
9079 if (globals->use_rel)
9080 {
9081 /* Extract the addend. */
9082 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9083 signed_addend = addend;
9084 }
9085 relocation = value + signed_addend;
9086
9087 relocation -= (input_section->output_section->vma
9088 + input_section->output_offset
9089 + rel->r_offset);
9090 insn = (insn & ~0xfff)
9091 | ((howto->bitpos << 7) & 0xf00)
9092 | ((relocation >> howto->bitpos) & 0xff);
9093 bfd_put_32 (input_bfd, insn, hit_data);
9094 }
9095 return bfd_reloc_ok;
9096
9097 case R_ARM_GNU_VTINHERIT:
9098 case R_ARM_GNU_VTENTRY:
9099 return bfd_reloc_ok;
9100
9101 case R_ARM_GOTOFF32:
9102 /* Relocation is relative to the start of the
9103 global offset table. */
9104
9105 BFD_ASSERT (sgot != NULL);
9106 if (sgot == NULL)
9107 return bfd_reloc_notsupported;
9108
9109 /* If we are addressing a Thumb function, we need to adjust the
9110 address by one, so that attempts to call the function pointer will
9111 correctly interpret it as Thumb code. */
9112 if (branch_type == ST_BRANCH_TO_THUMB)
9113 value += 1;
9114
9115 /* Note that sgot->output_offset is not involved in this
9116 calculation. We always want the start of .got. If we
9117 define _GLOBAL_OFFSET_TABLE in a different way, as is
9118 permitted by the ABI, we might have to change this
9119 calculation. */
9120 value -= sgot->output_section->vma;
9121 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9122 contents, rel->r_offset, value,
9123 rel->r_addend);
9124
9125 case R_ARM_GOTPC:
9126 /* Use global offset table as symbol value. */
9127 BFD_ASSERT (sgot != NULL);
9128
9129 if (sgot == NULL)
9130 return bfd_reloc_notsupported;
9131
9132 *unresolved_reloc_p = FALSE;
9133 value = sgot->output_section->vma;
9134 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9135 contents, rel->r_offset, value,
9136 rel->r_addend);
9137
9138 case R_ARM_GOT32:
9139 case R_ARM_GOT_PREL:
9140 /* Relocation is to the entry for this symbol in the
9141 global offset table. */
9142 if (sgot == NULL)
9143 return bfd_reloc_notsupported;
9144
9145 if (dynreloc_st_type == STT_GNU_IFUNC
9146 && plt_offset != (bfd_vma) -1
9147 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
9148 {
9149 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9150 symbol, and the relocation resolves directly to the runtime
9151 target rather than to the .iplt entry. This means that any
9152 .got entry would be the same value as the .igot.plt entry,
9153 so there's no point creating both. */
9154 sgot = globals->root.igotplt;
9155 value = sgot->output_offset + gotplt_offset;
9156 }
9157 else if (h != NULL)
9158 {
9159 bfd_vma off;
9160
9161 off = h->got.offset;
9162 BFD_ASSERT (off != (bfd_vma) -1);
9163 if ((off & 1) != 0)
9164 {
9165 /* We have already processed one GOT relocation against
9166 this symbol. */
9167 off &= ~1;
9168 if (globals->root.dynamic_sections_created
9169 && !SYMBOL_REFERENCES_LOCAL (info, h))
9170 *unresolved_reloc_p = FALSE;
9171 }
9172 else
9173 {
9174 Elf_Internal_Rela outrel;
9175
9176 if (!SYMBOL_REFERENCES_LOCAL (info, h))
9177 {
9178 /* If the symbol doesn't resolve locally in a static
9179 object, we have an undefined reference. If the
9180 symbol doesn't resolve locally in a dynamic object,
9181 it should be resolved by the dynamic linker. */
9182 if (globals->root.dynamic_sections_created)
9183 {
9184 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
9185 *unresolved_reloc_p = FALSE;
9186 }
9187 else
9188 outrel.r_info = 0;
9189 outrel.r_addend = 0;
9190 }
9191 else
9192 {
9193 if (dynreloc_st_type == STT_GNU_IFUNC)
9194 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9195 else if (info->shared)
9196 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9197 else
9198 outrel.r_info = 0;
9199 outrel.r_addend = dynreloc_value;
9200 }
9201
9202 /* The GOT entry is initialized to zero by default.
9203 See if we should install a different value. */
9204 if (outrel.r_addend != 0
9205 && (outrel.r_info == 0 || globals->use_rel))
9206 {
9207 bfd_put_32 (output_bfd, outrel.r_addend,
9208 sgot->contents + off);
9209 outrel.r_addend = 0;
9210 }
9211
9212 if (outrel.r_info != 0)
9213 {
9214 outrel.r_offset = (sgot->output_section->vma
9215 + sgot->output_offset
9216 + off);
9217 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9218 }
9219 h->got.offset |= 1;
9220 }
9221 value = sgot->output_offset + off;
9222 }
9223 else
9224 {
9225 bfd_vma off;
9226
9227 BFD_ASSERT (local_got_offsets != NULL &&
9228 local_got_offsets[r_symndx] != (bfd_vma) -1);
9229
9230 off = local_got_offsets[r_symndx];
9231
9232 /* The offset must always be a multiple of 4. We use the
9233 least significant bit to record whether we have already
9234 generated the necessary reloc. */
9235 if ((off & 1) != 0)
9236 off &= ~1;
9237 else
9238 {
9239 if (globals->use_rel)
9240 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
9241
9242 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
9243 {
9244 Elf_Internal_Rela outrel;
9245
9246 outrel.r_addend = addend + dynreloc_value;
9247 outrel.r_offset = (sgot->output_section->vma
9248 + sgot->output_offset
9249 + off);
9250 if (dynreloc_st_type == STT_GNU_IFUNC)
9251 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9252 else
9253 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9254 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9255 }
9256
9257 local_got_offsets[r_symndx] |= 1;
9258 }
9259
9260 value = sgot->output_offset + off;
9261 }
9262 if (r_type != R_ARM_GOT32)
9263 value += sgot->output_section->vma;
9264
9265 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9266 contents, rel->r_offset, value,
9267 rel->r_addend);
9268
9269 case R_ARM_TLS_LDO32:
9270 value = value - dtpoff_base (info);
9271
9272 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9273 contents, rel->r_offset, value,
9274 rel->r_addend);
9275
9276 case R_ARM_TLS_LDM32:
9277 {
9278 bfd_vma off;
9279
9280 if (sgot == NULL)
9281 abort ();
9282
9283 off = globals->tls_ldm_got.offset;
9284
9285 if ((off & 1) != 0)
9286 off &= ~1;
9287 else
9288 {
9289 /* If we don't know the module number, create a relocation
9290 for it. */
9291 if (info->shared)
9292 {
9293 Elf_Internal_Rela outrel;
9294
9295 if (srelgot == NULL)
9296 abort ();
9297
9298 outrel.r_addend = 0;
9299 outrel.r_offset = (sgot->output_section->vma
9300 + sgot->output_offset + off);
9301 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
9302
9303 if (globals->use_rel)
9304 bfd_put_32 (output_bfd, outrel.r_addend,
9305 sgot->contents + off);
9306
9307 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9308 }
9309 else
9310 bfd_put_32 (output_bfd, 1, sgot->contents + off);
9311
9312 globals->tls_ldm_got.offset |= 1;
9313 }
9314
9315 value = sgot->output_section->vma + sgot->output_offset + off
9316 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
9317
9318 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9319 contents, rel->r_offset, value,
9320 rel->r_addend);
9321 }
9322
9323 case R_ARM_TLS_CALL:
9324 case R_ARM_THM_TLS_CALL:
9325 case R_ARM_TLS_GD32:
9326 case R_ARM_TLS_IE32:
9327 case R_ARM_TLS_GOTDESC:
9328 case R_ARM_TLS_DESCSEQ:
9329 case R_ARM_THM_TLS_DESCSEQ:
9330 {
9331 bfd_vma off, offplt;
9332 int indx = 0;
9333 char tls_type;
9334
9335 BFD_ASSERT (sgot != NULL);
9336
9337 if (h != NULL)
9338 {
9339 bfd_boolean dyn;
9340 dyn = globals->root.dynamic_sections_created;
9341 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
9342 && (!info->shared
9343 || !SYMBOL_REFERENCES_LOCAL (info, h)))
9344 {
9345 *unresolved_reloc_p = FALSE;
9346 indx = h->dynindx;
9347 }
9348 off = h->got.offset;
9349 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
9350 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
9351 }
9352 else
9353 {
9354 BFD_ASSERT (local_got_offsets != NULL);
9355 off = local_got_offsets[r_symndx];
9356 offplt = local_tlsdesc_gotents[r_symndx];
9357 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
9358 }
9359
9360 /* Linker relaxation happens from one of the
9361 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
9362 if (ELF32_R_TYPE(rel->r_info) != r_type)
9363 tls_type = GOT_TLS_IE;
9364
9365 BFD_ASSERT (tls_type != GOT_UNKNOWN);
9366
9367 if ((off & 1) != 0)
9368 off &= ~1;
9369 else
9370 {
9371 bfd_boolean need_relocs = FALSE;
9372 Elf_Internal_Rela outrel;
9373 int cur_off = off;
9374
9375 /* The GOT entries have not been initialized yet. Do it
9376 now, and emit any relocations. If both an IE GOT and a
9377 GD GOT are necessary, we emit the GD first. */
9378
9379 if ((info->shared || indx != 0)
9380 && (h == NULL
9381 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9382 || h->root.type != bfd_link_hash_undefweak))
9383 {
9384 need_relocs = TRUE;
9385 BFD_ASSERT (srelgot != NULL);
9386 }
9387
9388 if (tls_type & GOT_TLS_GDESC)
9389 {
9390 bfd_byte *loc;
9391
9392 /* We should have relaxed, unless this is an undefined
9393 weak symbol. */
9394 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
9395 || info->shared);
9396 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
9397 <= globals->root.sgotplt->size);
9398
9399 outrel.r_addend = 0;
9400 outrel.r_offset = (globals->root.sgotplt->output_section->vma
9401 + globals->root.sgotplt->output_offset
9402 + offplt
9403 + globals->sgotplt_jump_table_size);
9404
9405 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
9406 sreloc = globals->root.srelplt;
9407 loc = sreloc->contents;
9408 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
9409 BFD_ASSERT (loc + RELOC_SIZE (globals)
9410 <= sreloc->contents + sreloc->size);
9411
9412 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
9413
9414 /* For globals, the first word in the relocation gets
9415 the relocation index and the top bit set, or zero,
9416 if we're binding now. For locals, it gets the
9417 symbol's offset in the tls section. */
9418 bfd_put_32 (output_bfd,
9419 !h ? value - elf_hash_table (info)->tls_sec->vma
9420 : info->flags & DF_BIND_NOW ? 0
9421 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
9422 globals->root.sgotplt->contents + offplt
9423 + globals->sgotplt_jump_table_size);
9424
9425 /* Second word in the relocation is always zero. */
9426 bfd_put_32 (output_bfd, 0,
9427 globals->root.sgotplt->contents + offplt
9428 + globals->sgotplt_jump_table_size + 4);
9429 }
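/* A GD GOT entry occupies two consecutive words: the module ID
   (R_ARM_TLS_DTPMOD32) followed by the offset within that module's
   TLS block (R_ARM_TLS_DTPOFF32).  An IE entry, handled further
   down, is a single word holding the TP-relative offset.  */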
9430 if (tls_type & GOT_TLS_GD)
9431 {
9432 if (need_relocs)
9433 {
9434 outrel.r_addend = 0;
9435 outrel.r_offset = (sgot->output_section->vma
9436 + sgot->output_offset
9437 + cur_off);
9438 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
9439
9440 if (globals->use_rel)
9441 bfd_put_32 (output_bfd, outrel.r_addend,
9442 sgot->contents + cur_off);
9443
9444 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9445
9446 if (indx == 0)
9447 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9448 sgot->contents + cur_off + 4);
9449 else
9450 {
9451 outrel.r_addend = 0;
9452 outrel.r_info = ELF32_R_INFO (indx,
9453 R_ARM_TLS_DTPOFF32);
9454 outrel.r_offset += 4;
9455
9456 if (globals->use_rel)
9457 bfd_put_32 (output_bfd, outrel.r_addend,
9458 sgot->contents + cur_off + 4);
9459
9460 elf32_arm_add_dynreloc (output_bfd, info,
9461 srelgot, &outrel);
9462 }
9463 }
9464 else
9465 {
9466 /* If we are not emitting relocations for a
9467 general dynamic reference, then we must be in a
9468 static link or an executable link with the
9469 symbol binding locally. Mark it as belonging
9470 to module 1, the executable. */
9471 bfd_put_32 (output_bfd, 1,
9472 sgot->contents + cur_off);
9473 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9474 sgot->contents + cur_off + 4);
9475 }
9476
9477 cur_off += 8;
9478 }
9479
9480 if (tls_type & GOT_TLS_IE)
9481 {
9482 if (need_relocs)
9483 {
9484 if (indx == 0)
9485 outrel.r_addend = value - dtpoff_base (info);
9486 else
9487 outrel.r_addend = 0;
9488 outrel.r_offset = (sgot->output_section->vma
9489 + sgot->output_offset
9490 + cur_off);
9491 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
9492
9493 if (globals->use_rel)
9494 bfd_put_32 (output_bfd, outrel.r_addend,
9495 sgot->contents + cur_off);
9496
9497 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9498 }
9499 else
9500 bfd_put_32 (output_bfd, tpoff (info, value),
9501 sgot->contents + cur_off);
9502 cur_off += 4;
9503 }
9504
9505 if (h != NULL)
9506 h->got.offset |= 1;
9507 else
9508 local_got_offsets[r_symndx] |= 1;
9509 }
9510
9511 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
9512 off += 8;
9513 else if (tls_type & GOT_TLS_GDESC)
9514 off = offplt;
9515
9516 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
9517 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
9518 {
9519 bfd_signed_vma offset;
9520 /* TLS stubs are ARM mode. The original symbol is a
9521 data object, so branch_type is bogus. */
9522 branch_type = ST_BRANCH_TO_ARM;
9523 enum elf32_arm_stub_type stub_type
9524 = arm_type_of_stub (info, input_section, rel,
9525 st_type, &branch_type,
9526 (struct elf32_arm_link_hash_entry *)h,
9527 globals->tls_trampoline, globals->root.splt,
9528 input_bfd, sym_name);
9529
9530 if (stub_type != arm_stub_none)
9531 {
9532 struct elf32_arm_stub_hash_entry *stub_entry
9533 = elf32_arm_get_stub_entry
9534 (input_section, globals->root.splt, 0, rel,
9535 globals, stub_type);
9536 offset = (stub_entry->stub_offset
9537 + stub_entry->stub_sec->output_offset
9538 + stub_entry->stub_sec->output_section->vma);
9539 }
9540 else
9541 offset = (globals->root.splt->output_section->vma
9542 + globals->root.splt->output_offset
9543 + globals->tls_trampoline);
9544
9545 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
9546 {
9547 unsigned long inst;
9548
9549 offset -= (input_section->output_section->vma
9550 + input_section->output_offset
9551 + rel->r_offset + 8);
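/* The extra 8 above accounts for the ARM PC reading as the address of
   the current instruction plus 8; the branch immediate computed below
   is a signed word (4-byte) offset held in the low 24 bits.  */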
9552
9553 inst = offset >> 2;
9554 inst &= 0x00ffffff;
9555 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
9556 }
9557 else
9558 {
9559 /* Thumb blx encodes the offset in a complicated
9560 fashion. */
9561 unsigned upper_insn, lower_insn;
9562 unsigned neg;
9563
9564 offset -= (input_section->output_section->vma
9565 + input_section->output_offset
9566 + rel->r_offset + 4);
9567
9568 if (stub_type != arm_stub_none
9569 && arm_stub_is_thumb (stub_type))
9570 {
9571 lower_insn = 0xd000;
9572 }
9573 else
9574 {
9575 lower_insn = 0xc000;
9576 /* Round up the offset to a word boundary */
9577 offset = (offset + 2) & ~2;
9578 }
9579
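/* Encode the offset in the Thumb-2 BL/BLX form: the upper halfword
   holds S (bit 10) and imm10, the lower halfword holds J1 (bit 13),
   J2 (bit 11) and imm11, with J1 = NOT(I1) XOR S and
   J2 = NOT(I2) XOR S.  */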
9580 neg = offset < 0;
9581 upper_insn = (0xf000
9582 | ((offset >> 12) & 0x3ff)
9583 | (neg << 10));
9584 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
9585 | (((!((offset >> 22) & 1)) ^ neg) << 11)
9586 | ((offset >> 1) & 0x7ff);
9587 bfd_put_16 (input_bfd, upper_insn, hit_data);
9588 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9589 return bfd_reloc_ok;
9590 }
9591 }
9592 /* These relocations need special care: besides the fact that
9593 they point somewhere in .gotplt, the addend must be
9594 adjusted according to the type of instruction
9595 they refer to.  */
9596 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
9597 {
9598 unsigned long data, insn;
9599 unsigned thumb;
9600
9601 data = bfd_get_32 (input_bfd, hit_data);
9602 thumb = data & 1;
9603 data &= ~1u;
9604
9605 if (thumb)
9606 {
9607 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
9608 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9609 insn = (insn << 16)
9610 | bfd_get_16 (input_bfd,
9611 contents + rel->r_offset - data + 2);
9612 if ((insn & 0xf800c000) == 0xf000c000)
9613 /* bl/blx */
9614 value = -6;
9615 else if ((insn & 0xffffff00) == 0x4400)
9616 /* add */
9617 value = -5;
9618 else
9619 {
9620 (*_bfd_error_handler)
9621 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
9622 input_bfd, input_section,
9623 (unsigned long)rel->r_offset, insn);
9624 return bfd_reloc_notsupported;
9625 }
9626 }
9627 else
9628 {
9629 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
9630
9631 switch (insn >> 24)
9632 {
9633 case 0xeb: /* bl */
9634 case 0xfa: /* blx */
9635 value = -4;
9636 break;
9637
9638 case 0xe0: /* add */
9639 value = -8;
9640 break;
9641
9642 default:
9643 (*_bfd_error_handler)
9644 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
9645 input_bfd, input_section,
9646 (unsigned long)rel->r_offset, insn);
9647 return bfd_reloc_notsupported;
9648 }
9649 }
9650
9651 value += ((globals->root.sgotplt->output_section->vma
9652 + globals->root.sgotplt->output_offset + off)
9653 - (input_section->output_section->vma
9654 + input_section->output_offset
9655 + rel->r_offset)
9656 + globals->sgotplt_jump_table_size);
9657 }
9658 else
9659 value = ((globals->root.sgot->output_section->vma
9660 + globals->root.sgot->output_offset + off)
9661 - (input_section->output_section->vma
9662 + input_section->output_offset + rel->r_offset));
9663
9664 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9665 contents, rel->r_offset, value,
9666 rel->r_addend);
9667 }
9668
9669 case R_ARM_TLS_LE32:
9670 if (info->shared && !info->pie)
9671 {
9672 (*_bfd_error_handler)
9673 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
9674 input_bfd, input_section,
9675 (long) rel->r_offset, howto->name);
9676 return bfd_reloc_notsupported;
9677 }
9678 else
9679 value = tpoff (info, value);
9680
9681 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9682 contents, rel->r_offset, value,
9683 rel->r_addend);
9684
9685 case R_ARM_V4BX:
9686 if (globals->fix_v4bx)
9687 {
9688 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9689
9690 /* Ensure that we have a BX instruction. */
9691 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
9692
9693 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
9694 {
9695 /* Branch to veneer. */
9696 bfd_vma glue_addr;
9697 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
9698 glue_addr -= input_section->output_section->vma
9699 + input_section->output_offset
9700 + rel->r_offset + 8;
9701 insn = (insn & 0xf0000000) | 0x0a000000
9702 | ((glue_addr >> 2) & 0x00ffffff);
9703 }
9704 else
9705 {
9706 /* Preserve Rm (lowest four bits) and the condition code
9707 (highest four bits). Other bits encode MOV PC,Rm. */
9708 insn = (insn & 0xf000000f) | 0x01a0f000;
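/* For example, BX r1 (0xe12fff11) becomes MOV PC, r1 (0xe1a0f001).  */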
9709 }
9710
9711 bfd_put_32 (input_bfd, insn, hit_data);
9712 }
9713 return bfd_reloc_ok;
9714
9715 case R_ARM_MOVW_ABS_NC:
9716 case R_ARM_MOVT_ABS:
9717 case R_ARM_MOVW_PREL_NC:
9718 case R_ARM_MOVT_PREL:
9719 /* Until we properly support segment-base-relative addressing then
9720 we assume the segment base to be zero, as for the group relocations.
9721 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
9722 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
9723 case R_ARM_MOVW_BREL_NC:
9724 case R_ARM_MOVW_BREL:
9725 case R_ARM_MOVT_BREL:
9726 {
9727 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9728
9729 if (globals->use_rel)
9730 {
9731 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9732 signed_addend = (addend ^ 0x8000) - 0x8000;
9733 }
9734
9735 value += signed_addend;
9736
9737 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
9738 value -= (input_section->output_section->vma
9739 + input_section->output_offset + rel->r_offset);
9740
9741 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
9742 return bfd_reloc_overflow;
9743
9744 if (branch_type == ST_BRANCH_TO_THUMB)
9745 value |= 1;
9746
9747 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
9748 || r_type == R_ARM_MOVT_BREL)
9749 value >>= 16;
9750
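/* Insert the (possibly shifted) 16-bit value into the MOVW/MOVT
   fields: bits 11:0 of the value go into imm12 and bits 15:12 into
   the imm4 field at bits 19:16 of the instruction.  For example, a
   value of 0x5678 yields imm4 = 0x5 and imm12 = 0x678.  */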
9751 insn &= 0xfff0f000;
9752 insn |= value & 0xfff;
9753 insn |= (value & 0xf000) << 4;
9754 bfd_put_32 (input_bfd, insn, hit_data);
9755 }
9756 return bfd_reloc_ok;
9757
9758 case R_ARM_THM_MOVW_ABS_NC:
9759 case R_ARM_THM_MOVT_ABS:
9760 case R_ARM_THM_MOVW_PREL_NC:
9761 case R_ARM_THM_MOVT_PREL:
9762 /* Until we properly support segment-base-relative addressing then
9763 we assume the segment base to be zero, as for the above relocations.
9764 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
9765 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
9766 as R_ARM_THM_MOVT_ABS. */
9767 case R_ARM_THM_MOVW_BREL_NC:
9768 case R_ARM_THM_MOVW_BREL:
9769 case R_ARM_THM_MOVT_BREL:
9770 {
9771 bfd_vma insn;
9772
9773 insn = bfd_get_16 (input_bfd, hit_data) << 16;
9774 insn |= bfd_get_16 (input_bfd, hit_data + 2);
9775
9776 if (globals->use_rel)
9777 {
9778 addend = ((insn >> 4) & 0xf000)
9779 | ((insn >> 15) & 0x0800)
9780 | ((insn >> 4) & 0x0700)
9781 | (insn & 0x00ff);
9782 signed_addend = (addend ^ 0x8000) - 0x8000;
9783 }
9784
9785 value += signed_addend;
9786
9787 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
9788 value -= (input_section->output_section->vma
9789 + input_section->output_offset + rel->r_offset);
9790
9791 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
9792 return bfd_reloc_overflow;
9793
9794 if (branch_type == ST_BRANCH_TO_THUMB)
9795 value |= 1;
9796
9797 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
9798 || r_type == R_ARM_THM_MOVT_BREL)
9799 value >>= 16;
9800
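/* The Thumb-2 MOVW/MOVT immediate is split into four fields of the
   combined 32-bit (halfword pair) instruction: imm4 at bits 19:16,
   i at bit 26, imm3 at bits 14:12 and imm8 at bits 7:0, matching the
   shifts applied below.  */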
9801 insn &= 0xfbf08f00;
9802 insn |= (value & 0xf000) << 4;
9803 insn |= (value & 0x0800) << 15;
9804 insn |= (value & 0x0700) << 4;
9805 insn |= (value & 0x00ff);
9806
9807 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9808 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9809 }
9810 return bfd_reloc_ok;
9811
9812 case R_ARM_ALU_PC_G0_NC:
9813 case R_ARM_ALU_PC_G1_NC:
9814 case R_ARM_ALU_PC_G0:
9815 case R_ARM_ALU_PC_G1:
9816 case R_ARM_ALU_PC_G2:
9817 case R_ARM_ALU_SB_G0_NC:
9818 case R_ARM_ALU_SB_G1_NC:
9819 case R_ARM_ALU_SB_G0:
9820 case R_ARM_ALU_SB_G1:
9821 case R_ARM_ALU_SB_G2:
9822 {
9823 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9824 bfd_vma pc = input_section->output_section->vma
9825 + input_section->output_offset + rel->r_offset;
9826 /* sb should be the origin of the *segment* containing the symbol.
9827 It is not clear how to obtain this OS-dependent value, so we
9828 make an arbitrary choice of zero. */
9829 bfd_vma sb = 0;
9830 bfd_vma residual;
9831 bfd_vma g_n;
9832 bfd_signed_vma signed_value;
9833 int group = 0;
9834
9835 /* Determine which group of bits to select. */
9836 switch (r_type)
9837 {
9838 case R_ARM_ALU_PC_G0_NC:
9839 case R_ARM_ALU_PC_G0:
9840 case R_ARM_ALU_SB_G0_NC:
9841 case R_ARM_ALU_SB_G0:
9842 group = 0;
9843 break;
9844
9845 case R_ARM_ALU_PC_G1_NC:
9846 case R_ARM_ALU_PC_G1:
9847 case R_ARM_ALU_SB_G1_NC:
9848 case R_ARM_ALU_SB_G1:
9849 group = 1;
9850 break;
9851
9852 case R_ARM_ALU_PC_G2:
9853 case R_ARM_ALU_SB_G2:
9854 group = 2;
9855 break;
9856
9857 default:
9858 abort ();
9859 }
9860
9861 /* If REL, extract the addend from the insn. If RELA, it will
9862 have already been fetched for us. */
9863 if (globals->use_rel)
9864 {
9865 int negative;
9866 bfd_vma constant = insn & 0xff;
9867 bfd_vma rotation = (insn & 0xf00) >> 8;
9868
9869 if (rotation == 0)
9870 signed_addend = constant;
9871 else
9872 {
9873 /* Compensate for the fact that in the instruction, the
9874 rotation is stored in multiples of 2 bits. */
9875 rotation *= 2;
9876
9877 /* Rotate "constant" right by "rotation" bits. */
9878 signed_addend = (constant >> rotation)
9879 | ((constant << (32 - rotation)) & 0xffffffff);
9880 }
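/* For example, an immediate field of 0x3f with a rotation field of
   0xe represents 0x3f rotated right by 28 bits within a 32-bit word,
   i.e. 0x3f0.  */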
9881
9882 /* Determine if the instruction is an ADD or a SUB.
9883 (For REL, this determines the sign of the addend.) */
9884 negative = identify_add_or_sub (insn);
9885 if (negative == 0)
9886 {
9887 (*_bfd_error_handler)
9888 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
9889 input_bfd, input_section,
9890 (long) rel->r_offset, howto->name);
9891 return bfd_reloc_overflow;
9892 }
9893
9894 signed_addend *= negative;
9895 }
9896
9897 /* Compute the value (X) to go in the place. */
9898 if (r_type == R_ARM_ALU_PC_G0_NC
9899 || r_type == R_ARM_ALU_PC_G1_NC
9900 || r_type == R_ARM_ALU_PC_G0
9901 || r_type == R_ARM_ALU_PC_G1
9902 || r_type == R_ARM_ALU_PC_G2)
9903 /* PC relative. */
9904 signed_value = value - pc + signed_addend;
9905 else
9906 /* Section base relative. */
9907 signed_value = value - sb + signed_addend;
9908
9909 /* If the target symbol is a Thumb function, then set the
9910 Thumb bit in the address. */
9911 if (branch_type == ST_BRANCH_TO_THUMB)
9912 signed_value |= 1;
9913
9914 /* Calculate the value of the relevant G_n, in encoded
9915 constant-with-rotation format. */
9916 g_n = calculate_group_reloc_mask (abs (signed_value), group,
9917 &residual);
9918
9919 /* Check for overflow if required. */
9920 if ((r_type == R_ARM_ALU_PC_G0
9921 || r_type == R_ARM_ALU_PC_G1
9922 || r_type == R_ARM_ALU_PC_G2
9923 || r_type == R_ARM_ALU_SB_G0
9924 || r_type == R_ARM_ALU_SB_G1
9925 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
9926 {
9927 (*_bfd_error_handler)
9928 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9929 input_bfd, input_section,
9930 (long) rel->r_offset, abs (signed_value), howto->name);
9931 return bfd_reloc_overflow;
9932 }
9933
9934 /* Mask out the value and the ADD/SUB part of the opcode; take care
9935 not to destroy the S bit. */
9936 insn &= 0xff1ff000;
9937
9938 /* Set the opcode according to whether the value to go in the
9939 place is negative. */
9940 if (signed_value < 0)
9941 insn |= 1 << 22;
9942 else
9943 insn |= 1 << 23;
9944
9945 /* Encode the offset. */
9946 insn |= g_n;
9947
9948 bfd_put_32 (input_bfd, insn, hit_data);
9949 }
9950 return bfd_reloc_ok;
9951
9952 case R_ARM_LDR_PC_G0:
9953 case R_ARM_LDR_PC_G1:
9954 case R_ARM_LDR_PC_G2:
9955 case R_ARM_LDR_SB_G0:
9956 case R_ARM_LDR_SB_G1:
9957 case R_ARM_LDR_SB_G2:
9958 {
9959 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9960 bfd_vma pc = input_section->output_section->vma
9961 + input_section->output_offset + rel->r_offset;
9962 bfd_vma sb = 0; /* See note above. */
9963 bfd_vma residual;
9964 bfd_signed_vma signed_value;
9965 int group = 0;
9966
9967 /* Determine which groups of bits to calculate. */
9968 switch (r_type)
9969 {
9970 case R_ARM_LDR_PC_G0:
9971 case R_ARM_LDR_SB_G0:
9972 group = 0;
9973 break;
9974
9975 case R_ARM_LDR_PC_G1:
9976 case R_ARM_LDR_SB_G1:
9977 group = 1;
9978 break;
9979
9980 case R_ARM_LDR_PC_G2:
9981 case R_ARM_LDR_SB_G2:
9982 group = 2;
9983 break;
9984
9985 default:
9986 abort ();
9987 }
9988
9989 /* If REL, extract the addend from the insn. If RELA, it will
9990 have already been fetched for us. */
9991 if (globals->use_rel)
9992 {
9993 int negative = (insn & (1 << 23)) ? 1 : -1;
9994 signed_addend = negative * (insn & 0xfff);
9995 }
9996
9997 /* Compute the value (X) to go in the place. */
9998 if (r_type == R_ARM_LDR_PC_G0
9999 || r_type == R_ARM_LDR_PC_G1
10000 || r_type == R_ARM_LDR_PC_G2)
10001 /* PC relative. */
10002 signed_value = value - pc + signed_addend;
10003 else
10004 /* Section base relative. */
10005 signed_value = value - sb + signed_addend;
10006
10007 /* Calculate the value of the relevant G_{n-1} to obtain
10008 the residual at that stage. */
10009 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10010
10011 /* Check for overflow. */
10012 if (residual >= 0x1000)
10013 {
10014 (*_bfd_error_handler)
10015 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10016 input_bfd, input_section,
10017 (long) rel->r_offset, abs (signed_value), howto->name);
10018 return bfd_reloc_overflow;
10019 }
10020
10021 /* Mask out the value and U bit. */
10022 insn &= 0xff7ff000;
10023
10024 /* Set the U bit if the value to go in the place is non-negative. */
10025 if (signed_value >= 0)
10026 insn |= 1 << 23;
10027
10028 /* Encode the offset. */
10029 insn |= residual;
10030
10031 bfd_put_32 (input_bfd, insn, hit_data);
10032 }
10033 return bfd_reloc_ok;
10034
10035 case R_ARM_LDRS_PC_G0:
10036 case R_ARM_LDRS_PC_G1:
10037 case R_ARM_LDRS_PC_G2:
10038 case R_ARM_LDRS_SB_G0:
10039 case R_ARM_LDRS_SB_G1:
10040 case R_ARM_LDRS_SB_G2:
10041 {
10042 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10043 bfd_vma pc = input_section->output_section->vma
10044 + input_section->output_offset + rel->r_offset;
10045 bfd_vma sb = 0; /* See note above. */
10046 bfd_vma residual;
10047 bfd_signed_vma signed_value;
10048 int group = 0;
10049
10050 /* Determine which groups of bits to calculate. */
10051 switch (r_type)
10052 {
10053 case R_ARM_LDRS_PC_G0:
10054 case R_ARM_LDRS_SB_G0:
10055 group = 0;
10056 break;
10057
10058 case R_ARM_LDRS_PC_G1:
10059 case R_ARM_LDRS_SB_G1:
10060 group = 1;
10061 break;
10062
10063 case R_ARM_LDRS_PC_G2:
10064 case R_ARM_LDRS_SB_G2:
10065 group = 2;
10066 break;
10067
10068 default:
10069 abort ();
10070 }
10071
10072 /* If REL, extract the addend from the insn. If RELA, it will
10073 have already been fetched for us. */
10074 if (globals->use_rel)
10075 {
10076 int negative = (insn & (1 << 23)) ? 1 : -1;
10077 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
10078 }
10079
10080 /* Compute the value (X) to go in the place. */
10081 if (r_type == R_ARM_LDRS_PC_G0
10082 || r_type == R_ARM_LDRS_PC_G1
10083 || r_type == R_ARM_LDRS_PC_G2)
10084 /* PC relative. */
10085 signed_value = value - pc + signed_addend;
10086 else
10087 /* Section base relative. */
10088 signed_value = value - sb + signed_addend;
10089
10090 /* Calculate the value of the relevant G_{n-1} to obtain
10091 the residual at that stage. */
10092 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10093
10094 /* Check for overflow. */
10095 if (residual >= 0x100)
10096 {
10097 (*_bfd_error_handler)
10098 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10099 input_bfd, input_section,
10100 (long) rel->r_offset, abs (signed_value), howto->name);
10101 return bfd_reloc_overflow;
10102 }
10103
10104 /* Mask out the value and U bit. */
10105 insn &= 0xff7ff0f0;
10106
10107 /* Set the U bit if the value to go in the place is non-negative. */
10108 if (signed_value >= 0)
10109 insn |= 1 << 23;
10110
10111 /* Encode the offset. */
10112 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
10113
10114 bfd_put_32 (input_bfd, insn, hit_data);
10115 }
10116 return bfd_reloc_ok;
10117
10118 case R_ARM_LDC_PC_G0:
10119 case R_ARM_LDC_PC_G1:
10120 case R_ARM_LDC_PC_G2:
10121 case R_ARM_LDC_SB_G0:
10122 case R_ARM_LDC_SB_G1:
10123 case R_ARM_LDC_SB_G2:
10124 {
10125 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10126 bfd_vma pc = input_section->output_section->vma
10127 + input_section->output_offset + rel->r_offset;
10128 bfd_vma sb = 0; /* See note above. */
10129 bfd_vma residual;
10130 bfd_signed_vma signed_value;
10131 int group = 0;
10132
10133 /* Determine which groups of bits to calculate. */
10134 switch (r_type)
10135 {
10136 case R_ARM_LDC_PC_G0:
10137 case R_ARM_LDC_SB_G0:
10138 group = 0;
10139 break;
10140
10141 case R_ARM_LDC_PC_G1:
10142 case R_ARM_LDC_SB_G1:
10143 group = 1;
10144 break;
10145
10146 case R_ARM_LDC_PC_G2:
10147 case R_ARM_LDC_SB_G2:
10148 group = 2;
10149 break;
10150
10151 default:
10152 abort ();
10153 }
10154
10155 /* If REL, extract the addend from the insn. If RELA, it will
10156 have already been fetched for us. */
10157 if (globals->use_rel)
10158 {
10159 int negative = (insn & (1 << 23)) ? 1 : -1;
10160 signed_addend = negative * ((insn & 0xff) << 2);
10161 }
10162
10163 /* Compute the value (X) to go in the place. */
10164 if (r_type == R_ARM_LDC_PC_G0
10165 || r_type == R_ARM_LDC_PC_G1
10166 || r_type == R_ARM_LDC_PC_G2)
10167 /* PC relative. */
10168 signed_value = value - pc + signed_addend;
10169 else
10170 /* Section base relative. */
10171 signed_value = value - sb + signed_addend;
10172
10173 /* Calculate the value of the relevant G_{n-1} to obtain
10174 the residual at that stage. */
10175 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10176
10177 /* Check for overflow. (The absolute value to go in the place must be
10178 divisible by four and, after having been divided by four, must
10179 fit in eight bits.) */
10180 if ((residual & 0x3) != 0 || residual >= 0x400)
10181 {
10182 (*_bfd_error_handler)
10183 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10184 input_bfd, input_section,
10185 (long) rel->r_offset, abs (signed_value), howto->name);
10186 return bfd_reloc_overflow;
10187 }
10188
10189 /* Mask out the value and U bit. */
10190 insn &= 0xff7fff00;
10191
10192 /* Set the U bit if the value to go in the place is non-negative. */
10193 if (signed_value >= 0)
10194 insn |= 1 << 23;
10195
10196 /* Encode the offset. */
10197 insn |= residual >> 2;
10198
10199 bfd_put_32 (input_bfd, insn, hit_data);
10200 }
10201 return bfd_reloc_ok;
10202
10203 default:
10204 return bfd_reloc_notsupported;
10205 }
10206 }
10207
10208 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
10209 static void
10210 arm_add_to_rel (bfd * abfd,
10211 bfd_byte * address,
10212 reloc_howto_type * howto,
10213 bfd_signed_vma increment)
10214 {
10215 bfd_signed_vma addend;
10216
10217 if (howto->type == R_ARM_THM_CALL
10218 || howto->type == R_ARM_THM_JUMP24)
10219 {
10220 int upper_insn, lower_insn;
10221 int upper, lower;
10222
10223 upper_insn = bfd_get_16 (abfd, address);
10224 lower_insn = bfd_get_16 (abfd, address + 2);
10225 upper = upper_insn & 0x7ff;
10226 lower = lower_insn & 0x7ff;
10227
10228 addend = (upper << 12) | (lower << 1);
10229 addend += increment;
10230 addend >>= 1;
10231
10232 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
10233 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
10234
10235 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
10236 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
10237 }
10238 else
10239 {
10240 bfd_vma contents;
10241
10242 contents = bfd_get_32 (abfd, address);
10243
10244 /* Get the (signed) value from the instruction. */
10245 addend = contents & howto->src_mask;
10246 if (addend & ((howto->src_mask + 1) >> 1))
10247 {
10248 bfd_signed_vma mask;
10249
10250 mask = -1;
10251 mask &= ~ howto->src_mask;
10252 addend |= mask;
10253 }
10254
10255 /* Add in the increment, (which is a byte value). */
10256 switch (howto->type)
10257 {
10258 default:
10259 addend += increment;
10260 break;
10261
10262 case R_ARM_PC24:
10263 case R_ARM_PLT32:
10264 case R_ARM_CALL:
10265 case R_ARM_JUMP24:
10266 addend <<= howto->size;
10267 addend += increment;
10268
10269 /* Should we check for overflow here ? */
10270
10271 /* Drop any undesired bits. */
10272 addend >>= howto->rightshift;
10273 break;
10274 }
10275
10276 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
10277
10278 bfd_put_32 (abfd, contents, address);
10279 }
10280 }
10281
10282 #define IS_ARM_TLS_RELOC(R_TYPE) \
10283 ((R_TYPE) == R_ARM_TLS_GD32 \
10284 || (R_TYPE) == R_ARM_TLS_LDO32 \
10285 || (R_TYPE) == R_ARM_TLS_LDM32 \
10286 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
10287 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
10288 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
10289 || (R_TYPE) == R_ARM_TLS_LE32 \
10290 || (R_TYPE) == R_ARM_TLS_IE32 \
10291 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
10292
10293 /* Specific set of relocations for the gnu tls dialect. */
10294 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
10295 ((R_TYPE) == R_ARM_TLS_GOTDESC \
10296 || (R_TYPE) == R_ARM_TLS_CALL \
10297 || (R_TYPE) == R_ARM_THM_TLS_CALL \
10298 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
10299 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
10300
10301 /* Relocate an ARM ELF section. */
10302
10303 static bfd_boolean
10304 elf32_arm_relocate_section (bfd * output_bfd,
10305 struct bfd_link_info * info,
10306 bfd * input_bfd,
10307 asection * input_section,
10308 bfd_byte * contents,
10309 Elf_Internal_Rela * relocs,
10310 Elf_Internal_Sym * local_syms,
10311 asection ** local_sections)
10312 {
10313 Elf_Internal_Shdr *symtab_hdr;
10314 struct elf_link_hash_entry **sym_hashes;
10315 Elf_Internal_Rela *rel;
10316 Elf_Internal_Rela *relend;
10317 const char *name;
10318 struct elf32_arm_link_hash_table * globals;
10319
10320 globals = elf32_arm_hash_table (info);
10321 if (globals == NULL)
10322 return FALSE;
10323
10324 symtab_hdr = & elf_symtab_hdr (input_bfd);
10325 sym_hashes = elf_sym_hashes (input_bfd);
10326
10327 rel = relocs;
10328 relend = relocs + input_section->reloc_count;
10329 for (; rel < relend; rel++)
10330 {
10331 int r_type;
10332 reloc_howto_type * howto;
10333 unsigned long r_symndx;
10334 Elf_Internal_Sym * sym;
10335 asection * sec;
10336 struct elf_link_hash_entry * h;
10337 bfd_vma relocation;
10338 bfd_reloc_status_type r;
10339 arelent bfd_reloc;
10340 char sym_type;
10341 bfd_boolean unresolved_reloc = FALSE;
10342 char *error_message = NULL;
10343
10344 r_symndx = ELF32_R_SYM (rel->r_info);
10345 r_type = ELF32_R_TYPE (rel->r_info);
10346 r_type = arm_real_reloc_type (globals, r_type);
10347
10348 if ( r_type == R_ARM_GNU_VTENTRY
10349 || r_type == R_ARM_GNU_VTINHERIT)
10350 continue;
10351
10352 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
10353 howto = bfd_reloc.howto;
10354
10355 h = NULL;
10356 sym = NULL;
10357 sec = NULL;
10358
10359 if (r_symndx < symtab_hdr->sh_info)
10360 {
10361 sym = local_syms + r_symndx;
10362 sym_type = ELF32_ST_TYPE (sym->st_info);
10363 sec = local_sections[r_symndx];
10364
10365 /* An object file might have a reference to a local
10366 undefined symbol. This is a daft object file, but we
10367 should at least do something about it. V4BX & NONE
10368 relocations do not use the symbol and are explicitly
10369 allowed to use the undefined symbol, so allow those.
10370 Likewise for relocations against STN_UNDEF. */
10371 if (r_type != R_ARM_V4BX
10372 && r_type != R_ARM_NONE
10373 && r_symndx != STN_UNDEF
10374 && bfd_is_und_section (sec)
10375 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
10376 {
10377 if (!info->callbacks->undefined_symbol
10378 (info, bfd_elf_string_from_elf_section
10379 (input_bfd, symtab_hdr->sh_link, sym->st_name),
10380 input_bfd, input_section,
10381 rel->r_offset, TRUE))
10382 return FALSE;
10383 }
10384
10385 if (globals->use_rel)
10386 {
10387 relocation = (sec->output_section->vma
10388 + sec->output_offset
10389 + sym->st_value);
10390 if (!info->relocatable
10391 && (sec->flags & SEC_MERGE)
10392 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10393 {
10394 asection *msec;
10395 bfd_vma addend, value;
10396
10397 switch (r_type)
10398 {
10399 case R_ARM_MOVW_ABS_NC:
10400 case R_ARM_MOVT_ABS:
10401 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10402 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
10403 addend = (addend ^ 0x8000) - 0x8000;
10404 break;
10405
10406 case R_ARM_THM_MOVW_ABS_NC:
10407 case R_ARM_THM_MOVT_ABS:
10408 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
10409 << 16;
10410 value |= bfd_get_16 (input_bfd,
10411 contents + rel->r_offset + 2);
10412 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
10413 | ((value & 0x04000000) >> 15);
10414 addend = (addend ^ 0x8000) - 0x8000;
10415 break;
10416
10417 default:
10418 if (howto->rightshift
10419 || (howto->src_mask & (howto->src_mask + 1)))
10420 {
10421 (*_bfd_error_handler)
10422 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
10423 input_bfd, input_section,
10424 (long) rel->r_offset, howto->name);
10425 return FALSE;
10426 }
10427
10428 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10429
10430 /* Get the (signed) value from the instruction. */
10431 addend = value & howto->src_mask;
10432 if (addend & ((howto->src_mask + 1) >> 1))
10433 {
10434 bfd_signed_vma mask;
10435
10436 mask = -1;
10437 mask &= ~ howto->src_mask;
10438 addend |= mask;
10439 }
10440 break;
10441 }
10442
10443 msec = sec;
10444 addend =
10445 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
10446 - relocation;
10447 addend += msec->output_section->vma + msec->output_offset;
10448
10449 /* Cases here must match those in the preceding
10450 switch statement. */
10451 switch (r_type)
10452 {
10453 case R_ARM_MOVW_ABS_NC:
10454 case R_ARM_MOVT_ABS:
10455 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
10456 | (addend & 0xfff);
10457 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10458 break;
10459
10460 case R_ARM_THM_MOVW_ABS_NC:
10461 case R_ARM_THM_MOVT_ABS:
10462 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
10463 | (addend & 0xff) | ((addend & 0x0800) << 15);
10464 bfd_put_16 (input_bfd, value >> 16,
10465 contents + rel->r_offset);
10466 bfd_put_16 (input_bfd, value,
10467 contents + rel->r_offset + 2);
10468 break;
10469
10470 default:
10471 value = (value & ~ howto->dst_mask)
10472 | (addend & howto->dst_mask);
10473 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10474 break;
10475 }
10476 }
10477 }
10478 else
10479 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
10480 }
10481 else
10482 {
10483 bfd_boolean warned;
10484
10485 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
10486 r_symndx, symtab_hdr, sym_hashes,
10487 h, sec, relocation,
10488 unresolved_reloc, warned);
10489
10490 sym_type = h->type;
10491 }
10492
10493 if (sec != NULL && discarded_section (sec))
10494 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
10495 rel, 1, relend, howto, 0, contents);
10496
10497 if (info->relocatable)
10498 {
10499 /* This is a relocatable link. We don't have to change
10500 anything, unless the reloc is against a section symbol,
10501 in which case we have to adjust according to where the
10502 section symbol winds up in the output section. */
10503 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10504 {
10505 if (globals->use_rel)
10506 arm_add_to_rel (input_bfd, contents + rel->r_offset,
10507 howto, (bfd_signed_vma) sec->output_offset);
10508 else
10509 rel->r_addend += sec->output_offset;
10510 }
10511 continue;
10512 }
10513
10514 if (h != NULL)
10515 name = h->root.root.string;
10516 else
10517 {
10518 name = (bfd_elf_string_from_elf_section
10519 (input_bfd, symtab_hdr->sh_link, sym->st_name));
10520 if (name == NULL || *name == '\0')
10521 name = bfd_section_name (input_bfd, sec);
10522 }
10523
10524 if (r_symndx != STN_UNDEF
10525 && r_type != R_ARM_NONE
10526 && (h == NULL
10527 || h->root.type == bfd_link_hash_defined
10528 || h->root.type == bfd_link_hash_defweak)
10529 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
10530 {
10531 (*_bfd_error_handler)
10532 ((sym_type == STT_TLS
10533 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
10534 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
10535 input_bfd,
10536 input_section,
10537 (long) rel->r_offset,
10538 howto->name,
10539 name);
10540 }
10541
10542 /* We call elf32_arm_final_link_relocate unless we're completely
10543 done, i.e., the relaxation produced the final output we want,
10544 and we won't let anybody mess with it. Also, we have to do
10545 addend adjustments in case of an R_ARM_TLS_GOTDESC relocation
10546 both in relaxed and non-relaxed cases.  */
10547 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
10548 || (IS_ARM_TLS_GNU_RELOC (r_type)
10549 && !((h ? elf32_arm_hash_entry (h)->tls_type :
10550 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
10551 & GOT_TLS_GDESC)))
10552 {
10553 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
10554 contents, rel, h == NULL);
10555 /* This may have been marked unresolved because it came from
10556 a shared library. But we've just dealt with that. */
10557 unresolved_reloc = 0;
10558 }
10559 else
10560 r = bfd_reloc_continue;
10561
10562 if (r == bfd_reloc_continue)
10563 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
10564 input_section, contents, rel,
10565 relocation, info, sec, name, sym_type,
10566 (h ? h->target_internal
10567 : ARM_SYM_BRANCH_TYPE (sym)), h,
10568 &unresolved_reloc, &error_message);
10569
10570 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
10571 because such sections are not SEC_ALLOC and thus ld.so will
10572 not process them. */
10573 if (unresolved_reloc
10574 && !((input_section->flags & SEC_DEBUGGING) != 0
10575 && h->def_dynamic)
10576 && _bfd_elf_section_offset (output_bfd, info, input_section,
10577 rel->r_offset) != (bfd_vma) -1)
10578 {
10579 (*_bfd_error_handler)
10580 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
10581 input_bfd,
10582 input_section,
10583 (long) rel->r_offset,
10584 howto->name,
10585 h->root.root.string);
10586 return FALSE;
10587 }
10588
10589 if (r != bfd_reloc_ok)
10590 {
10591 switch (r)
10592 {
10593 case bfd_reloc_overflow:
10594 /* If the overflowing reloc was to an undefined symbol,
10595 we have already printed one error message and there
10596 is no point complaining again. */
10597 if ((! h ||
10598 h->root.type != bfd_link_hash_undefined)
10599 && (!((*info->callbacks->reloc_overflow)
10600 (info, (h ? &h->root : NULL), name, howto->name,
10601 (bfd_vma) 0, input_bfd, input_section,
10602 rel->r_offset))))
10603 return FALSE;
10604 break;
10605
10606 case bfd_reloc_undefined:
10607 if (!((*info->callbacks->undefined_symbol)
10608 (info, name, input_bfd, input_section,
10609 rel->r_offset, TRUE)))
10610 return FALSE;
10611 break;
10612
10613 case bfd_reloc_outofrange:
10614 error_message = _("out of range");
10615 goto common_error;
10616
10617 case bfd_reloc_notsupported:
10618 error_message = _("unsupported relocation");
10619 goto common_error;
10620
10621 case bfd_reloc_dangerous:
10622 /* error_message should already be set. */
10623 goto common_error;
10624
10625 default:
10626 error_message = _("unknown error");
10627 /* Fall through. */
10628
10629 common_error:
10630 BFD_ASSERT (error_message != NULL);
10631 if (!((*info->callbacks->reloc_dangerous)
10632 (info, error_message, input_bfd, input_section,
10633 rel->r_offset)))
10634 return FALSE;
10635 break;
10636 }
10637 }
10638 }
10639
10640 return TRUE;
10641 }
10642
10643 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
10644 adds the edit to the start of the list. (The list must be built in order of
10645 ascending TINDEX: the function's callers are primarily responsible for
10646 maintaining that condition). */
10647
10648 static void
10649 add_unwind_table_edit (arm_unwind_table_edit **head,
10650 arm_unwind_table_edit **tail,
10651 arm_unwind_edit_type type,
10652 asection *linked_section,
10653 unsigned int tindex)
10654 {
10655 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
10656 xmalloc (sizeof (arm_unwind_table_edit));
10657
10658 new_edit->type = type;
10659 new_edit->linked_section = linked_section;
10660 new_edit->index = tindex;
10661
10662 if (tindex > 0)
10663 {
10664 new_edit->next = NULL;
10665
10666 if (*tail)
10667 (*tail)->next = new_edit;
10668
10669 (*tail) = new_edit;
10670
10671 if (!*head)
10672 (*head) = new_edit;
10673 }
10674 else
10675 {
10676 new_edit->next = *head;
10677
10678 if (!*tail)
10679 *tail = new_edit;
10680
10681 *head = new_edit;
10682 }
10683 }
10684
10685 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
10686
10687 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
10688 static void
10689 adjust_exidx_size(asection *exidx_sec, int adjust)
10690 {
10691 asection *out_sec;
10692
10693 if (!exidx_sec->rawsize)
10694 exidx_sec->rawsize = exidx_sec->size;
10695
10696 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
10697 out_sec = exidx_sec->output_section;
10698 /* Adjust size of output section. */
10699 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
10700 }
10701
10702 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
10703 static void
10704 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
10705 {
10706 struct _arm_elf_section_data *exidx_arm_data;
10707
10708 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10709 add_unwind_table_edit (
10710 &exidx_arm_data->u.exidx.unwind_edit_list,
10711 &exidx_arm_data->u.exidx.unwind_edit_tail,
10712 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
10713
10714 adjust_exidx_size(exidx_sec, 8);
10715 }
10716
10717 /* Scan .ARM.exidx tables, and create a list describing edits which should be
10718 made to those tables, such that:
10719
10720 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
10721 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
10722 codes which have been inlined into the index).
10723
10724 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
10725
10726 The edits are applied when the tables are written
10727 (in elf32_arm_write_section). */
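/* For example, if two adjacent functions both unwind with the same
   inlined opcodes, the second (duplicate) index entry is recorded as a
   DELETE_EXIDX_ENTRY edit, and a text section without any unwind data
   gets an INSERT_EXIDX_CANTUNWIND_AT_END edit appended to the previous
   EXIDX section.  */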
10728
10729 bfd_boolean
10730 elf32_arm_fix_exidx_coverage (asection **text_section_order,
10731 unsigned int num_text_sections,
10732 struct bfd_link_info *info,
10733 bfd_boolean merge_exidx_entries)
10734 {
10735 bfd *inp;
10736 unsigned int last_second_word = 0, i;
10737 asection *last_exidx_sec = NULL;
10738 asection *last_text_sec = NULL;
10739 int last_unwind_type = -1;
10740
10741 /* Walk over all EXIDX sections, and create backlinks from the corresponding
10742 text sections. */
10743 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
10744 {
10745 asection *sec;
10746
10747 for (sec = inp->sections; sec != NULL; sec = sec->next)
10748 {
10749 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
10750 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
10751
10752 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
10753 continue;
10754
10755 if (elf_sec->linked_to)
10756 {
10757 Elf_Internal_Shdr *linked_hdr
10758 = &elf_section_data (elf_sec->linked_to)->this_hdr;
10759 struct _arm_elf_section_data *linked_sec_arm_data
10760 = get_arm_elf_section_data (linked_hdr->bfd_section);
10761
10762 if (linked_sec_arm_data == NULL)
10763 continue;
10764
10765 /* Link this .ARM.exidx section back from the text section it
10766 describes. */
10767 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
10768 }
10769 }
10770 }
10771
10772 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
10773 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
10774 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
10775
10776 for (i = 0; i < num_text_sections; i++)
10777 {
10778 asection *sec = text_section_order[i];
10779 asection *exidx_sec;
10780 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
10781 struct _arm_elf_section_data *exidx_arm_data;
10782 bfd_byte *contents = NULL;
10783 int deleted_exidx_bytes = 0;
10784 bfd_vma j;
10785 arm_unwind_table_edit *unwind_edit_head = NULL;
10786 arm_unwind_table_edit *unwind_edit_tail = NULL;
10787 Elf_Internal_Shdr *hdr;
10788 bfd *ibfd;
10789
10790 if (arm_data == NULL)
10791 continue;
10792
10793 exidx_sec = arm_data->u.text.arm_exidx_sec;
10794 if (exidx_sec == NULL)
10795 {
10796 /* Section has no unwind data. */
10797 if (last_unwind_type == 0 || !last_exidx_sec)
10798 continue;
10799
10800 /* Ignore zero sized sections. */
10801 if (sec->size == 0)
10802 continue;
10803
10804 insert_cantunwind_after(last_text_sec, last_exidx_sec);
10805 last_unwind_type = 0;
10806 continue;
10807 }
10808
10809 /* Skip /DISCARD/ sections. */
10810 if (bfd_is_abs_section (exidx_sec->output_section))
10811 continue;
10812
10813 hdr = &elf_section_data (exidx_sec)->this_hdr;
10814 if (hdr->sh_type != SHT_ARM_EXIDX)
10815 continue;
10816
10817 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10818 if (exidx_arm_data == NULL)
10819 continue;
10820
10821 ibfd = exidx_sec->owner;
10822
10823 if (hdr->contents != NULL)
10824 contents = hdr->contents;
10825 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
10826 /* An error? */
10827 continue;
10828
10829 for (j = 0; j < hdr->sh_size; j += 8)
10830 {
10831 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
10832 int unwind_type;
10833 int elide = 0;
10834
10835 /* An EXIDX_CANTUNWIND entry. */
10836 if (second_word == 1)
10837 {
10838 if (last_unwind_type == 0)
10839 elide = 1;
10840 unwind_type = 0;
10841 }
10842 /* Inlined unwinding data. Merge if equal to previous. */
10843 else if ((second_word & 0x80000000) != 0)
10844 {
10845 if (merge_exidx_entries
10846 && last_second_word == second_word && last_unwind_type == 1)
10847 elide = 1;
10848 unwind_type = 1;
10849 last_second_word = second_word;
10850 }
10851 /* Normal table entry. In theory we could merge these too,
10852 but duplicate entries are likely to be much less common. */
10853 else
10854 unwind_type = 2;
10855
10856 if (elide)
10857 {
10858 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
10859 DELETE_EXIDX_ENTRY, NULL, j / 8);
10860
10861 deleted_exidx_bytes += 8;
10862 }
10863
10864 last_unwind_type = unwind_type;
10865 }
10866
10867 /* Free contents if we allocated it ourselves. */
10868 if (contents != hdr->contents)
10869 free (contents);
10870
10871 /* Record edits to be applied later (in elf32_arm_write_section). */
10872 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
10873 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
10874
10875 if (deleted_exidx_bytes > 0)
10876 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
10877
10878 last_exidx_sec = exidx_sec;
10879 last_text_sec = sec;
10880 }
10881
10882 /* Add terminating CANTUNWIND entry. */
10883 if (last_exidx_sec && last_unwind_type != 0)
10884 insert_cantunwind_after(last_text_sec, last_exidx_sec);
10885
10886 return TRUE;
10887 }
10888
10889 static bfd_boolean
10890 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
10891 bfd *ibfd, const char *name)
10892 {
10893 asection *sec, *osec;
10894
10895 sec = bfd_get_linker_section (ibfd, name);
10896 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
10897 return TRUE;
10898
10899 osec = sec->output_section;
10900 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
10901 return TRUE;
10902
10903 if (! bfd_set_section_contents (obfd, osec, sec->contents,
10904 sec->output_offset, sec->size))
10905 return FALSE;
10906
10907 return TRUE;
10908 }
10909
10910 static bfd_boolean
10911 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
10912 {
10913 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
10914 asection *sec, *osec;
10915
10916 if (globals == NULL)
10917 return FALSE;
10918
10919 /* Invoke the regular ELF backend linker to do all the work. */
10920 if (!bfd_elf_final_link (abfd, info))
10921 return FALSE;
10922
10923 /* Process stub sections (e.g. BE8 encoding, ...).  */
10924 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
10925 int i;
10926 for (i = 0; i < htab->top_id; i++)
10927 {
10928 sec = htab->stub_group[i].stub_sec;
10929 /* Only process it once, in its link_sec slot. */
10930 if (sec && i == htab->stub_group[i].link_sec->id)
10931 {
10932 osec = sec->output_section;
10933 elf32_arm_write_section (abfd, info, sec, sec->contents);
10934 if (! bfd_set_section_contents (abfd, osec, sec->contents,
10935 sec->output_offset, sec->size))
10936 return FALSE;
10937 }
10938 }
10939
10940 /* Write out any glue sections now that we have created all the
10941 stubs. */
10942 if (globals->bfd_of_glue_owner != NULL)
10943 {
10944 if (! elf32_arm_output_glue_section (info, abfd,
10945 globals->bfd_of_glue_owner,
10946 ARM2THUMB_GLUE_SECTION_NAME))
10947 return FALSE;
10948
10949 if (! elf32_arm_output_glue_section (info, abfd,
10950 globals->bfd_of_glue_owner,
10951 THUMB2ARM_GLUE_SECTION_NAME))
10952 return FALSE;
10953
10954 if (! elf32_arm_output_glue_section (info, abfd,
10955 globals->bfd_of_glue_owner,
10956 VFP11_ERRATUM_VENEER_SECTION_NAME))
10957 return FALSE;
10958
10959 if (! elf32_arm_output_glue_section (info, abfd,
10960 globals->bfd_of_glue_owner,
10961 ARM_BX_GLUE_SECTION_NAME))
10962 return FALSE;
10963 }
10964
10965 return TRUE;
10966 }
10967
10968 /* Return a best guess for the machine number based on the attributes. */
10969
10970 static unsigned int
10971 bfd_arm_get_mach_from_attributes (bfd * abfd)
10972 {
10973 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
10974
10975 switch (arch)
10976 {
10977 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
10978 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
10979 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
10980
10981 case TAG_CPU_ARCH_V5TE:
10982 {
10983 char * name;
10984
10985 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
10986 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
10987
10988 if (name)
10989 {
10990 if (strcmp (name, "IWMMXT2") == 0)
10991 return bfd_mach_arm_iWMMXt2;
10992
10993 if (strcmp (name, "IWMMXT") == 0)
10994 return bfd_mach_arm_iWMMXt;
10995 }
10996
10997 return bfd_mach_arm_5TE;
10998 }
10999
11000 default:
11001 return bfd_mach_arm_unknown;
11002 }
11003 }
11004
11005 /* Set the right machine number. */
11006
11007 static bfd_boolean
11008 elf32_arm_object_p (bfd *abfd)
11009 {
11010 unsigned int mach;
11011
11012 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
11013
11014 if (mach == bfd_mach_arm_unknown)
11015 {
11016 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
11017 mach = bfd_mach_arm_ep9312;
11018 else
11019 mach = bfd_arm_get_mach_from_attributes (abfd);
11020 }
11021
11022 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
11023 return TRUE;
11024 }
11025
11026 /* Function to keep ARM specific flags in the ELF header. */
11027
11028 static bfd_boolean
11029 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
11030 {
11031 if (elf_flags_init (abfd)
11032 && elf_elfheader (abfd)->e_flags != flags)
11033 {
11034 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
11035 {
11036 if (flags & EF_ARM_INTERWORK)
11037 (*_bfd_error_handler)
11038 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
11039 abfd);
11040 else
11041 _bfd_error_handler
11042 (_("Warning: Clearing the interworking flag of %B due to outside request"),
11043 abfd);
11044 }
11045 }
11046 else
11047 {
11048 elf_elfheader (abfd)->e_flags = flags;
11049 elf_flags_init (abfd) = TRUE;
11050 }
11051
11052 return TRUE;
11053 }
11054
11055 /* Copy backend specific data from one object module to another. */
11056
11057 static bfd_boolean
11058 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
11059 {
11060 flagword in_flags;
11061 flagword out_flags;
11062
11063 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
11064 return TRUE;
11065
11066 in_flags = elf_elfheader (ibfd)->e_flags;
11067 out_flags = elf_elfheader (obfd)->e_flags;
11068
11069 if (elf_flags_init (obfd)
11070 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
11071 && in_flags != out_flags)
11072 {
11073 /* Cannot mix APCS26 and APCS32 code. */
11074 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
11075 return FALSE;
11076
11077 /* Cannot mix float APCS and non-float APCS code. */
11078 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
11079 return FALSE;
11080
11081 /* If the src and dest have different interworking flags
11082 then turn off the interworking bit. */
11083 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
11084 {
11085 if (out_flags & EF_ARM_INTERWORK)
11086 _bfd_error_handler
11087 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
11088 obfd, ibfd);
11089
11090 in_flags &= ~EF_ARM_INTERWORK;
11091 }
11092
11093 /* Likewise for PIC, though don't warn for this case. */
11094 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
11095 in_flags &= ~EF_ARM_PIC;
11096 }
11097
11098 elf_elfheader (obfd)->e_flags = in_flags;
11099 elf_flags_init (obfd) = TRUE;
11100
11101 /* Also copy the EI_OSABI field. */
11102 elf_elfheader (obfd)->e_ident[EI_OSABI] =
11103 elf_elfheader (ibfd)->e_ident[EI_OSABI];
11104
11105 /* Copy object attributes. */
11106 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11107
11108 return TRUE;
11109 }
11110
11111 /* Values for Tag_ABI_PCS_R9_use. */
11112 enum
11113 {
11114 AEABI_R9_V6,
11115 AEABI_R9_SB,
11116 AEABI_R9_TLS,
11117 AEABI_R9_unused
11118 };
11119
11120 /* Values for Tag_ABI_PCS_RW_data. */
11121 enum
11122 {
11123 AEABI_PCS_RW_data_absolute,
11124 AEABI_PCS_RW_data_PCrel,
11125 AEABI_PCS_RW_data_SBrel,
11126 AEABI_PCS_RW_data_unused
11127 };
11128
11129 /* Values for Tag_ABI_enum_size. */
11130 enum
11131 {
11132 AEABI_enum_unused,
11133 AEABI_enum_short,
11134 AEABI_enum_wide,
11135 AEABI_enum_forced_wide
11136 };
11137
11138 /* Determine whether an object attribute tag takes an integer, a
11139 string or both. */
11140
11141 static int
11142 elf32_arm_obj_attrs_arg_type (int tag)
11143 {
11144 if (tag == Tag_compatibility)
11145 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
11146 else if (tag == Tag_nodefaults)
11147 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
11148 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
11149 return ATTR_TYPE_FLAG_STR_VAL;
11150 else if (tag < 32)
11151 return ATTR_TYPE_FLAG_INT_VAL;
11152 else
11153 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
11154 }
11155
11156 /* The ABI defines that Tag_conformance should be emitted first, and that
11157 Tag_nodefaults should be second (if either is defined). This sets those
11158 two positions, and bumps up the position of all the remaining tags to
11159 compensate. */
11160 static int
11161 elf32_arm_obj_attrs_order (int num)
11162 {
11163 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
11164 return Tag_conformance;
11165 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
11166 return Tag_nodefaults;
11167 if ((num - 2) < Tag_nodefaults)
11168 return num - 2;
11169 if ((num - 1) < Tag_conformance)
11170 return num - 1;
11171 return num;
11172 }
11173
11174 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
11175 static bfd_boolean
11176 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
11177 {
11178 if ((tag & 127) < 64)
11179 {
11180 _bfd_error_handler
11181 (_("%B: Unknown mandatory EABI object attribute %d"),
11182 abfd, tag);
11183 bfd_set_error (bfd_error_bad_value);
11184 return FALSE;
11185 }
11186 else
11187 {
11188 _bfd_error_handler
11189 (_("Warning: %B: Unknown EABI object attribute %d"),
11190 abfd, tag);
11191 return TRUE;
11192 }
11193 }
11194
11195 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
11196 Returns -1 if no architecture could be read. */
11197
11198 static int
11199 get_secondary_compatible_arch (bfd *abfd)
11200 {
11201 obj_attribute *attr =
11202 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11203
11204 /* Note: the tag and its argument below are uleb128 values, though
11205 currently-defined values fit in one byte for each. */
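/* A set continuation bit (0x80) in the value byte would indicate a
   multi-byte uleb128 encoding, which the check below deliberately rejects. */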
11206 if (attr->s
11207 && attr->s[0] == Tag_CPU_arch
11208 && (attr->s[1] & 128) != 128
11209 && attr->s[2] == 0)
11210 return attr->s[1];
11211
11212 /* This tag is "safely ignorable", so don't complain if it looks funny. */
11213 return -1;
11214 }
11215
11216 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
11217 The tag is removed if ARCH is -1. */
11218
11219 static void
11220 set_secondary_compatible_arch (bfd *abfd, int arch)
11221 {
11222 obj_attribute *attr =
11223 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11224
11225 if (arch == -1)
11226 {
11227 attr->s = NULL;
11228 return;
11229 }
11230
11231 /* Note: the tag and its argument below are uleb128 values, though
11232 currently-defined values fit in one byte for each. */
11233 if (!attr->s)
11234 attr->s = (char *) bfd_alloc (abfd, 3);
11235 attr->s[0] = Tag_CPU_arch;
11236 attr->s[1] = arch;
11237 attr->s[2] = '\0';
11238 }
11239
11240 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
11241 into account. */
11242
11243 static int
11244 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
11245 int newtag, int secondary_compat)
11246 {
11247 #define T(X) TAG_CPU_ARCH_##X
11248 int tagl, tagh, result;
11249 const int v6t2[] =
11250 {
11251 T(V6T2), /* PRE_V4. */
11252 T(V6T2), /* V4. */
11253 T(V6T2), /* V4T. */
11254 T(V6T2), /* V5T. */
11255 T(V6T2), /* V5TE. */
11256 T(V6T2), /* V5TEJ. */
11257 T(V6T2), /* V6. */
11258 T(V7), /* V6KZ. */
11259 T(V6T2) /* V6T2. */
11260 };
11261 const int v6k[] =
11262 {
11263 T(V6K), /* PRE_V4. */
11264 T(V6K), /* V4. */
11265 T(V6K), /* V4T. */
11266 T(V6K), /* V5T. */
11267 T(V6K), /* V5TE. */
11268 T(V6K), /* V5TEJ. */
11269 T(V6K), /* V6. */
11270 T(V6KZ), /* V6KZ. */
11271 T(V7), /* V6T2. */
11272 T(V6K) /* V6K. */
11273 };
11274 const int v7[] =
11275 {
11276 T(V7), /* PRE_V4. */
11277 T(V7), /* V4. */
11278 T(V7), /* V4T. */
11279 T(V7), /* V5T. */
11280 T(V7), /* V5TE. */
11281 T(V7), /* V5TEJ. */
11282 T(V7), /* V6. */
11283 T(V7), /* V6KZ. */
11284 T(V7), /* V6T2. */
11285 T(V7), /* V6K. */
11286 T(V7) /* V7. */
11287 };
11288 const int v6_m[] =
11289 {
11290 -1, /* PRE_V4. */
11291 -1, /* V4. */
11292 T(V6K), /* V4T. */
11293 T(V6K), /* V5T. */
11294 T(V6K), /* V5TE. */
11295 T(V6K), /* V5TEJ. */
11296 T(V6K), /* V6. */
11297 T(V6KZ), /* V6KZ. */
11298 T(V7), /* V6T2. */
11299 T(V6K), /* V6K. */
11300 T(V7), /* V7. */
11301 T(V6_M) /* V6_M. */
11302 };
11303 const int v6s_m[] =
11304 {
11305 -1, /* PRE_V4. */
11306 -1, /* V4. */
11307 T(V6K), /* V4T. */
11308 T(V6K), /* V5T. */
11309 T(V6K), /* V5TE. */
11310 T(V6K), /* V5TEJ. */
11311 T(V6K), /* V6. */
11312 T(V6KZ), /* V6KZ. */
11313 T(V7), /* V6T2. */
11314 T(V6K), /* V6K. */
11315 T(V7), /* V7. */
11316 T(V6S_M), /* V6_M. */
11317 T(V6S_M) /* V6S_M. */
11318 };
11319 const int v7e_m[] =
11320 {
11321 -1, /* PRE_V4. */
11322 -1, /* V4. */
11323 T(V7E_M), /* V4T. */
11324 T(V7E_M), /* V5T. */
11325 T(V7E_M), /* V5TE. */
11326 T(V7E_M), /* V5TEJ. */
11327 T(V7E_M), /* V6. */
11328 T(V7E_M), /* V6KZ. */
11329 T(V7E_M), /* V6T2. */
11330 T(V7E_M), /* V6K. */
11331 T(V7E_M), /* V7. */
11332 T(V7E_M), /* V6_M. */
11333 T(V7E_M), /* V6S_M. */
11334 T(V7E_M) /* V7E_M. */
11335 };
11336 const int v8[] =
11337 {
11338 T(V8), /* PRE_V4. */
11339 T(V8), /* V4. */
11340 T(V8), /* V4T. */
11341 T(V8), /* V5T. */
11342 T(V8), /* V5TE. */
11343 T(V8), /* V5TEJ. */
11344 T(V8), /* V6. */
11345 T(V8), /* V6KZ. */
11346 T(V8), /* V6T2. */
11347 T(V8), /* V6K. */
11348 T(V8), /* V7. */
11349 T(V8), /* V6_M. */
11350 T(V8), /* V6S_M. */
11351 T(V8), /* V7E_M. */
11352 T(V8) /* V8. */
11353 };
11354 const int v4t_plus_v6_m[] =
11355 {
11356 -1, /* PRE_V4. */
11357 -1, /* V4. */
11358 T(V4T), /* V4T. */
11359 T(V5T), /* V5T. */
11360 T(V5TE), /* V5TE. */
11361 T(V5TEJ), /* V5TEJ. */
11362 T(V6), /* V6. */
11363 T(V6KZ), /* V6KZ. */
11364 T(V6T2), /* V6T2. */
11365 T(V6K), /* V6K. */
11366 T(V7), /* V7. */
11367 T(V6_M), /* V6_M. */
11368 T(V6S_M), /* V6S_M. */
11369 T(V7E_M), /* V7E_M. */
11370 T(V8), /* V8. */
11371 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11372 };
11373 const int *comb[] =
11374 {
11375 v6t2,
11376 v6k,
11377 v7,
11378 v6_m,
11379 v6s_m,
11380 v7e_m,
11381 v8,
11382 /* Pseudo-architecture. */
11383 v4t_plus_v6_m
11384 };
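/* The tables above are indexed as comb[tagh - T(V6T2)][tagl]: one row per
   "higher" architecture from V6T2 upwards, one column per "lower"
   architecture. An entry of -1 marks a combination that cannot be merged. */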
11385
11386 /* Check we've not got a higher architecture than we know about. */
11387
11388 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11389 {
11390 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11391 return -1;
11392 }
11393
11394 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11395
11396 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11397 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11398 oldtag = T(V4T_PLUS_V6_M);
11399
11400 /* And override the new tag if we have a Tag_also_compatible_with on the
11401 input. */
11402
11403 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11404 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11405 newtag = T(V4T_PLUS_V6_M);
11406
11407 tagl = (oldtag < newtag) ? oldtag : newtag;
11408 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11409
11410 /* Architectures before V6KZ add features monotonically. */
11411 if (tagh <= TAG_CPU_ARCH_V6KZ)
11412 return result;
11413
11414 result = comb[tagh - T(V6T2)][tagl];
11415
11416 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11417 as the canonical version. */
11418 if (result == T(V4T_PLUS_V6_M))
11419 {
11420 result = T(V4T);
11421 *secondary_compat_out = T(V6_M);
11422 }
11423 else
11424 *secondary_compat_out = -1;
11425
11426 if (result == -1)
11427 {
11428 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11429 ibfd, oldtag, newtag);
11430 return -1;
11431 }
11432
11433 return result;
11434 #undef T
11435 }
11436
11437 /* Query the attributes object to see if integer divide instructions may
11438 be present in the object. */
11439 static bfd_boolean
11440 elf32_arm_attributes_accept_div (const obj_attribute *attr)
11441 {
11442 int arch = attr[Tag_CPU_arch].i;
11443 int profile = attr[Tag_CPU_arch_profile].i;
11444
11445 switch (attr[Tag_DIV_use].i)
11446 {
11447 case 0:
11448 /* Integer divide allowed if the instruction is included in the architecture. */
11449 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
11450 return TRUE;
11451 else if (arch >= TAG_CPU_ARCH_V7E_M)
11452 return TRUE;
11453 else
11454 return FALSE;
11455
11456 case 1:
11457 /* Integer divide explicitly prohibited. */
11458 return FALSE;
11459
11460 default:
11461 /* Unrecognised case - treat as allowing divide everywhere. */
11462 case 2:
11463 /* Integer divide allowed in ARM state. */
11464 return TRUE;
11465 }
11466 }
11467
11468 /* Query the attributes object to see if integer divide instructions
11469 are forbidden in the object. This is not the inverse of
11470 elf32_arm_attributes_accept_div. */
11471 static bfd_boolean
11472 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
11473 {
11474 return attr[Tag_DIV_use].i == 1;
11475 }
11476
11477 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
11478 are conflicting attributes. */
11479
11480 static bfd_boolean
11481 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
11482 {
11483 obj_attribute *in_attr;
11484 obj_attribute *out_attr;
11485 /* Some tags have 0 = don't care, 1 = strong requirement,
11486 2 = weak requirement. */
11487 static const int order_021[3] = {0, 2, 1};
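/* order_021[x] gives the merge precedence of value x: 1 (strong requirement)
   outranks 2 (weak requirement), which outranks 0 (don't care). */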
11488 int i;
11489 bfd_boolean result = TRUE;
11490
11491 /* Skip the linker stubs file. This preserves previous behavior
11492 of accepting unknown attributes in the first input file - but
11493 is that a bug? */
11494 if (ibfd->flags & BFD_LINKER_CREATED)
11495 return TRUE;
11496
11497 if (!elf_known_obj_attributes_proc (obfd)[0].i)
11498 {
11499 /* This is the first object. Copy the attributes. */
11500 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11501
11502 out_attr = elf_known_obj_attributes_proc (obfd);
11503
11504 /* Use the Tag_null value to indicate the attributes have been
11505 initialized. */
11506 out_attr[0].i = 1;
11507
11508 /* We do not output objects with Tag_MPextension_use_legacy - we move
11509 the attribute's value to Tag_MPextension_use. */
11510 if (out_attr[Tag_MPextension_use_legacy].i != 0)
11511 {
11512 if (out_attr[Tag_MPextension_use].i != 0
11513 && out_attr[Tag_MPextension_use_legacy].i
11514 != out_attr[Tag_MPextension_use].i)
11515 {
11516 _bfd_error_handler
11517 (_("Error: %B has both the current and legacy "
11518 "Tag_MPextension_use attributes"), ibfd);
11519 result = FALSE;
11520 }
11521
11522 out_attr[Tag_MPextension_use] =
11523 out_attr[Tag_MPextension_use_legacy];
11524 out_attr[Tag_MPextension_use_legacy].type = 0;
11525 out_attr[Tag_MPextension_use_legacy].i = 0;
11526 }
11527
11528 return result;
11529 }
11530
11531 in_attr = elf_known_obj_attributes_proc (ibfd);
11532 out_attr = elf_known_obj_attributes_proc (obfd);
11533 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
11534 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
11535 {
11536 /* Ignore mismatches if the object doesn't use floating point. */
11537 if (out_attr[Tag_ABI_FP_number_model].i == 0)
11538 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
11539 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
11540 {
11541 _bfd_error_handler
11542 (_("error: %B uses VFP register arguments, %B does not"),
11543 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
11544 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
11545 result = FALSE;
11546 }
11547 }
11548
11549 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
11550 {
11551 /* Merge this attribute with existing attributes. */
11552 switch (i)
11553 {
11554 case Tag_CPU_raw_name:
11555 case Tag_CPU_name:
11556 /* These are merged after Tag_CPU_arch. */
11557 break;
11558
11559 case Tag_ABI_optimization_goals:
11560 case Tag_ABI_FP_optimization_goals:
11561 /* Use the first value seen. */
11562 break;
11563
11564 case Tag_CPU_arch:
11565 {
11566 int secondary_compat = -1, secondary_compat_out = -1;
11567 unsigned int saved_out_attr = out_attr[i].i;
11568 static const char *name_table[] = {
11569 /* These aren't real CPU names, but we can't guess
11570 that from the architecture version alone. */
11571 "Pre v4",
11572 "ARM v4",
11573 "ARM v4T",
11574 "ARM v5T",
11575 "ARM v5TE",
11576 "ARM v5TEJ",
11577 "ARM v6",
11578 "ARM v6KZ",
11579 "ARM v6T2",
11580 "ARM v6K",
11581 "ARM v7",
11582 "ARM v6-M",
11583 "ARM v6S-M",
11584 "ARM v8"
11585 };
11586
11587 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
11588 secondary_compat = get_secondary_compatible_arch (ibfd);
11589 secondary_compat_out = get_secondary_compatible_arch (obfd);
11590 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
11591 &secondary_compat_out,
11592 in_attr[i].i,
11593 secondary_compat);
11594 set_secondary_compatible_arch (obfd, secondary_compat_out);
11595
11596 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
11597 if (out_attr[i].i == saved_out_attr)
11598 ; /* Leave the names alone. */
11599 else if (out_attr[i].i == in_attr[i].i)
11600 {
11601 /* The output architecture has been changed to match the
11602 input architecture. Use the input names. */
11603 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
11604 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
11605 : NULL;
11606 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
11607 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
11608 : NULL;
11609 }
11610 else
11611 {
11612 out_attr[Tag_CPU_name].s = NULL;
11613 out_attr[Tag_CPU_raw_name].s = NULL;
11614 }
11615
11616 /* If we still don't have a value for Tag_CPU_name,
11617 make one up now. Tag_CPU_raw_name remains blank. */
11618 if (out_attr[Tag_CPU_name].s == NULL
11619 && out_attr[i].i < ARRAY_SIZE (name_table))
11620 out_attr[Tag_CPU_name].s =
11621 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
11622 }
11623 break;
11624
11625 case Tag_ARM_ISA_use:
11626 case Tag_THUMB_ISA_use:
11627 case Tag_WMMX_arch:
11628 case Tag_Advanced_SIMD_arch:
11629 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
11630 case Tag_ABI_FP_rounding:
11631 case Tag_ABI_FP_exceptions:
11632 case Tag_ABI_FP_user_exceptions:
11633 case Tag_ABI_FP_number_model:
11634 case Tag_FP_HP_extension:
11635 case Tag_CPU_unaligned_access:
11636 case Tag_T2EE_use:
11637 case Tag_MPextension_use:
11638 /* Use the largest value specified. */
11639 if (in_attr[i].i > out_attr[i].i)
11640 out_attr[i].i = in_attr[i].i;
11641 break;
11642
11643 case Tag_ABI_align_preserved:
11644 case Tag_ABI_PCS_RO_data:
11645 /* Use the smallest value specified. */
11646 if (in_attr[i].i < out_attr[i].i)
11647 out_attr[i].i = in_attr[i].i;
11648 break;
11649
11650 case Tag_ABI_align_needed:
11651 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
11652 && (in_attr[Tag_ABI_align_preserved].i == 0
11653 || out_attr[Tag_ABI_align_preserved].i == 0))
11654 {
11655 /* This error message should be enabled once all non-conformant
11656 binaries in the toolchain have had the attributes set
11657 properly.
11658 _bfd_error_handler
11659 (_("error: %B: 8-byte data alignment conflicts with %B"),
11660 obfd, ibfd);
11661 result = FALSE; */
11662 }
11663 /* Fall through. */
11664 case Tag_ABI_FP_denormal:
11665 case Tag_ABI_PCS_GOT_use:
11666 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
11667 value if greater than 2 (for future-proofing). */
11668 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
11669 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
11670 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
11671 out_attr[i].i = in_attr[i].i;
11672 break;
11673
11674 case Tag_Virtualization_use:
11675 /* The virtualization tag effectively stores two bits of
11676 information: the intended use of TrustZone (in bit 0), and the
11677 intended use of Virtualization (in bit 1). */
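/* Two different non-zero values no greater than 3 therefore combine to 3,
   i.e. both bits set; differing values above 3 cannot be merged and are
   diagnosed below. */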
11678 if (out_attr[i].i == 0)
11679 out_attr[i].i = in_attr[i].i;
11680 else if (in_attr[i].i != 0
11681 && in_attr[i].i != out_attr[i].i)
11682 {
11683 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
11684 out_attr[i].i = 3;
11685 else
11686 {
11687 _bfd_error_handler
11688 (_("error: %B: unable to merge virtualization attributes "
11689 "with %B"),
11690 obfd, ibfd);
11691 result = FALSE;
11692 }
11693 }
11694 break;
11695
11696 case Tag_CPU_arch_profile:
11697 if (out_attr[i].i != in_attr[i].i)
11698 {
11699 /* 0 will merge with anything.
11700 'A' and 'S' merge to 'A'.
11701 'R' and 'S' merge to 'R'.
11702 'M' and 'A|R|S' is an error. */
11703 if (out_attr[i].i == 0
11704 || (out_attr[i].i == 'S'
11705 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
11706 out_attr[i].i = in_attr[i].i;
11707 else if (in_attr[i].i == 0
11708 || (in_attr[i].i == 'S'
11709 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
11710 ; /* Do nothing. */
11711 else
11712 {
11713 _bfd_error_handler
11714 (_("error: %B: Conflicting architecture profiles %c/%c"),
11715 ibfd,
11716 in_attr[i].i ? in_attr[i].i : '0',
11717 out_attr[i].i ? out_attr[i].i : '0');
11718 result = FALSE;
11719 }
11720 }
11721 break;
11722 case Tag_FP_arch:
11723 {
11724 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
11725 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
11726 when it's 0. It might mean absence of FP hardware if
11727 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
11728
11729 #define VFP_VERSION_COUNT 8
11730 static const struct
11731 {
11732 int ver;
11733 int regs;
11734 } vfp_versions[VFP_VERSION_COUNT] =
11735 {
11736 {0, 0},
11737 {1, 16},
11738 {2, 16},
11739 {3, 32},
11740 {3, 16},
11741 {4, 32},
11742 {4, 16},
11743 {8, 32}
11744 };
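/* The merge below takes the superset of ISA version and register count and
   then maps that pair back to the unique table entry with exactly those
   values; for example, combining index 2 ({2, 16}) with index 4 ({3, 16})
   needs ver 3 and 16 regs, which is index 4 again. */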
11745 int ver;
11746 int regs;
11747 int newval;
11748
11749 /* If the output has no requirement about FP hardware,
11750 follow the requirement of the input. */
11751 if (out_attr[i].i == 0)
11752 {
11753 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
11754 out_attr[i].i = in_attr[i].i;
11755 out_attr[Tag_ABI_HardFP_use].i
11756 = in_attr[Tag_ABI_HardFP_use].i;
11757 break;
11758 }
11759 /* If the input has no requirement about FP hardware, do
11760 nothing. */
11761 else if (in_attr[i].i == 0)
11762 {
11763 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
11764 break;
11765 }
11766
11767 /* Both the input and the output have nonzero Tag_FP_arch.
11768 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
11769
11770 /* If both the input and the output have zero Tag_ABI_HardFP_use,
11771 do nothing. */
11772 if (in_attr[Tag_ABI_HardFP_use].i == 0
11773 && out_attr[Tag_ABI_HardFP_use].i == 0)
11774 ;
11775 /* If the input and the output have different Tag_ABI_HardFP_use,
11776 the combination of them is 3 (SP & DP). */
11777 else if (in_attr[Tag_ABI_HardFP_use].i
11778 != out_attr[Tag_ABI_HardFP_use].i)
11779 out_attr[Tag_ABI_HardFP_use].i = 3;
11780
11781 /* Now we can handle Tag_FP_arch. */
11782
11783 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
11784 pick the biggest. */
11785 if (in_attr[i].i >= VFP_VERSION_COUNT
11786 && in_attr[i].i > out_attr[i].i)
11787 {
11788 out_attr[i] = in_attr[i];
11789 break;
11790 }
11791 /* The output uses the superset of input features
11792 (ISA version) and registers. */
11793 ver = vfp_versions[in_attr[i].i].ver;
11794 if (ver < vfp_versions[out_attr[i].i].ver)
11795 ver = vfp_versions[out_attr[i].i].ver;
11796 regs = vfp_versions[in_attr[i].i].regs;
11797 if (regs < vfp_versions[out_attr[i].i].regs)
11798 regs = vfp_versions[out_attr[i].i].regs;
11799 /* This assumes that all possible supersets are also valid
11800 options. */
11801 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
11802 {
11803 if (regs == vfp_versions[newval].regs
11804 && ver == vfp_versions[newval].ver)
11805 break;
11806 }
11807 out_attr[i].i = newval;
11808 }
11809 break;
11810 case Tag_PCS_config:
11811 if (out_attr[i].i == 0)
11812 out_attr[i].i = in_attr[i].i;
11813 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
11814 {
11815 /* It's sometimes ok to mix different configs, so this is only
11816 a warning. */
11817 _bfd_error_handler
11818 (_("Warning: %B: Conflicting platform configuration"), ibfd);
11819 }
11820 break;
11821 case Tag_ABI_PCS_R9_use:
11822 if (in_attr[i].i != out_attr[i].i
11823 && out_attr[i].i != AEABI_R9_unused
11824 && in_attr[i].i != AEABI_R9_unused)
11825 {
11826 _bfd_error_handler
11827 (_("error: %B: Conflicting use of R9"), ibfd);
11828 result = FALSE;
11829 }
11830 if (out_attr[i].i == AEABI_R9_unused)
11831 out_attr[i].i = in_attr[i].i;
11832 break;
11833 case Tag_ABI_PCS_RW_data:
11834 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
11835 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
11836 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
11837 {
11838 _bfd_error_handler
11839 (_("error: %B: SB relative addressing conflicts with use of R9"),
11840 ibfd);
11841 result = FALSE;
11842 }
11843 /* Use the smallest value specified. */
11844 if (in_attr[i].i < out_attr[i].i)
11845 out_attr[i].i = in_attr[i].i;
11846 break;
11847 case Tag_ABI_PCS_wchar_t:
11848 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
11849 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
11850 {
11851 _bfd_error_handler
11852 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
11853 ibfd, in_attr[i].i, out_attr[i].i);
11854 }
11855 else if (in_attr[i].i && !out_attr[i].i)
11856 out_attr[i].i = in_attr[i].i;
11857 break;
11858 case Tag_ABI_enum_size:
11859 if (in_attr[i].i != AEABI_enum_unused)
11860 {
11861 if (out_attr[i].i == AEABI_enum_unused
11862 || out_attr[i].i == AEABI_enum_forced_wide)
11863 {
11864 /* The existing object is compatible with anything.
11865 Use whatever requirements the new object has. */
11866 out_attr[i].i = in_attr[i].i;
11867 }
11868 else if (in_attr[i].i != AEABI_enum_forced_wide
11869 && out_attr[i].i != in_attr[i].i
11870 && !elf_arm_tdata (obfd)->no_enum_size_warning)
11871 {
11872 static const char *aeabi_enum_names[] =
11873 { "", "variable-size", "32-bit", "" };
11874 const char *in_name =
11875 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11876 ? aeabi_enum_names[in_attr[i].i]
11877 : "<unknown>";
11878 const char *out_name =
11879 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11880 ? aeabi_enum_names[out_attr[i].i]
11881 : "<unknown>";
11882 _bfd_error_handler
11883 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
11884 ibfd, in_name, out_name);
11885 }
11886 }
11887 break;
11888 case Tag_ABI_VFP_args:
11889 /* Already done. */
11890 break;
11891 case Tag_ABI_WMMX_args:
11892 if (in_attr[i].i != out_attr[i].i)
11893 {
11894 _bfd_error_handler
11895 (_("error: %B uses iWMMXt register arguments, %B does not"),
11896 ibfd, obfd);
11897 result = FALSE;
11898 }
11899 break;
11900 case Tag_compatibility:
11901 /* Merged in target-independent code. */
11902 break;
11903 case Tag_ABI_HardFP_use:
11904 /* This is handled along with Tag_FP_arch. */
11905 break;
11906 case Tag_ABI_FP_16bit_format:
11907 if (in_attr[i].i != 0 && out_attr[i].i != 0)
11908 {
11909 if (in_attr[i].i != out_attr[i].i)
11910 {
11911 _bfd_error_handler
11912 (_("error: fp16 format mismatch between %B and %B"),
11913 ibfd, obfd);
11914 result = FALSE;
11915 }
11916 }
11917 if (in_attr[i].i != 0)
11918 out_attr[i].i = in_attr[i].i;
11919 break;
11920
11921 case Tag_DIV_use:
11922 /* A value of zero on input means that the divide instruction may
11923 be used if available in the base architecture as specified via
11924 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
11925 the user did not want divide instructions. A value of 2
11926 explicitly means that divide instructions were allowed in ARM
11927 and Thumb state. */
11928 if (in_attr[i].i == out_attr[i].i)
11929 /* Do nothing. */ ;
11930 else if (elf32_arm_attributes_forbid_div (in_attr)
11931 && !elf32_arm_attributes_accept_div (out_attr))
11932 out_attr[i].i = 1;
11933 else if (elf32_arm_attributes_forbid_div (out_attr)
11934 && elf32_arm_attributes_accept_div (in_attr))
11935 out_attr[i].i = in_attr[i].i;
11936 else if (in_attr[i].i == 2)
11937 out_attr[i].i = in_attr[i].i;
11938 break;
11939
11940 case Tag_MPextension_use_legacy:
11941 /* We don't output objects with Tag_MPextension_use_legacy - we
11942 move the value to Tag_MPextension_use. */
11943 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
11944 {
11945 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
11946 {
11947 _bfd_error_handler
11948 (_("%B has has both the current and legacy "
11949 "Tag_MPextension_use attributes"),
11950 ibfd);
11951 result = FALSE;
11952 }
11953 }
11954
11955 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
11956 out_attr[Tag_MPextension_use] = in_attr[i];
11957
11958 break;
11959
11960 case Tag_nodefaults:
11961 /* This tag is set if it exists, but the value is unused (and is
11962 typically zero). We don't actually need to do anything here -
11963 the merge happens automatically when the type flags are merged
11964 below. */
11965 break;
11966 case Tag_also_compatible_with:
11967 /* Already done in Tag_CPU_arch. */
11968 break;
11969 case Tag_conformance:
11970 /* Keep the attribute if it matches. Throw it away otherwise.
11971 No attribute means no claim to conform. */
11972 if (!in_attr[i].s || !out_attr[i].s
11973 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
11974 out_attr[i].s = NULL;
11975 break;
11976
11977 default:
11978 result
11979 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
11980 }
11981
11982 /* If out_attr was copied from in_attr then it won't have a type yet. */
11983 if (in_attr[i].type && !out_attr[i].type)
11984 out_attr[i].type = in_attr[i].type;
11985 }
11986
11987 /* Merge Tag_compatibility attributes and any common GNU ones. */
11988 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
11989 return FALSE;
11990
11991 /* Check for any attributes not known on ARM. */
11992 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
11993
11994 return result;
11995 }
11996
11997
11998 /* Return TRUE if the two EABI versions are incompatible. */
11999
12000 static bfd_boolean
12001 elf32_arm_versions_compatible (unsigned iver, unsigned over)
12002 {
12003 /* EABI v4 and v5 describe the same specification before and after its
12004 release, so allow mixing them. */
12005 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
12006 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
12007 return TRUE;
12008
12009 return (iver == over);
12010 }
12011
12012 /* Merge backend specific data from an object file to the output
12013 object file when linking. */
12014
12015 static bfd_boolean
12016 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
12017
12018 /* Display the flags field. */
12019
12020 static bfd_boolean
12021 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
12022 {
12023 FILE * file = (FILE *) ptr;
12024 unsigned long flags;
12025
12026 BFD_ASSERT (abfd != NULL && ptr != NULL);
12027
12028 /* Print normal ELF private data. */
12029 _bfd_elf_print_private_bfd_data (abfd, ptr);
12030
12031 flags = elf_elfheader (abfd)->e_flags;
12032 /* Ignore init flag - it may not be set, despite the flags field
12033 containing valid data. */
12034
12035 /* xgettext:c-format */
12036 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
12037
12038 switch (EF_ARM_EABI_VERSION (flags))
12039 {
12040 case EF_ARM_EABI_UNKNOWN:
12041 /* The following flag bits are GNU extensions and not part of the
12042 official ARM ELF extended ABI. Hence they are only decoded if
12043 the EABI version is not set. */
12044 if (flags & EF_ARM_INTERWORK)
12045 fprintf (file, _(" [interworking enabled]"));
12046
12047 if (flags & EF_ARM_APCS_26)
12048 fprintf (file, " [APCS-26]");
12049 else
12050 fprintf (file, " [APCS-32]");
12051
12052 if (flags & EF_ARM_VFP_FLOAT)
12053 fprintf (file, _(" [VFP float format]"));
12054 else if (flags & EF_ARM_MAVERICK_FLOAT)
12055 fprintf (file, _(" [Maverick float format]"));
12056 else
12057 fprintf (file, _(" [FPA float format]"));
12058
12059 if (flags & EF_ARM_APCS_FLOAT)
12060 fprintf (file, _(" [floats passed in float registers]"));
12061
12062 if (flags & EF_ARM_PIC)
12063 fprintf (file, _(" [position independent]"));
12064
12065 if (flags & EF_ARM_NEW_ABI)
12066 fprintf (file, _(" [new ABI]"));
12067
12068 if (flags & EF_ARM_OLD_ABI)
12069 fprintf (file, _(" [old ABI]"));
12070
12071 if (flags & EF_ARM_SOFT_FLOAT)
12072 fprintf (file, _(" [software FP]"));
12073
12074 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
12075 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
12076 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
12077 | EF_ARM_MAVERICK_FLOAT);
12078 break;
12079
12080 case EF_ARM_EABI_VER1:
12081 fprintf (file, _(" [Version1 EABI]"));
12082
12083 if (flags & EF_ARM_SYMSARESORTED)
12084 fprintf (file, _(" [sorted symbol table]"));
12085 else
12086 fprintf (file, _(" [unsorted symbol table]"));
12087
12088 flags &= ~ EF_ARM_SYMSARESORTED;
12089 break;
12090
12091 case EF_ARM_EABI_VER2:
12092 fprintf (file, _(" [Version2 EABI]"));
12093
12094 if (flags & EF_ARM_SYMSARESORTED)
12095 fprintf (file, _(" [sorted symbol table]"));
12096 else
12097 fprintf (file, _(" [unsorted symbol table]"));
12098
12099 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
12100 fprintf (file, _(" [dynamic symbols use segment index]"));
12101
12102 if (flags & EF_ARM_MAPSYMSFIRST)
12103 fprintf (file, _(" [mapping symbols precede others]"));
12104
12105 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
12106 | EF_ARM_MAPSYMSFIRST);
12107 break;
12108
12109 case EF_ARM_EABI_VER3:
12110 fprintf (file, _(" [Version3 EABI]"));
12111 break;
12112
12113 case EF_ARM_EABI_VER4:
12114 fprintf (file, _(" [Version4 EABI]"));
12115 goto eabi;
12116
12117 case EF_ARM_EABI_VER5:
12118 fprintf (file, _(" [Version5 EABI]"));
12119
12120 if (flags & EF_ARM_ABI_FLOAT_SOFT)
12121 fprintf (file, _(" [soft-float ABI]"));
12122
12123 if (flags & EF_ARM_ABI_FLOAT_HARD)
12124 fprintf (file, _(" [hard-float ABI]"));
12125
12126 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
12127
12128 eabi:
12129 if (flags & EF_ARM_BE8)
12130 fprintf (file, _(" [BE8]"));
12131
12132 if (flags & EF_ARM_LE8)
12133 fprintf (file, _(" [LE8]"));
12134
12135 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
12136 break;
12137
12138 default:
12139 fprintf (file, _(" <EABI version unrecognised>"));
12140 break;
12141 }
12142
12143 flags &= ~ EF_ARM_EABIMASK;
12144
12145 if (flags & EF_ARM_RELEXEC)
12146 fprintf (file, _(" [relocatable executable]"));
12147
12148 if (flags & EF_ARM_HASENTRY)
12149 fprintf (file, _(" [has entry point]"));
12150
12151 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
12152
12153 if (flags)
12154 fprintf (file, _("<Unrecognised flag bits set>"));
12155
12156 fputc ('\n', file);
12157
12158 return TRUE;
12159 }
12160
12161 static int
12162 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
12163 {
12164 switch (ELF_ST_TYPE (elf_sym->st_info))
12165 {
12166 case STT_ARM_TFUNC:
12167 return ELF_ST_TYPE (elf_sym->st_info);
12168
12169 case STT_ARM_16BIT:
12170 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
12171 This allows us to distinguish between data used by Thumb instructions
12172 and non-data (which is probably code) inside Thumb regions of an
12173 executable. */
12174 if (type != STT_OBJECT && type != STT_TLS)
12175 return ELF_ST_TYPE (elf_sym->st_info);
12176 break;
12177
12178 default:
12179 break;
12180 }
12181
12182 return type;
12183 }
12184
12185 static asection *
12186 elf32_arm_gc_mark_hook (asection *sec,
12187 struct bfd_link_info *info,
12188 Elf_Internal_Rela *rel,
12189 struct elf_link_hash_entry *h,
12190 Elf_Internal_Sym *sym)
12191 {
12192 if (h != NULL)
12193 switch (ELF32_R_TYPE (rel->r_info))
12194 {
12195 case R_ARM_GNU_VTINHERIT:
12196 case R_ARM_GNU_VTENTRY:
12197 return NULL;
12198 }
12199
12200 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
12201 }
12202
12203 /* Update the got entry reference counts for the section being removed. */
12204
12205 static bfd_boolean
12206 elf32_arm_gc_sweep_hook (bfd * abfd,
12207 struct bfd_link_info * info,
12208 asection * sec,
12209 const Elf_Internal_Rela * relocs)
12210 {
12211 Elf_Internal_Shdr *symtab_hdr;
12212 struct elf_link_hash_entry **sym_hashes;
12213 bfd_signed_vma *local_got_refcounts;
12214 const Elf_Internal_Rela *rel, *relend;
12215 struct elf32_arm_link_hash_table * globals;
12216
12217 if (info->relocatable)
12218 return TRUE;
12219
12220 globals = elf32_arm_hash_table (info);
12221 if (globals == NULL)
12222 return FALSE;
12223
12224 elf_section_data (sec)->local_dynrel = NULL;
12225
12226 symtab_hdr = & elf_symtab_hdr (abfd);
12227 sym_hashes = elf_sym_hashes (abfd);
12228 local_got_refcounts = elf_local_got_refcounts (abfd);
12229
12230 check_use_blx (globals);
12231
12232 relend = relocs + sec->reloc_count;
12233 for (rel = relocs; rel < relend; rel++)
12234 {
12235 unsigned long r_symndx;
12236 struct elf_link_hash_entry *h = NULL;
12237 struct elf32_arm_link_hash_entry *eh;
12238 int r_type;
12239 bfd_boolean call_reloc_p;
12240 bfd_boolean may_become_dynamic_p;
12241 bfd_boolean may_need_local_target_p;
12242 union gotplt_union *root_plt;
12243 struct arm_plt_info *arm_plt;
12244
12245 r_symndx = ELF32_R_SYM (rel->r_info);
12246 if (r_symndx >= symtab_hdr->sh_info)
12247 {
12248 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12249 while (h->root.type == bfd_link_hash_indirect
12250 || h->root.type == bfd_link_hash_warning)
12251 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12252 }
12253 eh = (struct elf32_arm_link_hash_entry *) h;
12254
12255 call_reloc_p = FALSE;
12256 may_become_dynamic_p = FALSE;
12257 may_need_local_target_p = FALSE;
12258
12259 r_type = ELF32_R_TYPE (rel->r_info);
12260 r_type = arm_real_reloc_type (globals, r_type);
12261 switch (r_type)
12262 {
12263 case R_ARM_GOT32:
12264 case R_ARM_GOT_PREL:
12265 case R_ARM_TLS_GD32:
12266 case R_ARM_TLS_IE32:
12267 if (h != NULL)
12268 {
12269 if (h->got.refcount > 0)
12270 h->got.refcount -= 1;
12271 }
12272 else if (local_got_refcounts != NULL)
12273 {
12274 if (local_got_refcounts[r_symndx] > 0)
12275 local_got_refcounts[r_symndx] -= 1;
12276 }
12277 break;
12278
12279 case R_ARM_TLS_LDM32:
12280 globals->tls_ldm_got.refcount -= 1;
12281 break;
12282
12283 case R_ARM_PC24:
12284 case R_ARM_PLT32:
12285 case R_ARM_CALL:
12286 case R_ARM_JUMP24:
12287 case R_ARM_PREL31:
12288 case R_ARM_THM_CALL:
12289 case R_ARM_THM_JUMP24:
12290 case R_ARM_THM_JUMP19:
12291 call_reloc_p = TRUE;
12292 may_need_local_target_p = TRUE;
12293 break;
12294
12295 case R_ARM_ABS12:
12296 if (!globals->vxworks_p)
12297 {
12298 may_need_local_target_p = TRUE;
12299 break;
12300 }
12301 /* Fall through. */
12302 case R_ARM_ABS32:
12303 case R_ARM_ABS32_NOI:
12304 case R_ARM_REL32:
12305 case R_ARM_REL32_NOI:
12306 case R_ARM_MOVW_ABS_NC:
12307 case R_ARM_MOVT_ABS:
12308 case R_ARM_MOVW_PREL_NC:
12309 case R_ARM_MOVT_PREL:
12310 case R_ARM_THM_MOVW_ABS_NC:
12311 case R_ARM_THM_MOVT_ABS:
12312 case R_ARM_THM_MOVW_PREL_NC:
12313 case R_ARM_THM_MOVT_PREL:
12314 /* Should the interworking branches be here also? */
12315 if ((info->shared || globals->root.is_relocatable_executable)
12316 && (sec->flags & SEC_ALLOC) != 0)
12317 {
12318 if (h == NULL
12319 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12320 {
12321 call_reloc_p = TRUE;
12322 may_need_local_target_p = TRUE;
12323 }
12324 else
12325 may_become_dynamic_p = TRUE;
12326 }
12327 else
12328 may_need_local_target_p = TRUE;
12329 break;
12330
12331 default:
12332 break;
12333 }
12334
12335 if (may_need_local_target_p
12336 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
12337 {
12338 /* If PLT refcount book-keeping is wrong and too low, we'll
12339 see a zero value (going to -1) for the root PLT reference
12340 count. */
12341 if (root_plt->refcount >= 0)
12342 {
12343 BFD_ASSERT (root_plt->refcount != 0);
12344 root_plt->refcount -= 1;
12345 }
12346 else
12347 /* A value of -1 means the symbol has become local, either because
12348 it was forced local or because a hidden definition was seen.
12349 Any other negative value is an error. */
12350 BFD_ASSERT (root_plt->refcount == -1);
12351
12352 if (!call_reloc_p)
12353 arm_plt->noncall_refcount--;
12354
12355 if (r_type == R_ARM_THM_CALL)
12356 arm_plt->maybe_thumb_refcount--;
12357
12358 if (r_type == R_ARM_THM_JUMP24
12359 || r_type == R_ARM_THM_JUMP19)
12360 arm_plt->thumb_refcount--;
12361 }
12362
12363 if (may_become_dynamic_p)
12364 {
12365 struct elf_dyn_relocs **pp;
12366 struct elf_dyn_relocs *p;
12367
12368 if (h != NULL)
12369 pp = &(eh->dyn_relocs);
12370 else
12371 {
12372 Elf_Internal_Sym *isym;
12373
12374 isym = bfd_sym_from_r_symndx (&globals->sym_cache,
12375 abfd, r_symndx);
12376 if (isym == NULL)
12377 return FALSE;
12378 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12379 if (pp == NULL)
12380 return FALSE;
12381 }
12382 for (; (p = *pp) != NULL; pp = &p->next)
12383 if (p->sec == sec)
12384 {
12385 /* Everything must go for SEC. */
12386 *pp = p->next;
12387 break;
12388 }
12389 }
12390 }
12391
12392 return TRUE;
12393 }
12394
12395 /* Look through the relocs for a section during the first phase. */
12396
12397 static bfd_boolean
12398 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
12399 asection *sec, const Elf_Internal_Rela *relocs)
12400 {
12401 Elf_Internal_Shdr *symtab_hdr;
12402 struct elf_link_hash_entry **sym_hashes;
12403 const Elf_Internal_Rela *rel;
12404 const Elf_Internal_Rela *rel_end;
12405 bfd *dynobj;
12406 asection *sreloc;
12407 struct elf32_arm_link_hash_table *htab;
12408 bfd_boolean call_reloc_p;
12409 bfd_boolean may_become_dynamic_p;
12410 bfd_boolean may_need_local_target_p;
12411 unsigned long nsyms;
12412
12413 if (info->relocatable)
12414 return TRUE;
12415
12416 BFD_ASSERT (is_arm_elf (abfd));
12417
12418 htab = elf32_arm_hash_table (info);
12419 if (htab == NULL)
12420 return FALSE;
12421
12422 sreloc = NULL;
12423
12424 /* Create dynamic sections for relocatable executables so that we can
12425 copy relocations. */
12426 if (htab->root.is_relocatable_executable
12427 && ! htab->root.dynamic_sections_created)
12428 {
12429 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
12430 return FALSE;
12431 }
12432
12433 if (htab->root.dynobj == NULL)
12434 htab->root.dynobj = abfd;
12435 if (!create_ifunc_sections (info))
12436 return FALSE;
12437
12438 dynobj = htab->root.dynobj;
12439
12440 symtab_hdr = & elf_symtab_hdr (abfd);
12441 sym_hashes = elf_sym_hashes (abfd);
12442 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
12443
12444 rel_end = relocs + sec->reloc_count;
12445 for (rel = relocs; rel < rel_end; rel++)
12446 {
12447 Elf_Internal_Sym *isym;
12448 struct elf_link_hash_entry *h;
12449 struct elf32_arm_link_hash_entry *eh;
12450 unsigned long r_symndx;
12451 int r_type;
12452
12453 r_symndx = ELF32_R_SYM (rel->r_info);
12454 r_type = ELF32_R_TYPE (rel->r_info);
12455 r_type = arm_real_reloc_type (htab, r_type);
12456
12457 if (r_symndx >= nsyms
12458 /* PR 9934: It is possible to have relocations that do not
12459 refer to symbols, thus it is also possible to have an
12460 object file containing relocations but no symbol table. */
12461 && (r_symndx > STN_UNDEF || nsyms > 0))
12462 {
12463 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
12464 r_symndx);
12465 return FALSE;
12466 }
12467
12468 h = NULL;
12469 isym = NULL;
12470 if (nsyms > 0)
12471 {
12472 if (r_symndx < symtab_hdr->sh_info)
12473 {
12474 /* A local symbol. */
12475 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
12476 abfd, r_symndx);
12477 if (isym == NULL)
12478 return FALSE;
12479 }
12480 else
12481 {
12482 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12483 while (h->root.type == bfd_link_hash_indirect
12484 || h->root.type == bfd_link_hash_warning)
12485 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12486 }
12487 }
12488
12489 eh = (struct elf32_arm_link_hash_entry *) h;
12490
12491 call_reloc_p = FALSE;
12492 may_become_dynamic_p = FALSE;
12493 may_need_local_target_p = FALSE;
12494
12495 /* Could be done earlier, if h were already available. */
12496 r_type = elf32_arm_tls_transition (info, r_type, h);
12497 switch (r_type)
12498 {
12499 case R_ARM_GOT32:
12500 case R_ARM_GOT_PREL:
12501 case R_ARM_TLS_GD32:
12502 case R_ARM_TLS_IE32:
12503 case R_ARM_TLS_GOTDESC:
12504 case R_ARM_TLS_DESCSEQ:
12505 case R_ARM_THM_TLS_DESCSEQ:
12506 case R_ARM_TLS_CALL:
12507 case R_ARM_THM_TLS_CALL:
12508 /* This symbol requires a global offset table entry. */
12509 {
12510 int tls_type, old_tls_type;
12511
12512 switch (r_type)
12513 {
12514 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
12515
12516 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
12517
12518 case R_ARM_TLS_GOTDESC:
12519 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
12520 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
12521 tls_type = GOT_TLS_GDESC; break;
12522
12523 default: tls_type = GOT_NORMAL; break;
12524 }
12525
12526 if (h != NULL)
12527 {
12528 h->got.refcount++;
12529 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
12530 }
12531 else
12532 {
12533 /* This is a global offset table entry for a local symbol. */
12534 if (!elf32_arm_allocate_local_sym_info (abfd))
12535 return FALSE;
12536 elf_local_got_refcounts (abfd)[r_symndx] += 1;
12537 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
12538 }
12539
12540 /* If a variable is accessed with both tls methods, two
12541 slots may be created. */
12542 if (GOT_TLS_GD_ANY_P (old_tls_type)
12543 && GOT_TLS_GD_ANY_P (tls_type))
12544 tls_type |= old_tls_type;
12545
12546 /* We will already have issued an error message if there
12547 is a TLS/non-TLS mismatch, based on the symbol
12548 type. So just combine any TLS types needed. */
12549 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
12550 && tls_type != GOT_NORMAL)
12551 tls_type |= old_tls_type;
12552
12553 /* If the symbol is accessed with both the IE and GDESC
12554 methods, we are able to relax to IE. Turn off the GDESC flag
12555 without disturbing any other kind of TLS types that may be
12556 involved. */
12557 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
12558 tls_type &= ~GOT_TLS_GDESC;
12559
12560 if (old_tls_type != tls_type)
12561 {
12562 if (h != NULL)
12563 elf32_arm_hash_entry (h)->tls_type = tls_type;
12564 else
12565 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
12566 }
12567 }
12568 /* Fall through. */
12569
12570 case R_ARM_TLS_LDM32:
12571 if (r_type == R_ARM_TLS_LDM32)
12572 htab->tls_ldm_got.refcount++;
12573 /* Fall through. */
12574
12575 case R_ARM_GOTOFF32:
12576 case R_ARM_GOTPC:
12577 if (htab->root.sgot == NULL
12578 && !create_got_section (htab->root.dynobj, info))
12579 return FALSE;
12580 break;
12581
12582 case R_ARM_PC24:
12583 case R_ARM_PLT32:
12584 case R_ARM_CALL:
12585 case R_ARM_JUMP24:
12586 case R_ARM_PREL31:
12587 case R_ARM_THM_CALL:
12588 case R_ARM_THM_JUMP24:
12589 case R_ARM_THM_JUMP19:
12590 call_reloc_p = TRUE;
12591 may_need_local_target_p = TRUE;
12592 break;
12593
12594 case R_ARM_ABS12:
12595 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
12596 ldr __GOTT_INDEX__ offsets. */
12597 if (!htab->vxworks_p)
12598 {
12599 may_need_local_target_p = TRUE;
12600 break;
12601 }
12602 /* Fall through. */
12603
12604 case R_ARM_MOVW_ABS_NC:
12605 case R_ARM_MOVT_ABS:
12606 case R_ARM_THM_MOVW_ABS_NC:
12607 case R_ARM_THM_MOVT_ABS:
12608 if (info->shared)
12609 {
12610 (*_bfd_error_handler)
12611 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
12612 abfd, elf32_arm_howto_table_1[r_type].name,
12613 (h) ? h->root.root.string : "a local symbol");
12614 bfd_set_error (bfd_error_bad_value);
12615 return FALSE;
12616 }
12617
12618 /* Fall through. */
12619 case R_ARM_ABS32:
12620 case R_ARM_ABS32_NOI:
12621 case R_ARM_REL32:
12622 case R_ARM_REL32_NOI:
12623 case R_ARM_MOVW_PREL_NC:
12624 case R_ARM_MOVT_PREL:
12625 case R_ARM_THM_MOVW_PREL_NC:
12626 case R_ARM_THM_MOVT_PREL:
12627
12628 /* Should the interworking branches be listed here? */
12629 if ((info->shared || htab->root.is_relocatable_executable)
12630 && (sec->flags & SEC_ALLOC) != 0)
12631 {
12632 if (h == NULL
12633 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12634 {
12635 /* In shared libraries and relocatable executables,
12636 we treat local relative references as calls;
12637 see the related SYMBOL_CALLS_LOCAL code in
12638 allocate_dynrelocs. */
12639 call_reloc_p = TRUE;
12640 may_need_local_target_p = TRUE;
12641 }
12642 else
12643 /* We are creating a shared library or relocatable
12644 executable, and this is a reloc against a global symbol,
12645 or a non-PC-relative reloc against a local symbol.
12646 We may need to copy the reloc into the output. */
12647 may_become_dynamic_p = TRUE;
12648 }
12649 else
12650 may_need_local_target_p = TRUE;
12651 break;
12652
12653 /* This relocation describes the C++ object vtable hierarchy.
12654 Reconstruct it for later use during GC. */
12655 case R_ARM_GNU_VTINHERIT:
12656 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
12657 return FALSE;
12658 break;
12659
12660 /* This relocation describes which C++ vtable entries are actually
12661 used. Record for later use during GC. */
12662 case R_ARM_GNU_VTENTRY:
12663 BFD_ASSERT (h != NULL);
12664 if (h != NULL
12665 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
12666 return FALSE;
12667 break;
12668 }
12669
12670 if (h != NULL)
12671 {
12672 if (call_reloc_p)
12673 /* We may need a .plt entry if the function this reloc
12674 refers to is in a different object, regardless of the
12675 symbol's type. We can't tell for sure yet, because
12676 something later might force the symbol local. */
12677 h->needs_plt = 1;
12678 else if (may_need_local_target_p)
12679 /* If this reloc is in a read-only section, we might
12680 need a copy reloc. We can't check reliably at this
12681 stage whether the section is read-only, as input
12682 sections have not yet been mapped to output sections.
12683 Tentatively set the flag for now, and correct in
12684 adjust_dynamic_symbol. */
12685 h->non_got_ref = 1;
12686 }
12687
12688 if (may_need_local_target_p
12689 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
12690 {
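/* Local symbols reach this block only when they are STT_GNU_IFUNC;
   anything else has a hash entry H to carry the PLT bookkeeping. */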
12691 union gotplt_union *root_plt;
12692 struct arm_plt_info *arm_plt;
12693 struct arm_local_iplt_info *local_iplt;
12694
12695 if (h != NULL)
12696 {
12697 root_plt = &h->plt;
12698 arm_plt = &eh->plt;
12699 }
12700 else
12701 {
12702 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
12703 if (local_iplt == NULL)
12704 return FALSE;
12705 root_plt = &local_iplt->root;
12706 arm_plt = &local_iplt->arm;
12707 }
12708
12709 /* If the symbol is a function that doesn't bind locally,
12710 this relocation will need a PLT entry. */
12711 if (root_plt->refcount != -1)
12712 root_plt->refcount += 1;
12713
12714 if (!call_reloc_p)
12715 arm_plt->noncall_refcount++;
12716
12717 /* It's too early to use htab->use_blx here, so we have to
12718 record possible blx references separately from
12719 relocs that definitely need a thumb stub. */
12720
12721 if (r_type == R_ARM_THM_CALL)
12722 arm_plt->maybe_thumb_refcount += 1;
12723
12724 if (r_type == R_ARM_THM_JUMP24
12725 || r_type == R_ARM_THM_JUMP19)
12726 arm_plt->thumb_refcount += 1;
12727 }
12728
12729 if (may_become_dynamic_p)
12730 {
12731 struct elf_dyn_relocs *p, **head;
12732
12733 /* Create a reloc section in dynobj. */
12734 if (sreloc == NULL)
12735 {
12736 sreloc = _bfd_elf_make_dynamic_reloc_section
12737 (sec, dynobj, 2, abfd, ! htab->use_rel);
12738
12739 if (sreloc == NULL)
12740 return FALSE;
12741
12742 /* BPABI objects never have dynamic relocations mapped. */
12743 if (htab->symbian_p)
12744 {
12745 flagword flags;
12746
12747 flags = bfd_get_section_flags (dynobj, sreloc);
12748 flags &= ~(SEC_LOAD | SEC_ALLOC);
12749 bfd_set_section_flags (dynobj, sreloc, flags);
12750 }
12751 }
12752
12753 /* If this is a global symbol, count the number of
12754 relocations we need for this symbol. */
12755 if (h != NULL)
12756 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
12757 else
12758 {
12759 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12760 if (head == NULL)
12761 return FALSE;
12762 }
12763
12764 p = *head;
12765 if (p == NULL || p->sec != sec)
12766 {
12767 bfd_size_type amt = sizeof *p;
12768
12769 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
12770 if (p == NULL)
12771 return FALSE;
12772 p->next = *head;
12773 *head = p;
12774 p->sec = sec;
12775 p->count = 0;
12776 p->pc_count = 0;
12777 }
12778
12779 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
12780 p->pc_count += 1;
12781 p->count += 1;
12782 }
12783 }
12784
12785 return TRUE;
12786 }
12787
12788 /* Unwinding tables are not referenced directly. This pass marks them as
12789 required if the corresponding code section is marked. */
12790
12791 static bfd_boolean
12792 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
12793 elf_gc_mark_hook_fn gc_mark_hook)
12794 {
12795 bfd *sub;
12796 Elf_Internal_Shdr **elf_shdrp;
12797 bfd_boolean again;
12798
12799 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
12800
12801 /* Marking EH data may cause additional code sections to be marked,
12802 requiring multiple passes. */
12803 again = TRUE;
12804 while (again)
12805 {
12806 again = FALSE;
12807 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
12808 {
12809 asection *o;
12810
12811 if (! is_arm_elf (sub))
12812 continue;
12813
12814 elf_shdrp = elf_elfsections (sub);
12815 for (o = sub->sections; o != NULL; o = o->next)
12816 {
12817 Elf_Internal_Shdr *hdr;
12818
12819 hdr = &elf_section_data (o)->this_hdr;
12820 if (hdr->sh_type == SHT_ARM_EXIDX
12821 && hdr->sh_link
12822 && hdr->sh_link < elf_numsections (sub)
12823 && !o->gc_mark
12824 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
12825 {
12826 again = TRUE;
12827 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
12828 return FALSE;
12829 }
12830 }
12831 }
12832 }
12833
12834 return TRUE;
12835 }
12836
12837 /* Treat mapping symbols as special target symbols. */
12838
12839 static bfd_boolean
12840 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
12841 {
12842 return bfd_is_arm_special_symbol_name (sym->name,
12843 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
12844 }
12845
12846 /* This is a copy of elf_find_function() from elf.c except that
12847 ARM mapping symbols are ignored when looking for function names
12848 and STT_ARM_TFUNC is considered to be a function type. */
12849
12850 static bfd_boolean
12851 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
12852 asection * section,
12853 asymbol ** symbols,
12854 bfd_vma offset,
12855 const char ** filename_ptr,
12856 const char ** functionname_ptr)
12857 {
12858 const char * filename = NULL;
12859 asymbol * func = NULL;
12860 bfd_vma low_func = 0;
12861 asymbol ** p;
12862
12863 for (p = symbols; *p != NULL; p++)
12864 {
12865 elf_symbol_type *q;
12866
12867 q = (elf_symbol_type *) *p;
12868
12869 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
12870 {
12871 default:
12872 break;
12873 case STT_FILE:
12874 filename = bfd_asymbol_name (&q->symbol);
12875 break;
12876 case STT_FUNC:
12877 case STT_ARM_TFUNC:
12878 case STT_NOTYPE:
12879 /* Skip mapping symbols. */
12880 if ((q->symbol.flags & BSF_LOCAL)
12881 && bfd_is_arm_special_symbol_name (q->symbol.name,
12882 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
12883 continue;
12884 /* Fall through. */
12885 if (bfd_get_section (&q->symbol) == section
12886 && q->symbol.value >= low_func
12887 && q->symbol.value <= offset)
12888 {
12889 func = (asymbol *) q;
12890 low_func = q->symbol.value;
12891 }
12892 break;
12893 }
12894 }
12895
12896 if (func == NULL)
12897 return FALSE;
12898
12899 if (filename_ptr)
12900 *filename_ptr = filename;
12901 if (functionname_ptr)
12902 *functionname_ptr = bfd_asymbol_name (func);
12903
12904 return TRUE;
12905 }
12906
12907
12908 /* Find the nearest line to a particular section and offset, for error
12909 reporting. This code is a duplicate of the code in elf.c, except
12910 that it uses arm_elf_find_function. */
12911
12912 static bfd_boolean
12913 elf32_arm_find_nearest_line (bfd * abfd,
12914 asection * section,
12915 asymbol ** symbols,
12916 bfd_vma offset,
12917 const char ** filename_ptr,
12918 const char ** functionname_ptr,
12919 unsigned int * line_ptr)
12920 {
12921 bfd_boolean found = FALSE;
12922
12923 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
12924
12925 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
12926 section, symbols, offset,
12927 filename_ptr, functionname_ptr,
12928 line_ptr, NULL, 0,
12929 & elf_tdata (abfd)->dwarf2_find_line_info))
12930 {
12931 if (!*functionname_ptr)
12932 arm_elf_find_function (abfd, section, symbols, offset,
12933 *filename_ptr ? NULL : filename_ptr,
12934 functionname_ptr);
12935
12936 return TRUE;
12937 }
12938
12939 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
12940 & found, filename_ptr,
12941 functionname_ptr, line_ptr,
12942 & elf_tdata (abfd)->line_info))
12943 return FALSE;
12944
12945 if (found && (*functionname_ptr || *line_ptr))
12946 return TRUE;
12947
12948 if (symbols == NULL)
12949 return FALSE;
12950
12951 if (! arm_elf_find_function (abfd, section, symbols, offset,
12952 filename_ptr, functionname_ptr))
12953 return FALSE;
12954
12955 *line_ptr = 0;
12956 return TRUE;
12957 }
12958
12959 static bfd_boolean
12960 elf32_arm_find_inliner_info (bfd * abfd,
12961 const char ** filename_ptr,
12962 const char ** functionname_ptr,
12963 unsigned int * line_ptr)
12964 {
12965 bfd_boolean found;
12966 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
12967 functionname_ptr, line_ptr,
12968 & elf_tdata (abfd)->dwarf2_find_line_info);
12969 return found;
12970 }
12971
12972 /* Adjust a symbol defined by a dynamic object and referenced by a
12973 regular object. The current definition is in some section of the
12974 dynamic object, but we're not including those sections. We have to
12975 change the definition to something the rest of the link can
12976 understand. */
12977
12978 static bfd_boolean
12979 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
12980 struct elf_link_hash_entry * h)
12981 {
12982 bfd * dynobj;
12983 asection * s;
12984 struct elf32_arm_link_hash_entry * eh;
12985 struct elf32_arm_link_hash_table *globals;
12986
12987 globals = elf32_arm_hash_table (info);
12988 if (globals == NULL)
12989 return FALSE;
12990
12991 dynobj = elf_hash_table (info)->dynobj;
12992
12993 /* Make sure we know what is going on here. */
12994 BFD_ASSERT (dynobj != NULL
12995 && (h->needs_plt
12996 || h->type == STT_GNU_IFUNC
12997 || h->u.weakdef != NULL
12998 || (h->def_dynamic
12999 && h->ref_regular
13000 && !h->def_regular)));
13001
13002 eh = (struct elf32_arm_link_hash_entry *) h;
13003
13004 /* If this is a function, put it in the procedure linkage table. We
13005 will fill in the contents of the procedure linkage table later,
13006 when we know the address of the .got section. */
13007 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
13008 {
13009 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
13010 symbol binds locally. */
13011 if (h->plt.refcount <= 0
13012 || (h->type != STT_GNU_IFUNC
13013 && (SYMBOL_CALLS_LOCAL (info, h)
13014 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
13015 && h->root.type == bfd_link_hash_undefweak))))
13016 {
13017 /* This case can occur if we saw a PLT32 reloc in an input
13018 file, but the symbol was never referred to by a dynamic
13019 object, or if all references were garbage collected. In
13020 such a case, we don't actually need to build a procedure
13021 linkage table, and we can just do a PC24 reloc instead. */
13022 h->plt.offset = (bfd_vma) -1;
13023 eh->plt.thumb_refcount = 0;
13024 eh->plt.maybe_thumb_refcount = 0;
13025 eh->plt.noncall_refcount = 0;
13026 h->needs_plt = 0;
13027 }
13028
13029 return TRUE;
13030 }
13031 else
13032 {
13033 /* It's possible that we incorrectly decided a .plt reloc was
13034 needed for an R_ARM_PC24 or similar reloc to a non-function sym
13035 in check_relocs. We can't decide accurately between function
13036 and non-function syms in check-relocs; Objects loaded later in
13037 the link may change h->type. So fix it now. */
13038 h->plt.offset = (bfd_vma) -1;
13039 eh->plt.thumb_refcount = 0;
13040 eh->plt.maybe_thumb_refcount = 0;
13041 eh->plt.noncall_refcount = 0;
13042 }
13043
13044 /* If this is a weak symbol, and there is a real definition, the
13045 processor independent code will have arranged for us to see the
13046 real definition first, and we can just use the same value. */
13047 if (h->u.weakdef != NULL)
13048 {
13049 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
13050 || h->u.weakdef->root.type == bfd_link_hash_defweak);
13051 h->root.u.def.section = h->u.weakdef->root.u.def.section;
13052 h->root.u.def.value = h->u.weakdef->root.u.def.value;
13053 return TRUE;
13054 }
13055
13056 /* If there are no non-GOT references, we do not need a copy
13057 relocation. */
13058 if (!h->non_got_ref)
13059 return TRUE;
13060
13061 /* This is a reference to a symbol defined by a dynamic object which
13062 is not a function. */
13063
13064 /* If we are creating a shared library, we must presume that the
13065 only references to the symbol are via the global offset table.
13066 For such cases we need not do anything here; the relocations will
13067 be handled correctly by relocate_section. Relocatable executables
13068 can reference data in shared objects directly, so we don't need to
13069 do anything here. */
13070 if (info->shared || globals->root.is_relocatable_executable)
13071 return TRUE;
13072
13073 /* We must allocate the symbol in our .dynbss section, which will
13074 become part of the .bss section of the executable. There will be
13075 an entry for this symbol in the .dynsym section. The dynamic
13076 object will contain position independent code, so all references
13077 from the dynamic object to this symbol will go through the global
13078 offset table. The dynamic linker will use the .dynsym entry to
13079 determine the address it must put in the global offset table, so
13080 both the dynamic object and the regular object will refer to the
13081 same memory location for the variable. */
13082 s = bfd_get_linker_section (dynobj, ".dynbss");
13083 BFD_ASSERT (s != NULL);
13084
13085 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
13086 copy the initial value out of the dynamic object and into the
13087 runtime process image. We need to remember the offset into the
13088 .rel(a).bss section we are going to use. */
13089 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
13090 {
13091 asection *srel;
13092
13093 srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
13094 elf32_arm_allocate_dynrelocs (info, srel, 1);
13095 h->needs_copy = 1;
13096 }
13097
13098 return _bfd_elf_adjust_dynamic_copy (h, s);
13099 }
13100
13101 /* Allocate space in .plt, .got and associated reloc sections for
13102 dynamic relocs. */
13103
13104 static bfd_boolean
13105 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13106 {
13107 struct bfd_link_info *info;
13108 struct elf32_arm_link_hash_table *htab;
13109 struct elf32_arm_link_hash_entry *eh;
13110 struct elf_dyn_relocs *p;
13111
13112 if (h->root.type == bfd_link_hash_indirect)
13113 return TRUE;
13114
13115 eh = (struct elf32_arm_link_hash_entry *) h;
13116
13117 info = (struct bfd_link_info *) inf;
13118 htab = elf32_arm_hash_table (info);
13119 if (htab == NULL)
13120 return FALSE;
13121
13122 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13123 && h->plt.refcount > 0)
13124 {
13125 /* Make sure this symbol is output as a dynamic symbol.
13126 Undefined weak syms won't yet be marked as dynamic. */
13127 if (h->dynindx == -1
13128 && !h->forced_local)
13129 {
13130 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13131 return FALSE;
13132 }
13133
13134 /* If the call in the PLT entry binds locally, the associated
13135 GOT entry should use an R_ARM_IRELATIVE relocation instead of
13136 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
13137 than the .plt section. */
13138 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
13139 {
13140 eh->is_iplt = 1;
13141 if (eh->plt.noncall_refcount == 0
13142 && SYMBOL_REFERENCES_LOCAL (info, h))
13143 /* All non-call references can be resolved directly.
13144 This means that they can (and in some cases, must)
13145 resolve directly to the run-time target, rather than
13146 to the PLT. That in turn means that any .got entry
13147 would be equal to the .igot.plt entry, so there's
13148 no point having both. */
13149 h->got.refcount = 0;
13150 }
13151
13152 if (info->shared
13153 || eh->is_iplt
13154 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
13155 {
13156 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
13157
13158 /* If this symbol is not defined in a regular file, and we are
13159 not generating a shared library, then set the symbol to this
13160 location in the .plt. This is required to make function
13161 pointers compare as equal between the normal executable and
13162 the shared library. */
13163 if (! info->shared
13164 && !h->def_regular)
13165 {
13166 h->root.u.def.section = htab->root.splt;
13167 h->root.u.def.value = h->plt.offset;
13168
13169 /* Make sure the function is not marked as Thumb, in case
13170 it is the target of an ABS32 relocation, which will
13171 point to the PLT entry. */
13172 h->target_internal = ST_BRANCH_TO_ARM;
13173 }
13174
13175 htab->next_tls_desc_index++;
13176
13177 /* VxWorks executables have a second set of relocations for
13178 each PLT entry. They go in a separate relocation section,
13179 which is processed by the kernel loader. */
13180 if (htab->vxworks_p && !info->shared)
13181 {
13182 /* There is a relocation for the initial PLT entry:
13183 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
13184 if (h->plt.offset == htab->plt_header_size)
13185 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
13186
13187 /* There are two extra relocations for each subsequent
13188 PLT entry: an R_ARM_32 relocation for the GOT entry,
13189 and an R_ARM_32 relocation for the PLT entry. */
13190 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
13191 }
13192 }
13193 else
13194 {
13195 h->plt.offset = (bfd_vma) -1;
13196 h->needs_plt = 0;
13197 }
13198 }
13199 else
13200 {
13201 h->plt.offset = (bfd_vma) -1;
13202 h->needs_plt = 0;
13203 }
13204
13205 eh = (struct elf32_arm_link_hash_entry *) h;
13206 eh->tlsdesc_got = (bfd_vma) -1;
13207
13208 if (h->got.refcount > 0)
13209 {
13210 asection *s;
13211 bfd_boolean dyn;
13212 int tls_type = elf32_arm_hash_entry (h)->tls_type;
13213 int indx;
13214
13215 /* Make sure this symbol is output as a dynamic symbol.
13216 Undefined weak syms won't yet be marked as dynamic. */
13217 if (h->dynindx == -1
13218 && !h->forced_local)
13219 {
13220 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13221 return FALSE;
13222 }
13223
13224 if (!htab->symbian_p)
13225 {
13226 s = htab->root.sgot;
13227 h->got.offset = s->size;
13228
13229 if (tls_type == GOT_UNKNOWN)
13230 abort ();
13231
13232 if (tls_type == GOT_NORMAL)
13233 /* Non-TLS symbols need one GOT slot. */
13234 s->size += 4;
13235 else
13236 {
13237 if (tls_type & GOT_TLS_GDESC)
13238 {
13239 /* R_ARM_TLS_DESC needs 2 GOT slots. */
13240 eh->tlsdesc_got
13241 = (htab->root.sgotplt->size
13242 - elf32_arm_compute_jump_table_size (htab));
13243 htab->root.sgotplt->size += 8;
13244 h->got.offset = (bfd_vma) -2;
13245 /* plt.got_offset needs to know there's a TLS_DESC
13246 reloc in the middle of .got.plt. */
13247 htab->num_tls_desc++;
13248 }
13249
13250 if (tls_type & GOT_TLS_GD)
13251 {
13252 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
13253 the symbol is both GD and GDESC, got.offset may
13254 have been overwritten. */
13255 h->got.offset = s->size;
13256 s->size += 8;
13257 }
13258
13259 if (tls_type & GOT_TLS_IE)
13260 /* R_ARM_TLS_IE32 needs one GOT slot. */
13261 s->size += 4;
13262 }
13263
13264 dyn = htab->root.dynamic_sections_created;
13265
13266 indx = 0;
13267 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
13268 && (!info->shared
13269 || !SYMBOL_REFERENCES_LOCAL (info, h)))
13270 indx = h->dynindx;
13271
13272 if (tls_type != GOT_NORMAL
13273 && (info->shared || indx != 0)
13274 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13275 || h->root.type != bfd_link_hash_undefweak))
13276 {
13277 if (tls_type & GOT_TLS_IE)
13278 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13279
13280 if (tls_type & GOT_TLS_GD)
13281 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13282
13283 if (tls_type & GOT_TLS_GDESC)
13284 {
13285 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13286 /* GDESC needs a trampoline to jump to. */
13287 htab->tls_trampoline = -1;
13288 }
13289
13290 /* Only GD needs it. GDESC just emits one relocation per
13291 2 entries. */
13292 if ((tls_type & GOT_TLS_GD) && indx != 0)
13293 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13294 }
13295 else if (!SYMBOL_REFERENCES_LOCAL (info, h))
13296 {
13297 if (htab->root.dynamic_sections_created)
13298 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
13299 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13300 }
13301 else if (h->type == STT_GNU_IFUNC
13302 && eh->plt.noncall_refcount == 0)
13303 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
13304 they all resolve dynamically instead. Reserve room for the
13305 GOT entry's R_ARM_IRELATIVE relocation. */
13306 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
13307 else if (info->shared)
13308 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
13309 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13310 }
13311 }
13312 else
13313 h->got.offset = (bfd_vma) -1;
13314
13315 /* Allocate stubs for exported Thumb functions on v4t. */
13316 if (!htab->use_blx && h->dynindx != -1
13317 && h->def_regular
13318 && h->target_internal == ST_BRANCH_TO_THUMB
13319 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
13320 {
13321 struct elf_link_hash_entry * th;
13322 struct bfd_link_hash_entry * bh;
13323 struct elf_link_hash_entry * myh;
13324 char name[1024];
13325 asection *s;
13326 bh = NULL;
13327 /* Create a new symbol to register the real location of the function. */
13328 s = h->root.u.def.section;
13329 sprintf (name, "__real_%s", h->root.root.string);
13330 _bfd_generic_link_add_one_symbol (info, s->owner,
13331 name, BSF_GLOBAL, s,
13332 h->root.u.def.value,
13333 NULL, TRUE, FALSE, &bh);
13334
13335 myh = (struct elf_link_hash_entry *) bh;
13336 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13337 myh->forced_local = 1;
13338 myh->target_internal = ST_BRANCH_TO_THUMB;
13339 eh->export_glue = myh;
13340 th = record_arm_to_thumb_glue (info, h);
13341 /* Point the symbol at the stub. */
13342 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13343 h->target_internal = ST_BRANCH_TO_ARM;
13344 h->root.u.def.section = th->root.u.def.section;
13345 h->root.u.def.value = th->root.u.def.value & ~1;
13346 }
13347
13348 if (eh->dyn_relocs == NULL)
13349 return TRUE;
13350
13351 /* In the shared -Bsymbolic case, discard space allocated for
13352 dynamic pc-relative relocs against symbols which turn out to be
13353 defined in regular objects. For the normal shared case, discard
13354 space for pc-relative relocs that have become local due to symbol
13355 visibility changes. */
13356
13357 if (info->shared || htab->root.is_relocatable_executable)
13358 {
13359 /* The only relocs that use pc_count are R_ARM_REL32 and
13360 R_ARM_REL32_NOI, which will appear on something like
13361 ".long foo - .". We want calls to protected symbols to resolve
13362 directly to the function rather than going via the plt. If people
13363 want function pointer comparisons to work as expected then they
13364 should avoid writing assembly like ".long foo - .". */
13365 if (SYMBOL_CALLS_LOCAL (info, h))
13366 {
13367 struct elf_dyn_relocs **pp;
13368
13369 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13370 {
13371 p->count -= p->pc_count;
13372 p->pc_count = 0;
13373 if (p->count == 0)
13374 *pp = p->next;
13375 else
13376 pp = &p->next;
13377 }
13378 }
13379
13380 if (htab->vxworks_p)
13381 {
13382 struct elf_dyn_relocs **pp;
13383
13384 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13385 {
13386 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
13387 *pp = p->next;
13388 else
13389 pp = &p->next;
13390 }
13391 }
13392
13393 /* Also discard relocs on undefined weak syms with non-default
13394 visibility. */
13395 if (eh->dyn_relocs != NULL
13396 && h->root.type == bfd_link_hash_undefweak)
13397 {
13398 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
13399 eh->dyn_relocs = NULL;
13400
13401 /* Make sure undefined weak symbols are output as a dynamic
13402 symbol in PIEs. */
13403 else if (h->dynindx == -1
13404 && !h->forced_local)
13405 {
13406 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13407 return FALSE;
13408 }
13409 }
13410
13411 else if (htab->root.is_relocatable_executable && h->dynindx == -1
13412 && h->root.type == bfd_link_hash_new)
13413 {
13414 /* Output absolute symbols so that we can create relocations
13415 against them. For normal symbols we output a relocation
13416 against the section that contains them. */
13417 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13418 return FALSE;
13419 }
13420
13421 }
13422 else
13423 {
13424 /* For the non-shared case, discard space for relocs against
13425 symbols which turn out to need copy relocs or are not
13426 dynamic. */
13427
13428 if (!h->non_got_ref
13429 && ((h->def_dynamic
13430 && !h->def_regular)
13431 || (htab->root.dynamic_sections_created
13432 && (h->root.type == bfd_link_hash_undefweak
13433 || h->root.type == bfd_link_hash_undefined))))
13434 {
13435 /* Make sure this symbol is output as a dynamic symbol.
13436 Undefined weak syms won't yet be marked as dynamic. */
13437 if (h->dynindx == -1
13438 && !h->forced_local)
13439 {
13440 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13441 return FALSE;
13442 }
13443
13444 /* If that succeeded, we know we'll be keeping all the
13445 relocs. */
13446 if (h->dynindx != -1)
13447 goto keep;
13448 }
13449
13450 eh->dyn_relocs = NULL;
13451
13452 keep: ;
13453 }
13454
13455 /* Finally, allocate space. */
13456 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13457 {
13458 asection *sreloc = elf_section_data (p->sec)->sreloc;
13459 if (h->type == STT_GNU_IFUNC
13460 && eh->plt.noncall_refcount == 0
13461 && SYMBOL_REFERENCES_LOCAL (info, h))
13462 elf32_arm_allocate_irelocs (info, sreloc, p->count);
13463 else
13464 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
13465 }
13466
13467 return TRUE;
13468 }
13469
13470 /* Find any dynamic relocs that apply to read-only sections. */
13471
13472 static bfd_boolean
13473 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
13474 {
13475 struct elf32_arm_link_hash_entry * eh;
13476 struct elf_dyn_relocs * p;
13477
13478 eh = (struct elf32_arm_link_hash_entry *) h;
13479 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13480 {
13481 asection *s = p->sec;
13482
13483 if (s != NULL && (s->flags & SEC_READONLY) != 0)
13484 {
13485 struct bfd_link_info *info = (struct bfd_link_info *) inf;
13486
13487 info->flags |= DF_TEXTREL;
13488
13489 /* Not an error, just cut short the traversal. */
13490 return FALSE;
13491 }
13492 }
13493 return TRUE;
13494 }
13495
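/* Record whether code sections are to be byte-swapped for BE8 output.
   The flag is consulted later when deciding whether to set EF_ARM_BE8
   in the ELF header. */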
13496 void
13497 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
13498 int byteswap_code)
13499 {
13500 struct elf32_arm_link_hash_table *globals;
13501
13502 globals = elf32_arm_hash_table (info);
13503 if (globals == NULL)
13504 return;
13505
13506 globals->byteswap_code = byteswap_code;
13507 }
13508
13509 /* Set the sizes of the dynamic sections. */
13510
13511 static bfd_boolean
13512 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
13513 struct bfd_link_info * info)
13514 {
13515 bfd * dynobj;
13516 asection * s;
13517 bfd_boolean plt;
13518 bfd_boolean relocs;
13519 bfd *ibfd;
13520 struct elf32_arm_link_hash_table *htab;
13521
13522 htab = elf32_arm_hash_table (info);
13523 if (htab == NULL)
13524 return FALSE;
13525
13526 dynobj = elf_hash_table (info)->dynobj;
13527 BFD_ASSERT (dynobj != NULL);
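/* Decide, from the Tag_CPU_arch build attribute, whether BLX is
   available for ARM/Thumb interworking; this affects the PLT
   sequences and veneers chosen below. */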
13528 check_use_blx (htab);
13529
13530 if (elf_hash_table (info)->dynamic_sections_created)
13531 {
13532 /* Set the contents of the .interp section to the interpreter. */
13533 if (info->executable)
13534 {
13535 s = bfd_get_linker_section (dynobj, ".interp");
13536 BFD_ASSERT (s != NULL);
13537 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
13538 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
13539 }
13540 }
13541
13542 /* Set up .got offsets for local syms, and space for local dynamic
13543 relocs. */
13544 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13545 {
13546 bfd_signed_vma *local_got;
13547 bfd_signed_vma *end_local_got;
13548 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
13549 char *local_tls_type;
13550 bfd_vma *local_tlsdesc_gotent;
13551 bfd_size_type locsymcount;
13552 Elf_Internal_Shdr *symtab_hdr;
13553 asection *srel;
13554 bfd_boolean is_vxworks = htab->vxworks_p;
13555 unsigned int symndx;
13556
13557 if (! is_arm_elf (ibfd))
13558 continue;
13559
13560 for (s = ibfd->sections; s != NULL; s = s->next)
13561 {
13562 struct elf_dyn_relocs *p;
13563
13564 for (p = (struct elf_dyn_relocs *)
13565 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
13566 {
13567 if (!bfd_is_abs_section (p->sec)
13568 && bfd_is_abs_section (p->sec->output_section))
13569 {
13570 /* Input section has been discarded, either because
13571 it is a copy of a linkonce section or due to
13572 linker script /DISCARD/, so we'll be discarding
13573 the relocs too. */
13574 }
13575 else if (is_vxworks
13576 && strcmp (p->sec->output_section->name,
13577 ".tls_vars") == 0)
13578 {
13579 /* Relocations in VxWorks .tls_vars sections are
13580 handled specially by the loader. */
13581 }
13582 else if (p->count != 0)
13583 {
13584 srel = elf_section_data (p->sec)->sreloc;
13585 elf32_arm_allocate_dynrelocs (info, srel, p->count);
13586 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
13587 info->flags |= DF_TEXTREL;
13588 }
13589 }
13590 }
13591
13592 local_got = elf_local_got_refcounts (ibfd);
13593 if (!local_got)
13594 continue;
13595
13596 symtab_hdr = & elf_symtab_hdr (ibfd);
13597 locsymcount = symtab_hdr->sh_info;
13598 end_local_got = local_got + locsymcount;
13599 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
13600 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
13601 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
13602 symndx = 0;
13603 s = htab->root.sgot;
13604 srel = htab->root.srelgot;
13605 for (; local_got < end_local_got;
13606 ++local_got, ++local_iplt_ptr, ++local_tls_type,
13607 ++local_tlsdesc_gotent, ++symndx)
13608 {
13609 *local_tlsdesc_gotent = (bfd_vma) -1;
13610 local_iplt = *local_iplt_ptr;
13611 if (local_iplt != NULL)
13612 {
13613 struct elf_dyn_relocs *p;
13614
13615 if (local_iplt->root.refcount > 0)
13616 {
13617 elf32_arm_allocate_plt_entry (info, TRUE,
13618 &local_iplt->root,
13619 &local_iplt->arm);
13620 if (local_iplt->arm.noncall_refcount == 0)
13621 /* All references to the PLT are calls, so all
13622 non-call references can resolve directly to the
13623 run-time target. This means that the .got entry
13624 would be the same as the .igot.plt entry, so there's
13625 no point creating both. */
13626 *local_got = 0;
13627 }
13628 else
13629 {
13630 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
13631 local_iplt->root.offset = (bfd_vma) -1;
13632 }
13633
13634 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
13635 {
13636 asection *psrel;
13637
13638 psrel = elf_section_data (p->sec)->sreloc;
13639 if (local_iplt->arm.noncall_refcount == 0)
13640 elf32_arm_allocate_irelocs (info, psrel, p->count);
13641 else
13642 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
13643 }
13644 }
13645 if (*local_got > 0)
13646 {
13647 Elf_Internal_Sym *isym;
13648
13649 *local_got = s->size;
13650 if (*local_tls_type & GOT_TLS_GD)
13651 /* TLS_GD relocs need an 8-byte structure in the GOT. */
13652 s->size += 8;
13653 if (*local_tls_type & GOT_TLS_GDESC)
13654 {
13655 *local_tlsdesc_gotent = htab->root.sgotplt->size
13656 - elf32_arm_compute_jump_table_size (htab);
13657 htab->root.sgotplt->size += 8;
13658 *local_got = (bfd_vma) -2;
13659 /* plt.got_offset needs to know there's a TLS_DESC
13660 reloc in the middle of .got.plt. */
13661 htab->num_tls_desc++;
13662 }
13663 if (*local_tls_type & GOT_TLS_IE)
13664 s->size += 4;
13665
13666 if (*local_tls_type & GOT_NORMAL)
13667 {
13668 /* If the symbol is both GD and GDESC, *local_got
13669 may have been overwritten. */
13670 *local_got = s->size;
13671 s->size += 4;
13672 }
13673
13674 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
13675 if (isym == NULL)
13676 return FALSE;
13677
13678 /* If all references to an STT_GNU_IFUNC PLT are calls,
13679 then all non-call references, including this GOT entry,
13680 resolve directly to the run-time target. */
13681 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
13682 && (local_iplt == NULL
13683 || local_iplt->arm.noncall_refcount == 0))
13684 elf32_arm_allocate_irelocs (info, srel, 1);
13685 else if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
13686 || *local_tls_type & GOT_TLS_GD)
13687 elf32_arm_allocate_dynrelocs (info, srel, 1);
13688
13689 if (info->shared && *local_tls_type & GOT_TLS_GDESC)
13690 {
13691 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13692 htab->tls_trampoline = -1;
13693 }
13694 }
13695 else
13696 *local_got = (bfd_vma) -1;
13697 }
13698 }
13699
13700 if (htab->tls_ldm_got.refcount > 0)
13701 {
13702 /* Allocate two GOT entries and one dynamic relocation (if necessary)
13703 for R_ARM_TLS_LDM32 relocations. */
13704 htab->tls_ldm_got.offset = htab->root.sgot->size;
13705 htab->root.sgot->size += 8;
13706 if (info->shared)
13707 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13708 }
13709 else
13710 htab->tls_ldm_got.offset = -1;
13711
13712 /* Allocate global sym .plt and .got entries, and space for global
13713 sym dynamic relocs. */
13714 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
13715
13716 /* Here we rummage through the found bfds to collect glue information. */
13717 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13718 {
13719 if (! is_arm_elf (ibfd))
13720 continue;
13721
13722 /* Initialise mapping tables for code/data. */
13723 bfd_elf32_arm_init_maps (ibfd);
13724
13725 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
13726 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
13727 /* xgettext:c-format */
13728 _bfd_error_handler (_("Errors encountered processing file %s"),
13729 ibfd->filename);
13730 }
13731
13732 /* Allocate space for the glue sections now that we've sized them. */
13733 bfd_elf32_arm_allocate_interworking_sections (info);
13734
13735 /* For every jump slot reserved in the sgotplt, reloc_count is
13736 incremented. However, when we reserve space for TLS descriptors,
13737 it's not incremented, so in order to compute the space reserved
13738 for them, it suffices to multiply the reloc count by the jump
13739 slot size. */
13740 if (htab->root.srelplt)
13741 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
13742
13743 if (htab->tls_trampoline)
13744 {
13745 if (htab->root.splt->size == 0)
13746 htab->root.splt->size += htab->plt_header_size;
13747
13748 htab->tls_trampoline = htab->root.splt->size;
13749 htab->root.splt->size += htab->plt_entry_size;
13750
13751 /* If we're not using lazy TLS relocations, don't generate the
13752 PLT and GOT entries they require. */
13753 if (!(info->flags & DF_BIND_NOW))
13754 {
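/* Reserve one GOT word and room in the PLT for the lazy TLS
   descriptor resolver trampoline; DT_TLSDESC_GOT and DT_TLSDESC_PLT
   will be made to point at them when the dynamic sections are
   finished. */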
13755 htab->dt_tlsdesc_got = htab->root.sgot->size;
13756 htab->root.sgot->size += 4;
13757
13758 htab->dt_tlsdesc_plt = htab->root.splt->size;
13759 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
13760 }
13761 }
13762
13763 /* The check_relocs and adjust_dynamic_symbol entry points have
13764 determined the sizes of the various dynamic sections. Allocate
13765 memory for them. */
13766 plt = FALSE;
13767 relocs = FALSE;
13768 for (s = dynobj->sections; s != NULL; s = s->next)
13769 {
13770 const char * name;
13771
13772 if ((s->flags & SEC_LINKER_CREATED) == 0)
13773 continue;
13774
13775 /* It's OK to base decisions on the section name, because none
13776 of the dynobj section names depend upon the input files. */
13777 name = bfd_get_section_name (dynobj, s);
13778
13779 if (s == htab->root.splt)
13780 {
13781 /* Remember whether there is a PLT. */
13782 plt = s->size != 0;
13783 }
13784 else if (CONST_STRNEQ (name, ".rel"))
13785 {
13786 if (s->size != 0)
13787 {
13788 /* Remember whether there are any reloc sections other
13789 than .rel(a).plt and .rela.plt.unloaded. */
13790 if (s != htab->root.srelplt && s != htab->srelplt2)
13791 relocs = TRUE;
13792
13793 /* We use the reloc_count field as a counter if we need
13794 to copy relocs into the output file. */
13795 s->reloc_count = 0;
13796 }
13797 }
13798 else if (s != htab->root.sgot
13799 && s != htab->root.sgotplt
13800 && s != htab->root.iplt
13801 && s != htab->root.igotplt
13802 && s != htab->sdynbss)
13803 {
13804 /* It's not one of our sections, so don't allocate space. */
13805 continue;
13806 }
13807
13808 if (s->size == 0)
13809 {
13810 /* If we don't need this section, strip it from the
13811 output file. This is mostly to handle .rel(a).bss and
13812 .rel(a).plt. We must create both sections in
13813 create_dynamic_sections, because they must be created
13814 before the linker maps input sections to output
13815 sections. The linker does that before
13816 adjust_dynamic_symbol is called, and it is that
13817 function which decides whether anything needs to go
13818 into these sections. */
13819 s->flags |= SEC_EXCLUDE;
13820 continue;
13821 }
13822
13823 if ((s->flags & SEC_HAS_CONTENTS) == 0)
13824 continue;
13825
13826 /* Allocate memory for the section contents. */
13827 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
13828 if (s->contents == NULL)
13829 return FALSE;
13830 }
13831
13832 if (elf_hash_table (info)->dynamic_sections_created)
13833 {
13834 /* Add some entries to the .dynamic section. We fill in the
13835 values later, in elf32_arm_finish_dynamic_sections, but we
13836 must add the entries now so that we get the correct size for
13837 the .dynamic section. The DT_DEBUG entry is filled in by the
13838 dynamic linker and used by the debugger. */
13839 #define add_dynamic_entry(TAG, VAL) \
13840 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
13841
13842 if (info->executable)
13843 {
13844 if (!add_dynamic_entry (DT_DEBUG, 0))
13845 return FALSE;
13846 }
13847
13848 if (plt)
13849 {
13850 if ( !add_dynamic_entry (DT_PLTGOT, 0)
13851 || !add_dynamic_entry (DT_PLTRELSZ, 0)
13852 || !add_dynamic_entry (DT_PLTREL,
13853 htab->use_rel ? DT_REL : DT_RELA)
13854 || !add_dynamic_entry (DT_JMPREL, 0))
13855 return FALSE;
13856
13857 if (htab->dt_tlsdesc_plt
13858 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
13859 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
13860 return FALSE;
13861 }
13862
13863 if (relocs)
13864 {
13865 if (htab->use_rel)
13866 {
13867 if (!add_dynamic_entry (DT_REL, 0)
13868 || !add_dynamic_entry (DT_RELSZ, 0)
13869 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
13870 return FALSE;
13871 }
13872 else
13873 {
13874 if (!add_dynamic_entry (DT_RELA, 0)
13875 || !add_dynamic_entry (DT_RELASZ, 0)
13876 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
13877 return FALSE;
13878 }
13879 }
13880
13881 /* If any dynamic relocs apply to a read-only section,
13882 then we need a DT_TEXTREL entry. */
13883 if ((info->flags & DF_TEXTREL) == 0)
13884 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
13885 info);
13886
13887 if ((info->flags & DF_TEXTREL) != 0)
13888 {
13889 if (!add_dynamic_entry (DT_TEXTREL, 0))
13890 return FALSE;
13891 }
13892 if (htab->vxworks_p
13893 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
13894 return FALSE;
13895 }
13896 #undef add_dynamic_entry
13897
13898 return TRUE;
13899 }
13900
13901 /* Size sections even though they're not dynamic. We use this hook to set up
13902 _TLS_MODULE_BASE_, if needed. */
13903
13904 static bfd_boolean
13905 elf32_arm_always_size_sections (bfd *output_bfd,
13906 struct bfd_link_info *info)
13907 {
13908 asection *tls_sec;
13909
13910 if (info->relocatable)
13911 return TRUE;
13912
13913 tls_sec = elf_hash_table (info)->tls_sec;
13914
13915 if (tls_sec)
13916 {
13917 struct elf_link_hash_entry *tlsbase;
13918
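/* _TLS_MODULE_BASE_ marks the start of the TLS segment; TLS
   descriptor sequences compute addresses relative to it, so define
   it at offset zero of the TLS output section. */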
13919 tlsbase = elf_link_hash_lookup
13920 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
13921
13922 if (tlsbase)
13923 {
13924 struct bfd_link_hash_entry *bh = NULL;
13925 const struct elf_backend_data *bed
13926 = get_elf_backend_data (output_bfd);
13927
13928 if (!(_bfd_generic_link_add_one_symbol
13929 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
13930 tls_sec, 0, NULL, FALSE,
13931 bed->collect, &bh)))
13932 return FALSE;
13933
13934 tlsbase->type = STT_TLS;
13935 tlsbase = (struct elf_link_hash_entry *)bh;
13936 tlsbase->def_regular = 1;
13937 tlsbase->other = STV_HIDDEN;
13938 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
13939 }
13940 }
13941 return TRUE;
13942 }
13943
13944 /* Finish up dynamic symbol handling. We set the contents of various
13945 dynamic sections here. */
13946
13947 static bfd_boolean
13948 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
13949 struct bfd_link_info * info,
13950 struct elf_link_hash_entry * h,
13951 Elf_Internal_Sym * sym)
13952 {
13953 struct elf32_arm_link_hash_table *htab;
13954 struct elf32_arm_link_hash_entry *eh;
13955
13956 htab = elf32_arm_hash_table (info);
13957 if (htab == NULL)
13958 return FALSE;
13959
13960 eh = (struct elf32_arm_link_hash_entry *) h;
13961
13962 if (h->plt.offset != (bfd_vma) -1)
13963 {
13964 if (!eh->is_iplt)
13965 {
13966 BFD_ASSERT (h->dynindx != -1);
13967 elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
13968 h->dynindx, 0);
13969 }
13970
13971 if (!h->def_regular)
13972 {
13973 /* Mark the symbol as undefined, rather than as defined in
13974 the .plt section. Leave the value alone. */
13975 sym->st_shndx = SHN_UNDEF;
13976 /* If the symbol is weak, we do need to clear the value.
13977 Otherwise, the PLT entry would provide a definition for
13978 the symbol even if the symbol wasn't defined anywhere,
13979 and so the symbol would never be NULL. */
13980 if (!h->ref_regular_nonweak)
13981 sym->st_value = 0;
13982 }
13983 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
13984 {
13985 /* At least one non-call relocation references this .iplt entry,
13986 so the .iplt entry is the function's canonical address. */
13987 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
13988 sym->st_target_internal = ST_BRANCH_TO_ARM;
13989 sym->st_shndx = (_bfd_elf_section_from_bfd_section
13990 (output_bfd, htab->root.iplt->output_section));
13991 sym->st_value = (h->plt.offset
13992 + htab->root.iplt->output_section->vma
13993 + htab->root.iplt->output_offset);
13994 }
13995 }
13996
13997 if (h->needs_copy)
13998 {
13999 asection * s;
14000 Elf_Internal_Rela rel;
14001
14002 /* This symbol needs a copy reloc. Set it up. */
14003 BFD_ASSERT (h->dynindx != -1
14004 && (h->root.type == bfd_link_hash_defined
14005 || h->root.type == bfd_link_hash_defweak));
14006
14007 s = htab->srelbss;
14008 BFD_ASSERT (s != NULL);
14009
14010 rel.r_addend = 0;
14011 rel.r_offset = (h->root.u.def.value
14012 + h->root.u.def.section->output_section->vma
14013 + h->root.u.def.section->output_offset);
14014 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
14015 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
14016 }
14017
14018 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
14019 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
14020 to the ".got" section. */
14021 if (h == htab->root.hdynamic
14022 || (!htab->vxworks_p && h == htab->root.hgot))
14023 sym->st_shndx = SHN_ABS;
14024
14025 return TRUE;
14026 }
14027
14028 static void
14029 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14030 void *contents,
14031 const unsigned long *template, unsigned count)
14032 {
14033 unsigned ix;
14034
14035 for (ix = 0; ix != count; ix++)
14036 {
14037 unsigned long insn = template[ix];
14038
14039 /* Emit mov pc,rx if bx is not permitted. */
14040 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
14041 insn = (insn & 0xf000000f) | 0x01a0f000;
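/* 0x012fff10 matches the encoding of "bx rX" (register in bits 0-3);
   keeping the condition and register fields and OR-ing in 0x01a0f000
   produces the equivalent "mov pc, rX" for cores without BX. */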
14042 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
14043 }
14044 }
14045
14046 /* Finish up the dynamic sections. */
14047
14048 static bfd_boolean
14049 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
14050 {
14051 bfd * dynobj;
14052 asection * sgot;
14053 asection * sdyn;
14054 struct elf32_arm_link_hash_table *htab;
14055
14056 htab = elf32_arm_hash_table (info);
14057 if (htab == NULL)
14058 return FALSE;
14059
14060 dynobj = elf_hash_table (info)->dynobj;
14061
14062 sgot = htab->root.sgotplt;
14063 /* A broken linker script might have discarded the dynamic sections.
14064 Catch this here so that we do not seg-fault later on. */
14065 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
14066 return FALSE;
14067 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
14068
14069 if (elf_hash_table (info)->dynamic_sections_created)
14070 {
14071 asection *splt;
14072 Elf32_External_Dyn *dyncon, *dynconend;
14073
14074 splt = htab->root.splt;
14075 BFD_ASSERT (splt != NULL && sdyn != NULL);
14076 BFD_ASSERT (htab->symbian_p || sgot != NULL);
14077
14078 dyncon = (Elf32_External_Dyn *) sdyn->contents;
14079 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
14080
14081 for (; dyncon < dynconend; dyncon++)
14082 {
14083 Elf_Internal_Dyn dyn;
14084 const char * name;
14085 asection * s;
14086
14087 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
14088
14089 switch (dyn.d_tag)
14090 {
14091 unsigned int type;
14092
14093 default:
14094 if (htab->vxworks_p
14095 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
14096 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14097 break;
14098
14099 case DT_HASH:
14100 name = ".hash";
14101 goto get_vma_if_bpabi;
14102 case DT_STRTAB:
14103 name = ".dynstr";
14104 goto get_vma_if_bpabi;
14105 case DT_SYMTAB:
14106 name = ".dynsym";
14107 goto get_vma_if_bpabi;
14108 case DT_VERSYM:
14109 name = ".gnu.version";
14110 goto get_vma_if_bpabi;
14111 case DT_VERDEF:
14112 name = ".gnu.version_d";
14113 goto get_vma_if_bpabi;
14114 case DT_VERNEED:
14115 name = ".gnu.version_r";
14116 goto get_vma_if_bpabi;
14117
14118 case DT_PLTGOT:
14119 name = ".got";
14120 goto get_vma;
14121 case DT_JMPREL:
14122 name = RELOC_SECTION (htab, ".plt");
14123 get_vma:
14124 s = bfd_get_section_by_name (output_bfd, name);
14125 if (s == NULL)
14126 {
14127 /* PR ld/14397: Issue an error message if a required section is missing. */
14128 (*_bfd_error_handler)
14129 (_("error: required section '%s' not found in the linker script"), name);
14130 bfd_set_error (bfd_error_invalid_operation);
14131 return FALSE;
14132 }
14133 if (!htab->symbian_p)
14134 dyn.d_un.d_ptr = s->vma;
14135 else
14136 /* In the BPABI, tags in the PT_DYNAMIC section point
14137 at the file offset, not the memory address, for the
14138 convenience of the post linker. */
14139 dyn.d_un.d_ptr = s->filepos;
14140 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14141 break;
14142
14143 get_vma_if_bpabi:
14144 if (htab->symbian_p)
14145 goto get_vma;
14146 break;
14147
14148 case DT_PLTRELSZ:
14149 s = htab->root.srelplt;
14150 BFD_ASSERT (s != NULL);
14151 dyn.d_un.d_val = s->size;
14152 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14153 break;
14154
14155 case DT_RELSZ:
14156 case DT_RELASZ:
14157 if (!htab->symbian_p)
14158 {
14159 /* My reading of the SVR4 ABI indicates that the
14160 procedure linkage table relocs (DT_JMPREL) should be
14161 included in the overall relocs (DT_REL). This is
14162 what Solaris does. However, UnixWare cannot handle
14163 that case. Therefore, we override the DT_RELSZ entry
14164 here to make it not include the JMPREL relocs. Since
14165 the linker script arranges for .rel(a).plt to follow all
14166 other relocation sections, we don't have to worry
14167 about changing the DT_REL entry. */
14168 s = htab->root.srelplt;
14169 if (s != NULL)
14170 dyn.d_un.d_val -= s->size;
14171 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14172 break;
14173 }
14174 /* Fall through. */
14175
14176 case DT_REL:
14177 case DT_RELA:
14178 /* In the BPABI, the DT_REL tag must point at the file
14179 offset, not the VMA, of the first relocation
14180 section. So, we use code similar to that in
14181 elflink.c, but do not check for SHF_ALLOC on the
14182 relocation section, since relocation sections are
14183 never allocated under the BPABI. The comments above
14184 about UnixWare notwithstanding, we include all of the
14185 relocations here. */
14186 if (htab->symbian_p)
14187 {
14188 unsigned int i;
14189 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
14190 ? SHT_REL : SHT_RELA);
14191 dyn.d_un.d_val = 0;
14192 for (i = 1; i < elf_numsections (output_bfd); i++)
14193 {
14194 Elf_Internal_Shdr *hdr
14195 = elf_elfsections (output_bfd)[i];
14196 if (hdr->sh_type == type)
14197 {
14198 if (dyn.d_tag == DT_RELSZ
14199 || dyn.d_tag == DT_RELASZ)
14200 dyn.d_un.d_val += hdr->sh_size;
14201 else if ((ufile_ptr) hdr->sh_offset
14202 <= dyn.d_un.d_val - 1)
14203 dyn.d_un.d_val = hdr->sh_offset;
14204 }
14205 }
14206 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14207 }
14208 break;
14209
14210 case DT_TLSDESC_PLT:
14211 s = htab->root.splt;
14212 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14213 + htab->dt_tlsdesc_plt);
14214 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14215 break;
14216
14217 case DT_TLSDESC_GOT:
14218 s = htab->root.sgot;
14219 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14220 + htab->dt_tlsdesc_got);
14221 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14222 break;
14223
14224 /* Set the bottom bit of DT_INIT/FINI if the
14225 corresponding function is Thumb. */
14226 case DT_INIT:
14227 name = info->init_function;
14228 goto get_sym;
14229 case DT_FINI:
14230 name = info->fini_function;
14231 get_sym:
14232 /* If it wasn't set by elf_bfd_final_link
14233 then there is nothing to adjust. */
14234 if (dyn.d_un.d_val != 0)
14235 {
14236 struct elf_link_hash_entry * eh;
14237
14238 eh = elf_link_hash_lookup (elf_hash_table (info), name,
14239 FALSE, FALSE, TRUE);
14240 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
14241 {
14242 dyn.d_un.d_val |= 1;
14243 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14244 }
14245 }
14246 break;
14247 }
14248 }
14249
14250 /* Fill in the first entry in the procedure linkage table. */
14251 if (splt->size > 0 && htab->plt_header_size)
14252 {
14253 const bfd_vma *plt0_entry;
14254 bfd_vma got_address, plt_address, got_displacement;
14255
14256 /* Calculate the addresses of the GOT and PLT. */
14257 got_address = sgot->output_section->vma + sgot->output_offset;
14258 plt_address = splt->output_section->vma + splt->output_offset;
14259
14260 if (htab->vxworks_p)
14261 {
14262 /* The VxWorks GOT is relocated by the dynamic linker.
14263 Therefore, we must emit relocations rather than simply
14264 computing the values now. */
14265 Elf_Internal_Rela rel;
14266
14267 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
14268 put_arm_insn (htab, output_bfd, plt0_entry[0],
14269 splt->contents + 0);
14270 put_arm_insn (htab, output_bfd, plt0_entry[1],
14271 splt->contents + 4);
14272 put_arm_insn (htab, output_bfd, plt0_entry[2],
14273 splt->contents + 8);
14274 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
14275
14276 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
14277 rel.r_offset = plt_address + 12;
14278 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14279 rel.r_addend = 0;
14280 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
14281 htab->srelplt2->contents);
14282 }
14283 else if (htab->nacl_p)
14284 {
14285 unsigned int i;
14286
14287 got_displacement = got_address + 8 - (plt_address + 16);
14288
14289 put_arm_insn (htab, output_bfd,
14290 elf32_arm_nacl_plt0_entry[0]
14291 | arm_movw_immediate (got_displacement),
14292 splt->contents + 0);
14293 put_arm_insn (htab, output_bfd,
14294 elf32_arm_nacl_plt0_entry[1]
14295 | arm_movt_immediate (got_displacement),
14296 splt->contents + 4);
14297 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
14298 put_arm_insn (htab, output_bfd,
14299 elf32_arm_nacl_plt0_entry[i],
14300 splt->contents + (i * 4));
14301 }
14302 else
14303 {
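/* In the standard PLT header (elf32_arm_plt0_entry) the
   "add lr, pc, lr" instruction sits at offset 8 and reads PC as
   plt_address + 16, so the displacement computed here is biased by
   16 bytes. */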
14304 got_displacement = got_address - (plt_address + 16);
14305
14306 plt0_entry = elf32_arm_plt0_entry;
14307 put_arm_insn (htab, output_bfd, plt0_entry[0],
14308 splt->contents + 0);
14309 put_arm_insn (htab, output_bfd, plt0_entry[1],
14310 splt->contents + 4);
14311 put_arm_insn (htab, output_bfd, plt0_entry[2],
14312 splt->contents + 8);
14313 put_arm_insn (htab, output_bfd, plt0_entry[3],
14314 splt->contents + 12);
14315
14316 #ifdef FOUR_WORD_PLT
14317 /* The displacement value goes in the otherwise-unused
14318 last word of the second entry. */
14319 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
14320 #else
14321 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
14322 #endif
14323 }
14324 }
14325
14326 /* UnixWare sets the entsize of .plt to 4, although that doesn't
14327 really seem like the right value. */
14328 if (splt->output_section->owner == output_bfd)
14329 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
14330
14331 if (htab->dt_tlsdesc_plt)
14332 {
14333 bfd_vma got_address
14334 = sgot->output_section->vma + sgot->output_offset;
14335 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
14336 + htab->root.sgot->output_offset);
14337 bfd_vma plt_address
14338 = splt->output_section->vma + splt->output_offset;
14339
14340 arm_put_trampoline (htab, output_bfd,
14341 splt->contents + htab->dt_tlsdesc_plt,
14342 dl_tlsdesc_lazy_trampoline, 6);
14343
14344 bfd_put_32 (output_bfd,
14345 gotplt_address + htab->dt_tlsdesc_got
14346 - (plt_address + htab->dt_tlsdesc_plt)
14347 - dl_tlsdesc_lazy_trampoline[6],
14348 splt->contents + htab->dt_tlsdesc_plt + 24);
14349 bfd_put_32 (output_bfd,
14350 got_address - (plt_address + htab->dt_tlsdesc_plt)
14351 - dl_tlsdesc_lazy_trampoline[7],
14352 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
14353 }
14354
14355 if (htab->tls_trampoline)
14356 {
14357 arm_put_trampoline (htab, output_bfd,
14358 splt->contents + htab->tls_trampoline,
14359 tls_trampoline, 3);
14360 #ifdef FOUR_WORD_PLT
14361 bfd_put_32 (output_bfd, 0x00000000,
14362 splt->contents + htab->tls_trampoline + 12);
14363 #endif
14364 }
14365
14366 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
14367 {
14368 /* Correct the .rel(a).plt.unloaded relocations. They will have
14369 incorrect symbol indexes. */
14370 int num_plts;
14371 unsigned char *p;
14372
14373 num_plts = ((htab->root.splt->size - htab->plt_header_size)
14374 / htab->plt_entry_size);
14375 p = htab->srelplt2->contents + RELOC_SIZE (htab);
14376
14377 for (; num_plts; num_plts--)
14378 {
14379 Elf_Internal_Rela rel;
14380
14381 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14382 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14383 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14384 p += RELOC_SIZE (htab);
14385
14386 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14387 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
14388 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14389 p += RELOC_SIZE (htab);
14390 }
14391 }
14392 }
14393
14394 /* Fill in the first three entries in the global offset table. */
14395 if (sgot)
14396 {
14397 if (sgot->size > 0)
14398 {
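/* GOT[0] conventionally holds the link-time address of .dynamic;
   GOT[1] and GOT[2] are reserved for the dynamic linker to fill in
   at run time. */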
14399 if (sdyn == NULL)
14400 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
14401 else
14402 bfd_put_32 (output_bfd,
14403 sdyn->output_section->vma + sdyn->output_offset,
14404 sgot->contents);
14405 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
14406 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
14407 }
14408
14409 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
14410 }
14411
14412 return TRUE;
14413 }
14414
14415 static void
14416 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
14417 {
14418 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
14419 struct elf32_arm_link_hash_table *globals;
14420
14421 i_ehdrp = elf_elfheader (abfd);
14422
14423 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
14424 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
14425 else
14426 i_ehdrp->e_ident[EI_OSABI] = 0;
14427 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
14428
14429 if (link_info)
14430 {
14431 globals = elf32_arm_hash_table (link_info);
14432 if (globals != NULL && globals->byteswap_code)
14433 i_ehdrp->e_flags |= EF_ARM_BE8;
14434 }
14435
14436 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
14437 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
14438 {
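/* Tag_ABI_VFP_args records whether VFP registers are used to pass
   arguments (the hard-float variant of the AAPCS); reflect this in
   the e_flags float-ABI bits so that consumers can distinguish
   hard- and soft-float binaries. */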
14439 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
14440 if (abi)
14441 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
14442 else
14443 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
14444 }
14445 }
14446
14447 static enum elf_reloc_type_class
14448 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
14449 {
14450 switch ((int) ELF32_R_TYPE (rela->r_info))
14451 {
14452 case R_ARM_RELATIVE:
14453 return reloc_class_relative;
14454 case R_ARM_JUMP_SLOT:
14455 return reloc_class_plt;
14456 case R_ARM_COPY:
14457 return reloc_class_copy;
14458 default:
14459 return reloc_class_normal;
14460 }
14461 }
14462
14463 static void
14464 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
14465 {
14466 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
14467 }
14468
14469 /* Return TRUE if this is an unwinding table entry. */
14470
14471 static bfd_boolean
14472 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
14473 {
14474 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
14475 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
14476 }
14477
14478
14479 /* Set the type and flags for an ARM section. We do this based on
14480 the section name, which is a hack, but ought to work. */
14481
14482 static bfd_boolean
14483 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
14484 {
14485 const char * name;
14486
14487 name = bfd_get_section_name (abfd, sec);
14488
14489 if (is_arm_elf_unwind_section_name (abfd, name))
14490 {
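/* Unwind tables get the SHT_ARM_EXIDX type; SHF_LINK_ORDER keeps
   each .ARM.exidx section ordered with the code section it
   describes. */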
14491 hdr->sh_type = SHT_ARM_EXIDX;
14492 hdr->sh_flags |= SHF_LINK_ORDER;
14493 }
14494 return TRUE;
14495 }
14496
14497 /* Handle an ARM specific section when reading an object file. This is
14498 called when bfd_section_from_shdr finds a section with an unknown
14499 type. */
14500
14501 static bfd_boolean
14502 elf32_arm_section_from_shdr (bfd *abfd,
14503 Elf_Internal_Shdr * hdr,
14504 const char *name,
14505 int shindex)
14506 {
14507 /* There ought to be a place to keep ELF backend specific flags, but
14508 at the moment there isn't one. We just keep track of the
14509 sections by their name, instead. Fortunately, the ABI gives
14510 names for all the ARM specific sections, so we will probably get
14511 away with this. */
14512 switch (hdr->sh_type)
14513 {
14514 case SHT_ARM_EXIDX:
14515 case SHT_ARM_PREEMPTMAP:
14516 case SHT_ARM_ATTRIBUTES:
14517 break;
14518
14519 default:
14520 return FALSE;
14521 }
14522
14523 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
14524 return FALSE;
14525
14526 return TRUE;
14527 }
14528
14529 static _arm_elf_section_data *
14530 get_arm_elf_section_data (asection * sec)
14531 {
14532 if (sec && sec->owner && is_arm_elf (sec->owner))
14533 return elf32_arm_section_data (sec);
14534 else
14535 return NULL;
14536 }
14537
14538 typedef struct
14539 {
14540 void *flaginfo;
14541 struct bfd_link_info *info;
14542 asection *sec;
14543 int sec_shndx;
14544 int (*func) (void *, const char *, Elf_Internal_Sym *,
14545 asection *, struct elf_link_hash_entry *);
14546 } output_arch_syminfo;
14547
14548 enum map_symbol_type
14549 {
14550 ARM_MAP_ARM,
14551 ARM_MAP_THUMB,
14552 ARM_MAP_DATA
14553 };
14554
14555
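/* The ARM ELF specification defines the mapping symbols $a, $t and
   $d, which mark the start of ARM code, Thumb code and literal data
   within a section; disassemblers rely on them to pick the right
   decoding. */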
14556 /* Output a single mapping symbol. */
14557
14558 static bfd_boolean
14559 elf32_arm_output_map_sym (output_arch_syminfo *osi,
14560 enum map_symbol_type type,
14561 bfd_vma offset)
14562 {
14563 static const char *names[3] = {"$a", "$t", "$d"};
14564 Elf_Internal_Sym sym;
14565
14566 sym.st_value = osi->sec->output_section->vma
14567 + osi->sec->output_offset
14568 + offset;
14569 sym.st_size = 0;
14570 sym.st_other = 0;
14571 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
14572 sym.st_shndx = osi->sec_shndx;
14573 sym.st_target_internal = 0;
14574 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
14575 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
14576 }
14577
14578 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
14579 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
14580
14581 static bfd_boolean
14582 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
14583 bfd_boolean is_iplt_entry_p,
14584 union gotplt_union *root_plt,
14585 struct arm_plt_info *arm_plt)
14586 {
14587 struct elf32_arm_link_hash_table *htab;
14588 bfd_vma addr, plt_header_size;
14589
14590 if (root_plt->offset == (bfd_vma) -1)
14591 return TRUE;
14592
14593 htab = elf32_arm_hash_table (osi->info);
14594 if (htab == NULL)
14595 return FALSE;
14596
14597 if (is_iplt_entry_p)
14598 {
14599 osi->sec = htab->root.iplt;
14600 plt_header_size = 0;
14601 }
14602 else
14603 {
14604 osi->sec = htab->root.splt;
14605 plt_header_size = htab->plt_header_size;
14606 }
14607 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
14608 (osi->info->output_bfd, osi->sec->output_section));
14609
14610 addr = root_plt->offset & -2;
14611 if (htab->symbian_p)
14612 {
14613 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14614 return FALSE;
14615 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
14616 return FALSE;
14617 }
14618 else if (htab->vxworks_p)
14619 {
14620 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14621 return FALSE;
14622 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
14623 return FALSE;
14624 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
14625 return FALSE;
14626 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
14627 return FALSE;
14628 }
14629 else if (htab->nacl_p)
14630 {
14631 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14632 return FALSE;
14633 }
14634 else
14635 {
14636 bfd_boolean thumb_stub_p;
14637
14638 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
14639 if (thumb_stub_p)
14640 {
14641 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
14642 return FALSE;
14643 }
14644 #ifdef FOUR_WORD_PLT
14645 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14646 return FALSE;
14647 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
14648 return FALSE;
14649 #else
14650 /* A three-word PLT with no Thumb thunk contains only ARM code,
14651 so we only need to output a mapping symbol for the first PLT entry
14652 and for entries with Thumb thunks. */
14653 if (thumb_stub_p || addr == plt_header_size)
14654 {
14655 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14656 return FALSE;
14657 }
14658 #endif
14659 }
14660
14661 return TRUE;
14662 }
14663
14664 /* Output mapping symbols for PLT entries associated with H. */
14665
14666 static bfd_boolean
14667 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
14668 {
14669 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
14670 struct elf32_arm_link_hash_entry *eh;
14671
14672 if (h->root.type == bfd_link_hash_indirect)
14673 return TRUE;
14674
14675 if (h->root.type == bfd_link_hash_warning)
14676 /* When warning symbols are created, they **replace** the "real"
14677 entry in the hash table, thus we never get to see the real
14678 symbol in a hash traversal. So look at it now. */
14679 h = (struct elf_link_hash_entry *) h->root.u.i.link;
14680
14681 eh = (struct elf32_arm_link_hash_entry *) h;
14682 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
14683 &h->plt, &eh->plt);
14684 }
14685
14686 /* Output a single local symbol for a generated stub. */
14687
14688 static bfd_boolean
14689 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
14690 bfd_vma offset, bfd_vma size)
14691 {
14692 Elf_Internal_Sym sym;
14693
14694 sym.st_value = osi->sec->output_section->vma
14695 + osi->sec->output_offset
14696 + offset;
14697 sym.st_size = size;
14698 sym.st_other = 0;
14699 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14700 sym.st_shndx = osi->sec_shndx;
14701 sym.st_target_internal = 0;
14702 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
14703 }
14704
14705 static bfd_boolean
14706 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
14707 void * in_arg)
14708 {
14709 struct elf32_arm_stub_hash_entry *stub_entry;
14710 asection *stub_sec;
14711 bfd_vma addr;
14712 char *stub_name;
14713 output_arch_syminfo *osi;
14714 const insn_sequence *template_sequence;
14715 enum stub_insn_type prev_type;
14716 int size;
14717 int i;
14718 enum map_symbol_type sym_type;
14719
14720 /* Massage our args to the form they really have. */
14721 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
14722 osi = (output_arch_syminfo *) in_arg;
14723
14724 stub_sec = stub_entry->stub_sec;
14725
14726 /* Ensure this stub is attached to the current section being
14727 processed. */
14728 if (stub_sec != osi->sec)
14729 return TRUE;
14730
14731 addr = (bfd_vma) stub_entry->stub_offset;
14732 stub_name = stub_entry->output_name;
14733
14734 template_sequence = stub_entry->stub_template;
14735 switch (template_sequence[0].type)
14736 {
14737 case ARM_TYPE:
14738 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
14739 return FALSE;
14740 break;
14741 case THUMB16_TYPE:
14742 case THUMB32_TYPE:
14743 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
14744 stub_entry->stub_size))
14745 return FALSE;
14746 break;
14747 default:
14748 BFD_FAIL ();
14749 return 0;
14750 }
14751
14752 prev_type = DATA_TYPE;
14753 size = 0;
14754 for (i = 0; i < stub_entry->stub_template_size; i++)
14755 {
14756 switch (template_sequence[i].type)
14757 {
14758 case ARM_TYPE:
14759 sym_type = ARM_MAP_ARM;
14760 break;
14761
14762 case THUMB16_TYPE:
14763 case THUMB32_TYPE:
14764 sym_type = ARM_MAP_THUMB;
14765 break;
14766
14767 case DATA_TYPE:
14768 sym_type = ARM_MAP_DATA;
14769 break;
14770
14771 default:
14772 BFD_FAIL ();
14773 return FALSE;
14774 }
14775
14776 if (template_sequence[i].type != prev_type)
14777 {
14778 prev_type = template_sequence[i].type;
14779 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
14780 return FALSE;
14781 }
14782
14783 switch (template_sequence[i].type)
14784 {
14785 case ARM_TYPE:
14786 case THUMB32_TYPE:
14787 size += 4;
14788 break;
14789
14790 case THUMB16_TYPE:
14791 size += 2;
14792 break;
14793
14794 case DATA_TYPE:
14795 size += 4;
14796 break;
14797
14798 default:
14799 BFD_FAIL ();
14800 return FALSE;
14801 }
14802 }
14803
14804 return TRUE;
14805 }
14806
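/* Editorial note (not part of BFD): arm_map_one_stub above walks the
   stub template and records a mapping symbol only where the
   instruction class changes, stepping 2 bytes for Thumb16 entries and
   4 bytes otherwise.  The standalone sketch below repeats that size
   bookkeeping for a made-up template (two ARM instructions followed
   by a literal word) so the stepping is easy to follow; all names are
   invented for the illustration.  */
#if 0
#include <stdio.h>

enum kind { K_ARM, K_THUMB16, K_THUMB32, K_DATA };

int
main (void)
{
  enum kind tpl[] = { K_ARM, K_ARM, K_DATA };
  enum kind prev = K_DATA;      /* Matches the DATA_TYPE seed above.  */
  unsigned int size = 0, i;

  for (i = 0; i < sizeof tpl / sizeof tpl[0]; i++)
    {
      if (tpl[i] != prev)
        {
          char cls = (tpl[i] == K_ARM) ? 'a'
                     : (tpl[i] == K_DATA) ? 'd' : 't';
          printf ("$%c mapping symbol at offset %u\n", cls, size);
          prev = tpl[i];
        }
      size += (tpl[i] == K_THUMB16) ? 2 : 4;
    }
  printf ("stub size %u\n", size);   /* Prints 12 for this template.  */
  return 0;
}
#endif
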
14807 /* Output mapping symbols for linker-generated sections,
14808 and for those data-only sections that do not already have
14809 a $d mapping symbol. */
14810
14811 static bfd_boolean
14812 elf32_arm_output_arch_local_syms (bfd *output_bfd,
14813 struct bfd_link_info *info,
14814 void *flaginfo,
14815 int (*func) (void *, const char *,
14816 Elf_Internal_Sym *,
14817 asection *,
14818 struct elf_link_hash_entry *))
14819 {
14820 output_arch_syminfo osi;
14821 struct elf32_arm_link_hash_table *htab;
14822 bfd_vma offset;
14823 bfd_size_type size;
14824 bfd *input_bfd;
14825
14826 htab = elf32_arm_hash_table (info);
14827 if (htab == NULL)
14828 return FALSE;
14829
14830 check_use_blx (htab);
14831
14832 osi.flaginfo = flaginfo;
14833 osi.info = info;
14834 osi.func = func;
14835
14836 /* Add a $d mapping symbol to data-only sections that
14837 don't have any mapping symbol. This may result in (harmless) redundant
14838 mapping symbols. */
14839 for (input_bfd = info->input_bfds;
14840 input_bfd != NULL;
14841 input_bfd = input_bfd->link_next)
14842 {
14843 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
14844 for (osi.sec = input_bfd->sections;
14845 osi.sec != NULL;
14846 osi.sec = osi.sec->next)
14847 {
14848 if (osi.sec->output_section != NULL
14849 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
14850 != 0)
14851 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
14852 == SEC_HAS_CONTENTS
14853 && get_arm_elf_section_data (osi.sec) != NULL
14854 && get_arm_elf_section_data (osi.sec)->mapcount == 0
14855 && osi.sec->size > 0
14856 && (osi.sec->flags & SEC_EXCLUDE) == 0)
14857 {
14858 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14859 (output_bfd, osi.sec->output_section);
14860 if (osi.sec_shndx != (int)SHN_BAD)
14861 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
14862 }
14863 }
14864 }
14865
14866 /* ARM->Thumb glue. */
14867 if (htab->arm_glue_size > 0)
14868 {
14869 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
14870 ARM2THUMB_GLUE_SECTION_NAME);
14871
14872 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14873 (output_bfd, osi.sec->output_section);
14874 if (info->shared || htab->root.is_relocatable_executable
14875 || htab->pic_veneer)
14876 size = ARM2THUMB_PIC_GLUE_SIZE;
14877 else if (htab->use_blx)
14878 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
14879 else
14880 size = ARM2THUMB_STATIC_GLUE_SIZE;
14881
14882 for (offset = 0; offset < htab->arm_glue_size; offset += size)
14883 {
14884 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
14885 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
14886 }
14887 }
14888
14889 /* Thumb->ARM glue. */
14890 if (htab->thumb_glue_size > 0)
14891 {
14892 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
14893 THUMB2ARM_GLUE_SECTION_NAME);
14894
14895 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14896 (output_bfd, osi.sec->output_section);
14897 size = THUMB2ARM_GLUE_SIZE;
14898
14899 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
14900 {
14901 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
14902 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
14903 }
14904 }
14905
14906 /* ARMv4 BX veneers. */
14907 if (htab->bx_glue_size > 0)
14908 {
14909 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
14910 ARM_BX_GLUE_SECTION_NAME);
14911
14912 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14913 (output_bfd, osi.sec->output_section);
14914
14915 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
14916 }
14917
14918 /* Long-call stubs. */
14919 if (htab->stub_bfd && htab->stub_bfd->sections)
14920 {
14921 asection* stub_sec;
14922
14923 for (stub_sec = htab->stub_bfd->sections;
14924 stub_sec != NULL;
14925 stub_sec = stub_sec->next)
14926 {
14927 /* Ignore non-stub sections. */
14928 if (!strstr (stub_sec->name, STUB_SUFFIX))
14929 continue;
14930
14931 osi.sec = stub_sec;
14932
14933 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14934 (output_bfd, osi.sec->output_section);
14935
14936 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
14937 }
14938 }
14939
14940 /* Finally, output mapping symbols for the PLT. */
14941 if (htab->root.splt && htab->root.splt->size > 0)
14942 {
14943 osi.sec = htab->root.splt;
14944 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
14945 (output_bfd, osi.sec->output_section));
14946
14947 /* Output mapping symbols for the PLT header. SymbianOS does not have a
14948 PLT header. */
14949 if (htab->vxworks_p)
14950 {
14951 /* VxWorks shared libraries have no PLT header. */
14952 if (!info->shared)
14953 {
14954 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14955 return FALSE;
14956 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
14957 return FALSE;
14958 }
14959 }
14960 else if (htab->nacl_p)
14961 {
14962 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14963 return FALSE;
14964 }
14965 else if (!htab->symbian_p)
14966 {
14967 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14968 return FALSE;
14969 #ifndef FOUR_WORD_PLT
14970 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
14971 return FALSE;
14972 #endif
14973 }
14974 }
14975 if ((htab->root.splt && htab->root.splt->size > 0)
14976 || (htab->root.iplt && htab->root.iplt->size > 0))
14977 {
14978 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
14979 for (input_bfd = info->input_bfds;
14980 input_bfd != NULL;
14981 input_bfd = input_bfd->link_next)
14982 {
14983 struct arm_local_iplt_info **local_iplt;
14984 unsigned int i, num_syms;
14985
14986 local_iplt = elf32_arm_local_iplt (input_bfd);
14987 if (local_iplt != NULL)
14988 {
14989 num_syms = elf_symtab_hdr (input_bfd).sh_info;
14990 for (i = 0; i < num_syms; i++)
14991 if (local_iplt[i] != NULL
14992 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
14993 &local_iplt[i]->root,
14994 &local_iplt[i]->arm))
14995 return FALSE;
14996 }
14997 }
14998 }
14999 if (htab->dt_tlsdesc_plt != 0)
15000 {
15001 /* Mapping symbols for the lazy TLS trampoline. */
15002 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
15003 return FALSE;
15004
15005 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15006 htab->dt_tlsdesc_plt + 24))
15007 return FALSE;
15008 }
15009 if (htab->tls_trampoline != 0)
15010 {
15011 /* Mapping symbols for the TLS trampoline. */
15012 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
15013 return FALSE;
15014 #ifdef FOUR_WORD_PLT
15015 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15016 htab->tls_trampoline + 12))
15017 return FALSE;
15018 #endif
15019 }
15020
15021 return TRUE;
15022 }
15023
15024 /* Allocate target specific section data. */
15025
15026 static bfd_boolean
15027 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
15028 {
15029 if (!sec->used_by_bfd)
15030 {
15031 _arm_elf_section_data *sdata;
15032 bfd_size_type amt = sizeof (*sdata);
15033
15034 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
15035 if (sdata == NULL)
15036 return FALSE;
15037 sec->used_by_bfd = sdata;
15038 }
15039
15040 return _bfd_elf_new_section_hook (abfd, sec);
15041 }
15042
15043
15044 /* Used to order a list of mapping symbols by address. */
15045
15046 static int
15047 elf32_arm_compare_mapping (const void * a, const void * b)
15048 {
15049 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
15050 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
15051
15052 if (amap->vma > bmap->vma)
15053 return 1;
15054 else if (amap->vma < bmap->vma)
15055 return -1;
15056 else if (amap->type > bmap->type)
15057 /* Ensure results do not depend on the host qsort for objects with
15058 multiple mapping symbols at the same address by sorting on type
15059 after vma. */
15060 return 1;
15061 else if (amap->type < bmap->type)
15062 return -1;
15063 else
15064 return 0;
15065 }
15066
15067 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
15068
15069 static unsigned long
15070 offset_prel31 (unsigned long addr, bfd_vma offset)
15071 {
15072 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
15073 }
15074
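/* Editorial note (not part of BFD): an .ARM.exidx prel31 field holds a
   31-bit signed offset relative to the word's own address, with bit 31
   reserved, which is why offset_prel31 above adjusts only the low 31
   bits.  The sketch below shows how such a field would be sign-extended
   back into an ordinary signed offset by a consumer; the helper name is
   invented for the illustration.  */
#if 0
#include <stdint.h>

/* Interpret the low 31 bits of WORD as a two's-complement offset.  */
static int32_t
prel31_to_offset (uint32_t word)
{
  int32_t value = (int32_t) (word & 0x3fffffffu);   /* Bits 0-29.  */

  if (word & 0x40000000u)       /* Bit 30 is the sign of the field.  */
    value -= 0x40000000;

  return value;
}
#endif
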
15075 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
15076 relocations. */
15077
15078 static void
15079 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
15080 {
15081 unsigned long first_word = bfd_get_32 (output_bfd, from);
15082 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
15083
15084 /* High bit of first word is supposed to be zero. */
15085 if ((first_word & 0x80000000ul) == 0)
15086 first_word = offset_prel31 (first_word, offset);
15087
15088 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
15089 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
15090 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
15091 second_word = offset_prel31 (second_word, offset);
15092
15093 bfd_put_32 (output_bfd, first_word, to);
15094 bfd_put_32 (output_bfd, second_word, to + 4);
15095 }
15096
15097 /* Data for make_branch_to_a8_stub(). */
15098
15099 struct a8_branch_to_stub_data
15100 {
15101 asection *writing_section;
15102 bfd_byte *contents;
15103 };
15104
15105
15106 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
15107 places for a particular section. */
15108
15109 static bfd_boolean
15110 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
15111 void *in_arg)
15112 {
15113 struct elf32_arm_stub_hash_entry *stub_entry;
15114 struct a8_branch_to_stub_data *data;
15115 bfd_byte *contents;
15116 unsigned long branch_insn;
15117 bfd_vma veneered_insn_loc, veneer_entry_loc;
15118 bfd_signed_vma branch_offset;
15119 bfd *abfd;
15120 unsigned int target;
15121
15122 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15123 data = (struct a8_branch_to_stub_data *) in_arg;
15124
15125 if (stub_entry->target_section != data->writing_section
15126 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
15127 return TRUE;
15128
15129 contents = data->contents;
15130
15131 veneered_insn_loc = stub_entry->target_section->output_section->vma
15132 + stub_entry->target_section->output_offset
15133 + stub_entry->target_value;
15134
15135 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
15136 + stub_entry->stub_sec->output_offset
15137 + stub_entry->stub_offset;
15138
15139 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
15140 veneered_insn_loc &= ~3u;
15141
15142 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
15143
15144 abfd = stub_entry->target_section->owner;
15145 target = stub_entry->target_value;
15146
15147 /* We attempt to avoid this condition by setting stubs_always_after_branch
15148 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
15149 This check is just to be on the safe side... */
15150 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
15151 {
15152 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
15153 "allocated in unsafe location"), abfd);
15154 return FALSE;
15155 }
15156
15157 switch (stub_entry->stub_type)
15158 {
15159 case arm_stub_a8_veneer_b:
15160 case arm_stub_a8_veneer_b_cond:
15161 branch_insn = 0xf0009000;
15162 goto jump24;
15163
15164 case arm_stub_a8_veneer_blx:
15165 branch_insn = 0xf000e800;
15166 goto jump24;
15167
15168 case arm_stub_a8_veneer_bl:
15169 {
15170 unsigned int i1, j1, i2, j2, s;
15171
15172 branch_insn = 0xf000d000;
15173
15174 jump24:
15175 if (branch_offset < -16777216 || branch_offset > 16777214)
15176 {
15177 /* There's not much we can do apart from complain if this
15178 happens. */
15179 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
15180 "of range (input file too large)"), abfd);
15181 return FALSE;
15182 }
15183
15184 /* i1 = not(j1 eor s), so:
15185 not i1 = j1 eor s
15186 j1 = (not i1) eor s. */
15187
15188 branch_insn |= (branch_offset >> 1) & 0x7ff;
15189 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
15190 i2 = (branch_offset >> 22) & 1;
15191 i1 = (branch_offset >> 23) & 1;
15192 s = (branch_offset >> 24) & 1;
15193 j1 = (!i1) ^ s;
15194 j2 = (!i2) ^ s;
15195 branch_insn |= j2 << 11;
15196 branch_insn |= j1 << 13;
15197 branch_insn |= s << 26;
15198 }
15199 break;
15200
15201 default:
15202 BFD_FAIL ();
15203 return FALSE;
15204 }
15205
15206 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
15207 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
15208
15209 return TRUE;
15210 }
15211
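/* Editorial note (not part of BFD): the jump24 case above packs a
   signed 25-bit branch offset into the Thumb-2 B/BL/BLX encoding,
   with the first halfword held in the upper 16 bits of branch_insn
   and J1/J2 stored as (NOT I1) XOR S and (NOT I2) XOR S.  For
   illustration, the sketch below performs the inverse transformation
   on a word laid out the same way; the function name is invented.  */
#if 0
#include <stdint.h>

static int32_t
decode_thumb2_branch_offset (uint32_t insn)
{
  uint32_t s     = (insn >> 26) & 1;
  uint32_t imm10 = (insn >> 16) & 0x3ff;
  uint32_t j1    = (insn >> 13) & 1;
  uint32_t j2    = (insn >> 11) & 1;
  uint32_t imm11 = insn & 0x7ff;
  uint32_t i1    = !(j1 ^ s);
  uint32_t i2    = !(j2 ^ s);
  uint32_t field = (s << 24) | (i1 << 23) | (i2 << 22)
                   | (imm10 << 12) | (imm11 << 1);
  int32_t offset = (int32_t) (field & 0x00ffffffu);  /* Bits 0-23.  */

  if (field & 0x01000000u)      /* Bit 24 is the sign bit.  */
    offset -= 0x01000000;

  return offset;
}
#endif
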
15212 /* Do code byteswapping. Return FALSE afterwards so that the section is
15213 written out as normal. */
15214
15215 static bfd_boolean
15216 elf32_arm_write_section (bfd *output_bfd,
15217 struct bfd_link_info *link_info,
15218 asection *sec,
15219 bfd_byte *contents)
15220 {
15221 unsigned int mapcount, errcount;
15222 _arm_elf_section_data *arm_data;
15223 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
15224 elf32_arm_section_map *map;
15225 elf32_vfp11_erratum_list *errnode;
15226 bfd_vma ptr;
15227 bfd_vma end;
15228 bfd_vma offset = sec->output_section->vma + sec->output_offset;
15229 bfd_byte tmp;
15230 unsigned int i;
15231
15232 if (globals == NULL)
15233 return FALSE;
15234
15235 /* If this section has not been allocated an _arm_elf_section_data
15236 structure then we cannot record anything. */
15237 arm_data = get_arm_elf_section_data (sec);
15238 if (arm_data == NULL)
15239 return FALSE;
15240
15241 mapcount = arm_data->mapcount;
15242 map = arm_data->map;
15243 errcount = arm_data->erratumcount;
15244
15245 if (errcount != 0)
15246 {
15247 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
15248
15249 for (errnode = arm_data->erratumlist; errnode != 0;
15250 errnode = errnode->next)
15251 {
15252 bfd_vma target = errnode->vma - offset;
15253
15254 switch (errnode->type)
15255 {
15256 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
15257 {
15258 bfd_vma branch_to_veneer;
15259 /* Original condition code of instruction, plus bit mask for
15260 ARM B instruction. */
15261 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
15262 | 0x0a000000;
15263
15264 /* The instruction is before the label. */
15265 target -= 4;
15266
15267 /* Above offset included in -4 below. */
15268 branch_to_veneer = errnode->u.b.veneer->vma
15269 - errnode->vma - 4;
15270
15271 if ((signed) branch_to_veneer < -(1 << 25)
15272 || (signed) branch_to_veneer >= (1 << 25))
15273 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15274 "range"), output_bfd);
15275
15276 insn |= (branch_to_veneer >> 2) & 0xffffff;
15277 contents[endianflip ^ target] = insn & 0xff;
15278 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15279 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15280 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15281 }
15282 break;
15283
15284 case VFP11_ERRATUM_ARM_VENEER:
15285 {
15286 bfd_vma branch_from_veneer;
15287 unsigned int insn;
15288
15289 /* Take size of veneer into account. */
15290 branch_from_veneer = errnode->u.v.branch->vma
15291 - errnode->vma - 12;
15292
15293 if ((signed) branch_from_veneer < -(1 << 25)
15294 || (signed) branch_from_veneer >= (1 << 25))
15295 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15296 "range"), output_bfd);
15297
15298 /* Original instruction. */
15299 insn = errnode->u.v.branch->u.b.vfp_insn;
15300 contents[endianflip ^ target] = insn & 0xff;
15301 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15302 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15303 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15304
15305 /* Branch back to insn after original insn. */
15306 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
15307 contents[endianflip ^ (target + 4)] = insn & 0xff;
15308 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
15309 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
15310 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
15311 }
15312 break;
15313
15314 default:
15315 abort ();
15316 }
15317 }
15318 }
15319
15320 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
15321 {
15322 arm_unwind_table_edit *edit_node
15323 = arm_data->u.exidx.unwind_edit_list;
15324 /* Now, sec->size is the size of the section we will write. The original
15325 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
15326 markers) was sec->rawsize. (If we performed no edits then rawsize
15327 will be zero and we should use size instead.) */
15328 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
15329 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
15330 unsigned int in_index, out_index;
15331 bfd_vma add_to_offsets = 0;
15332
15333 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
15334 {
15335 if (edit_node)
15336 {
15337 unsigned int edit_index = edit_node->index;
15338
15339 if (in_index < edit_index && in_index * 8 < input_size)
15340 {
15341 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15342 contents + in_index * 8, add_to_offsets);
15343 out_index++;
15344 in_index++;
15345 }
15346 else if (in_index == edit_index
15347 || (in_index * 8 >= input_size
15348 && edit_index == UINT_MAX))
15349 {
15350 switch (edit_node->type)
15351 {
15352 case DELETE_EXIDX_ENTRY:
15353 in_index++;
15354 add_to_offsets += 8;
15355 break;
15356
15357 case INSERT_EXIDX_CANTUNWIND_AT_END:
15358 {
15359 asection *text_sec = edit_node->linked_section;
15360 bfd_vma text_offset = text_sec->output_section->vma
15361 + text_sec->output_offset
15362 + text_sec->size;
15363 bfd_vma exidx_offset = offset + out_index * 8;
15364 unsigned long prel31_offset;
15365
15366 /* Note: this is meant to be equivalent to an
15367 R_ARM_PREL31 relocation. These synthetic
15368 EXIDX_CANTUNWIND markers are not relocated by the
15369 usual BFD method. */
15370 prel31_offset = (text_offset - exidx_offset)
15371 & 0x7ffffffful;
15372
15373 /* First address we can't unwind. */
15374 bfd_put_32 (output_bfd, prel31_offset,
15375 &edited_contents[out_index * 8]);
15376
15377 /* Code for EXIDX_CANTUNWIND. */
15378 bfd_put_32 (output_bfd, 0x1,
15379 &edited_contents[out_index * 8 + 4]);
15380
15381 out_index++;
15382 add_to_offsets -= 8;
15383 }
15384 break;
15385 }
15386
15387 edit_node = edit_node->next;
15388 }
15389 }
15390 else
15391 {
15392 /* No more edits, copy remaining entries verbatim. */
15393 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15394 contents + in_index * 8, add_to_offsets);
15395 out_index++;
15396 in_index++;
15397 }
15398 }
15399
15400 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
15401 bfd_set_section_contents (output_bfd, sec->output_section,
15402 edited_contents,
15403 (file_ptr) sec->output_offset, sec->size);
15404
15405 return TRUE;
15406 }
15407
15408 /* Fix code to point to Cortex-A8 erratum stubs. */
15409 if (globals->fix_cortex_a8)
15410 {
15411 struct a8_branch_to_stub_data data;
15412
15413 data.writing_section = sec;
15414 data.contents = contents;
15415
15416 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
15417 &data);
15418 }
15419
15420 if (mapcount == 0)
15421 return FALSE;
15422
15423 if (globals->byteswap_code)
15424 {
15425 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
15426
15427 ptr = map[0].vma;
15428 for (i = 0; i < mapcount; i++)
15429 {
15430 if (i == mapcount - 1)
15431 end = sec->size;
15432 else
15433 end = map[i + 1].vma;
15434
15435 switch (map[i].type)
15436 {
15437 case 'a':
15438 /* Byte swap code words. */
15439 while (ptr + 3 < end)
15440 {
15441 tmp = contents[ptr];
15442 contents[ptr] = contents[ptr + 3];
15443 contents[ptr + 3] = tmp;
15444 tmp = contents[ptr + 1];
15445 contents[ptr + 1] = contents[ptr + 2];
15446 contents[ptr + 2] = tmp;
15447 ptr += 4;
15448 }
15449 break;
15450
15451 case 't':
15452 /* Byte swap code halfwords. */
15453 while (ptr + 1 < end)
15454 {
15455 tmp = contents[ptr];
15456 contents[ptr] = contents[ptr + 1];
15457 contents[ptr + 1] = tmp;
15458 ptr += 2;
15459 }
15460 break;
15461
15462 case 'd':
15463 /* Leave data alone. */
15464 break;
15465 }
15466 ptr = end;
15467 }
15468 }
15469
15470 free (map);
15471 arm_data->mapcount = -1;
15472 arm_data->mapsize = 0;
15473 arm_data->map = NULL;
15474
15475 return FALSE;
15476 }
15477
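/* Editorial note (not part of BFD): when byteswap_code is in effect,
   the routine above uses the mapping symbols to decide how each byte
   range is swapped: 32-bit words in ARM ('a') regions, 16-bit
   halfwords in Thumb ('t') regions, and nothing in data ('d')
   regions.  The standalone sketch below restates just those swapping
   rules in isolation; it is an illustration, with invented names.  */
#if 0
#include <stddef.h>

static void
swap_region (unsigned char *buf, size_t start, size_t end, char type)
{
  size_t p = start;
  unsigned char tmp;

  if (type == 'a')              /* ARM: swap 32-bit words.  */
    for (; p + 3 < end; p += 4)
      {
        tmp = buf[p];     buf[p]     = buf[p + 3]; buf[p + 3] = tmp;
        tmp = buf[p + 1]; buf[p + 1] = buf[p + 2]; buf[p + 2] = tmp;
      }
  else if (type == 't')         /* Thumb: swap 16-bit halfwords.  */
    for (; p + 1 < end; p += 2)
      {
        tmp = buf[p];
        buf[p] = buf[p + 1];
        buf[p + 1] = tmp;
      }
  /* 'd' (data) regions are left untouched.  */
}
#endif
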
15478 /* Mangle thumb function symbols as we read them in. */
15479
15480 static bfd_boolean
15481 elf32_arm_swap_symbol_in (bfd * abfd,
15482 const void *psrc,
15483 const void *pshn,
15484 Elf_Internal_Sym *dst)
15485 {
15486 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
15487 return FALSE;
15488
15489 /* New EABI objects mark thumb function symbols by setting the low bit of
15490 the address. */
15491 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
15492 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
15493 {
15494 if (dst->st_value & 1)
15495 {
15496 dst->st_value &= ~(bfd_vma) 1;
15497 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15498 }
15499 else
15500 dst->st_target_internal = ST_BRANCH_TO_ARM;
15501 }
15502 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
15503 {
15504 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
15505 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15506 }
15507 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
15508 dst->st_target_internal = ST_BRANCH_LONG;
15509 else
15510 dst->st_target_internal = ST_BRANCH_UNKNOWN;
15511
15512 return TRUE;
15513 }
15514
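/* Editorial note (not part of BFD): new EABI objects flag a Thumb
   function by setting bit 0 of the symbol value, which is why the
   routine above clears that bit and records ST_BRANCH_TO_THUMB
   instead.  The sketch below shows the same convention applied to a
   bare address; the type and helper are invented for the example.  */
#if 0
#include <stdint.h>

struct decoded_entry
{
  uint32_t address;     /* Real entry point, bit 0 cleared.  */
  int is_thumb;         /* Non-zero if the target is Thumb code.  */
};

static struct decoded_entry
split_thumb_bit (uint32_t st_value)
{
  struct decoded_entry result;

  result.is_thumb = (st_value & 1) != 0;
  result.address = st_value & ~(uint32_t) 1;
  return result;
}
#endif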
15515
15516 /* Mangle thumb function symbols as we write them out. */
15517
15518 static void
15519 elf32_arm_swap_symbol_out (bfd *abfd,
15520 const Elf_Internal_Sym *src,
15521 void *cdst,
15522 void *shndx)
15523 {
15524 Elf_Internal_Sym newsym;
15525
15526 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
15527 of the address set, as per the new EABI. We do this unconditionally
15528 because objcopy does not set the elf header flags until after
15529 it writes out the symbol table. */
15530 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
15531 {
15532 newsym = *src;
15533 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
15534 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
15535 if (newsym.st_shndx != SHN_UNDEF)
15536 {
15537 /* Do this only for defined symbols. At link time, the static
15538 linker will simulate the work of the dynamic linker in resolving
15539 symbols and will carry over the Thumb-ness of found symbols to
15540 the output symbol table. It's not clear how that happens, but
15541 the Thumb-ness of undefined symbols can well be different at
15542 runtime, and writing '1' for them would be confusing for users
15543 and possibly for the dynamic linker itself.
15544 */
15545 newsym.st_value |= 1;
15546 }
15547
15548 src = &newsym;
15549 }
15550 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
15551 }
15552
15553 /* Add the PT_ARM_EXIDX program header. */
15554
15555 static bfd_boolean
15556 elf32_arm_modify_segment_map (bfd *abfd,
15557 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15558 {
15559 struct elf_segment_map *m;
15560 asection *sec;
15561
15562 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15563 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15564 {
15565 /* If there is already a PT_ARM_EXIDX header, then we do not
15566 want to add another one. This situation arises when running
15567 "strip"; the input binary already has the header. */
15568 m = elf_tdata (abfd)->segment_map;
15569 while (m && m->p_type != PT_ARM_EXIDX)
15570 m = m->next;
15571 if (!m)
15572 {
15573 m = (struct elf_segment_map *)
15574 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
15575 if (m == NULL)
15576 return FALSE;
15577 m->p_type = PT_ARM_EXIDX;
15578 m->count = 1;
15579 m->sections[0] = sec;
15580
15581 m->next = elf_tdata (abfd)->segment_map;
15582 elf_tdata (abfd)->segment_map = m;
15583 }
15584 }
15585
15586 return TRUE;
15587 }
15588
15589 /* We may add a PT_ARM_EXIDX program header. */
15590
15591 static int
15592 elf32_arm_additional_program_headers (bfd *abfd,
15593 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15594 {
15595 asection *sec;
15596
15597 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15598 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15599 return 1;
15600 else
15601 return 0;
15602 }
15603
15604 /* Hook called by the linker routine which adds symbols from an object
15605 file. */
15606
15607 static bfd_boolean
15608 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
15609 Elf_Internal_Sym *sym, const char **namep,
15610 flagword *flagsp, asection **secp, bfd_vma *valp)
15611 {
15612 if ((abfd->flags & DYNAMIC) == 0
15613 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
15614 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
15615 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
15616
15617 if (elf32_arm_hash_table (info)->vxworks_p
15618 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
15619 flagsp, secp, valp))
15620 return FALSE;
15621
15622 return TRUE;
15623 }
15624
15625 /* We use this to override swap_symbol_in and swap_symbol_out. */
15626 const struct elf_size_info elf32_arm_size_info =
15627 {
15628 sizeof (Elf32_External_Ehdr),
15629 sizeof (Elf32_External_Phdr),
15630 sizeof (Elf32_External_Shdr),
15631 sizeof (Elf32_External_Rel),
15632 sizeof (Elf32_External_Rela),
15633 sizeof (Elf32_External_Sym),
15634 sizeof (Elf32_External_Dyn),
15635 sizeof (Elf_External_Note),
15636 4,
15637 1,
15638 32, 2,
15639 ELFCLASS32, EV_CURRENT,
15640 bfd_elf32_write_out_phdrs,
15641 bfd_elf32_write_shdrs_and_ehdr,
15642 bfd_elf32_checksum_contents,
15643 bfd_elf32_write_relocs,
15644 elf32_arm_swap_symbol_in,
15645 elf32_arm_swap_symbol_out,
15646 bfd_elf32_slurp_reloc_table,
15647 bfd_elf32_slurp_symbol_table,
15648 bfd_elf32_swap_dyn_in,
15649 bfd_elf32_swap_dyn_out,
15650 bfd_elf32_swap_reloc_in,
15651 bfd_elf32_swap_reloc_out,
15652 bfd_elf32_swap_reloca_in,
15653 bfd_elf32_swap_reloca_out
15654 };
15655
15656 #define ELF_ARCH bfd_arch_arm
15657 #define ELF_TARGET_ID ARM_ELF_DATA
15658 #define ELF_MACHINE_CODE EM_ARM
15659 #ifdef __QNXTARGET__
15660 #define ELF_MAXPAGESIZE 0x1000
15661 #else
15662 #define ELF_MAXPAGESIZE 0x8000
15663 #endif
15664 #define ELF_MINPAGESIZE 0x1000
15665 #define ELF_COMMONPAGESIZE 0x1000
15666
15667 #define bfd_elf32_mkobject elf32_arm_mkobject
15668
15669 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
15670 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
15671 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
15672 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
15673 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
15674 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
15675 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
15676 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
15677 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
15678 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
15679 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
15680 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
15681 #define bfd_elf32_bfd_final_link elf32_arm_final_link
15682
15683 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
15684 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
15685 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
15686 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
15687 #define elf_backend_check_relocs elf32_arm_check_relocs
15688 #define elf_backend_relocate_section elf32_arm_relocate_section
15689 #define elf_backend_write_section elf32_arm_write_section
15690 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
15691 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
15692 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
15693 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
15694 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
15695 #define elf_backend_always_size_sections elf32_arm_always_size_sections
15696 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
15697 #define elf_backend_post_process_headers elf32_arm_post_process_headers
15698 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
15699 #define elf_backend_object_p elf32_arm_object_p
15700 #define elf_backend_fake_sections elf32_arm_fake_sections
15701 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
15702 #define elf_backend_final_write_processing elf32_arm_final_write_processing
15703 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
15704 #define elf_backend_size_info elf32_arm_size_info
15705 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15706 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
15707 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
15708 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
15709 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
15710
15711 #define elf_backend_can_refcount 1
15712 #define elf_backend_can_gc_sections 1
15713 #define elf_backend_plt_readonly 1
15714 #define elf_backend_want_got_plt 1
15715 #define elf_backend_want_plt_sym 0
15716 #define elf_backend_may_use_rel_p 1
15717 #define elf_backend_may_use_rela_p 0
15718 #define elf_backend_default_use_rela_p 0
15719
15720 #define elf_backend_got_header_size 12
15721
15722 #undef elf_backend_obj_attrs_vendor
15723 #define elf_backend_obj_attrs_vendor "aeabi"
15724 #undef elf_backend_obj_attrs_section
15725 #define elf_backend_obj_attrs_section ".ARM.attributes"
15726 #undef elf_backend_obj_attrs_arg_type
15727 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
15728 #undef elf_backend_obj_attrs_section_type
15729 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
15730 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
15731 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
15732
15733 #include "elf32-target.h"
15734
15735 /* Native Client targets. */
15736
15737 #undef TARGET_LITTLE_SYM
15738 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_nacl_vec
15739 #undef TARGET_LITTLE_NAME
15740 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
15741 #undef TARGET_BIG_SYM
15742 #define TARGET_BIG_SYM bfd_elf32_bigarm_nacl_vec
15743 #undef TARGET_BIG_NAME
15744 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
15745
15746 /* Like elf32_arm_link_hash_table_create -- but overrides
15747 appropriately for NaCl. */
15748
15749 static struct bfd_link_hash_table *
15750 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
15751 {
15752 struct bfd_link_hash_table *ret;
15753
15754 ret = elf32_arm_link_hash_table_create (abfd);
15755 if (ret)
15756 {
15757 struct elf32_arm_link_hash_table *htab
15758 = (struct elf32_arm_link_hash_table *) ret;
15759
15760 htab->nacl_p = 1;
15761
15762 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
15763 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
15764 }
15765 return ret;
15766 }
15767
15768 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
15769 really need to use elf32_arm_modify_segment_map. But we do it
15770 anyway just to reduce gratuitous differences with the stock ARM backend. */
15771
15772 static bfd_boolean
15773 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
15774 {
15775 return (elf32_arm_modify_segment_map (abfd, info)
15776 && nacl_modify_segment_map (abfd, info));
15777 }
15778
15779 #undef elf32_bed
15780 #define elf32_bed elf32_arm_nacl_bed
15781 #undef bfd_elf32_bfd_link_hash_table_create
15782 #define bfd_elf32_bfd_link_hash_table_create \
15783 elf32_arm_nacl_link_hash_table_create
15784 #undef elf_backend_plt_alignment
15785 #define elf_backend_plt_alignment 4
15786 #undef elf_backend_modify_segment_map
15787 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
15788 #undef elf_backend_modify_program_headers
15789 #define elf_backend_modify_program_headers nacl_modify_program_headers
15790
15791 #undef ELF_MAXPAGESIZE
15792 #define ELF_MAXPAGESIZE 0x10000
15793
15794 #include "elf32-target.h"
15795
15796 /* Reset to defaults. */
15797 #undef elf_backend_plt_alignment
15798 #undef elf_backend_modify_segment_map
15799 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15800 #undef elf_backend_modify_program_headers
15801
15802 /* VxWorks Targets. */
15803
15804 #undef TARGET_LITTLE_SYM
15805 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
15806 #undef TARGET_LITTLE_NAME
15807 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
15808 #undef TARGET_BIG_SYM
15809 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
15810 #undef TARGET_BIG_NAME
15811 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
15812
15813 /* Like elf32_arm_link_hash_table_create -- but overrides
15814 appropriately for VxWorks. */
15815
15816 static struct bfd_link_hash_table *
15817 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
15818 {
15819 struct bfd_link_hash_table *ret;
15820
15821 ret = elf32_arm_link_hash_table_create (abfd);
15822 if (ret)
15823 {
15824 struct elf32_arm_link_hash_table *htab
15825 = (struct elf32_arm_link_hash_table *) ret;
15826 htab->use_rel = 0;
15827 htab->vxworks_p = 1;
15828 }
15829 return ret;
15830 }
15831
15832 static void
15833 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
15834 {
15835 elf32_arm_final_write_processing (abfd, linker);
15836 elf_vxworks_final_write_processing (abfd, linker);
15837 }
15838
15839 #undef elf32_bed
15840 #define elf32_bed elf32_arm_vxworks_bed
15841
15842 #undef bfd_elf32_bfd_link_hash_table_create
15843 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
15844 #undef elf_backend_final_write_processing
15845 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
15846 #undef elf_backend_emit_relocs
15847 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
15848
15849 #undef elf_backend_may_use_rel_p
15850 #define elf_backend_may_use_rel_p 0
15851 #undef elf_backend_may_use_rela_p
15852 #define elf_backend_may_use_rela_p 1
15853 #undef elf_backend_default_use_rela_p
15854 #define elf_backend_default_use_rela_p 1
15855 #undef elf_backend_want_plt_sym
15856 #define elf_backend_want_plt_sym 1
15857 #undef ELF_MAXPAGESIZE
15858 #define ELF_MAXPAGESIZE 0x1000
15859
15860 #include "elf32-target.h"
15861
15862
15863 /* Merge backend specific data from an object file to the output
15864 object file when linking. */
15865
15866 static bfd_boolean
15867 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
15868 {
15869 flagword out_flags;
15870 flagword in_flags;
15871 bfd_boolean flags_compatible = TRUE;
15872 asection *sec;
15873
15874 /* Check if we have the same endianness. */
15875 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
15876 return FALSE;
15877
15878 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
15879 return TRUE;
15880
15881 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
15882 return FALSE;
15883
15884 /* The input BFD must have had its flags initialised. */
15885 /* The following seems bogus to me -- The flags are initialized in
15886 the assembler but I don't think an elf_flags_init field is
15887 written into the object. */
15888 /* BFD_ASSERT (elf_flags_init (ibfd)); */
15889
15890 in_flags = elf_elfheader (ibfd)->e_flags;
15891 out_flags = elf_elfheader (obfd)->e_flags;
15892
15893 /* In theory there is no reason why we couldn't handle this. However
15894 in practice it isn't even close to working and there is no real
15895 reason to want it. */
15896 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
15897 && !(ibfd->flags & DYNAMIC)
15898 && (in_flags & EF_ARM_BE8))
15899 {
15900 _bfd_error_handler (_("error: %B is already in final BE8 format"),
15901 ibfd);
15902 return FALSE;
15903 }
15904
15905 if (!elf_flags_init (obfd))
15906 {
15907 /* If the input is the default architecture and had the default
15908 flags then do not bother setting the flags for the output
15909 architecture; instead allow future merges to do this. If no
15910 future merges ever set these flags then they will retain their
15911 uninitialised values which, surprise surprise, correspond
15912 to the default values. */
15913 if (bfd_get_arch_info (ibfd)->the_default
15914 && elf_elfheader (ibfd)->e_flags == 0)
15915 return TRUE;
15916
15917 elf_flags_init (obfd) = TRUE;
15918 elf_elfheader (obfd)->e_flags = in_flags;
15919
15920 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
15921 && bfd_get_arch_info (obfd)->the_default)
15922 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
15923
15924 return TRUE;
15925 }
15926
15927 /* Determine what should happen if the input ARM architecture
15928 does not match the output ARM architecture. */
15929 if (! bfd_arm_merge_machines (ibfd, obfd))
15930 return FALSE;
15931
15932 /* Identical flags must be compatible. */
15933 if (in_flags == out_flags)
15934 return TRUE;
15935
15936 /* Check to see if the input BFD actually contains any sections. If
15937 not, its flags may not have been initialised either, but it
15938 cannot actually cause any incompatibility. Do not short-circuit
15939 dynamic objects; their section list may be emptied by
15940 elf_link_add_object_symbols.
15941
15942 Also check to see if there are no code sections in the input.
15943 In this case there is no need to check for code specific flags.
15944 XXX - do we need to worry about floating-point format compatibility
15945 in data sections? */
15946 if (!(ibfd->flags & DYNAMIC))
15947 {
15948 bfd_boolean null_input_bfd = TRUE;
15949 bfd_boolean only_data_sections = TRUE;
15950
15951 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
15952 {
15953 /* Ignore synthetic glue sections. */
15954 if (strcmp (sec->name, ".glue_7")
15955 && strcmp (sec->name, ".glue_7t"))
15956 {
15957 if ((bfd_get_section_flags (ibfd, sec)
15958 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15959 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15960 only_data_sections = FALSE;
15961
15962 null_input_bfd = FALSE;
15963 break;
15964 }
15965 }
15966
15967 if (null_input_bfd || only_data_sections)
15968 return TRUE;
15969 }
15970
15971 /* Complain about various flag mismatches. */
15972 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
15973 EF_ARM_EABI_VERSION (out_flags)))
15974 {
15975 _bfd_error_handler
15976 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
15977 ibfd, obfd,
15978 (in_flags & EF_ARM_EABIMASK) >> 24,
15979 (out_flags & EF_ARM_EABIMASK) >> 24);
15980 return FALSE;
15981 }
15982
15983 /* Not sure what needs to be checked for EABI versions >= 1. */
15984 /* VxWorks libraries do not use these flags. */
15985 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
15986 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
15987 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
15988 {
15989 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
15990 {
15991 _bfd_error_handler
15992 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
15993 ibfd, obfd,
15994 in_flags & EF_ARM_APCS_26 ? 26 : 32,
15995 out_flags & EF_ARM_APCS_26 ? 26 : 32);
15996 flags_compatible = FALSE;
15997 }
15998
15999 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
16000 {
16001 if (in_flags & EF_ARM_APCS_FLOAT)
16002 _bfd_error_handler
16003 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
16004 ibfd, obfd);
16005 else
16006 _bfd_error_handler
16007 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
16008 ibfd, obfd);
16009
16010 flags_compatible = FALSE;
16011 }
16012
16013 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
16014 {
16015 if (in_flags & EF_ARM_VFP_FLOAT)
16016 _bfd_error_handler
16017 (_("error: %B uses VFP instructions, whereas %B does not"),
16018 ibfd, obfd);
16019 else
16020 _bfd_error_handler
16021 (_("error: %B uses FPA instructions, whereas %B does not"),
16022 ibfd, obfd);
16023
16024 flags_compatible = FALSE;
16025 }
16026
16027 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
16028 {
16029 if (in_flags & EF_ARM_MAVERICK_FLOAT)
16030 _bfd_error_handler
16031 (_("error: %B uses Maverick instructions, whereas %B does not"),
16032 ibfd, obfd);
16033 else
16034 _bfd_error_handler
16035 (_("error: %B does not use Maverick instructions, whereas %B does"),
16036 ibfd, obfd);
16037
16038 flags_compatible = FALSE;
16039 }
16040
16041 #ifdef EF_ARM_SOFT_FLOAT
16042 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
16043 {
16044 /* We can allow interworking between code that uses the VFP format
16045 layout and either soft-float or integer regs for
16046 passing floating point arguments and results. We already
16047 know that the APCS_FLOAT flags match; similarly for VFP
16048 flags. */
16049 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
16050 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
16051 {
16052 if (in_flags & EF_ARM_SOFT_FLOAT)
16053 _bfd_error_handler
16054 (_("error: %B uses software FP, whereas %B uses hardware FP"),
16055 ibfd, obfd);
16056 else
16057 _bfd_error_handler
16058 (_("error: %B uses hardware FP, whereas %B uses software FP"),
16059 ibfd, obfd);
16060
16061 flags_compatible = FALSE;
16062 }
16063 }
16064 #endif
16065
16066 /* Interworking mismatch is only a warning. */
16067 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
16068 {
16069 if (in_flags & EF_ARM_INTERWORK)
16070 {
16071 _bfd_error_handler
16072 (_("Warning: %B supports interworking, whereas %B does not"),
16073 ibfd, obfd);
16074 }
16075 else
16076 {
16077 _bfd_error_handler
16078 (_("Warning: %B does not support interworking, whereas %B does"),
16079 ibfd, obfd);
16080 }
16081 }
16082 }
16083
16084 return flags_compatible;
16085 }
16086
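/* Editorial note (not part of BFD): the EABI compatibility check above
   keeps the version number in the top byte of e_flags, which is why the
   diagnostic shifts the masked value right by 24.  A small illustrative
   decoder of that field (the helper name is invented):  */
#if 0
#include <stdint.h>

static unsigned int
eabi_version_of (uint32_t e_flags)
{
  return (unsigned int) ((e_flags & 0xff000000u) >> 24);
}
#endif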
16087
16088 /* Symbian OS Targets. */
16089
16090 #undef TARGET_LITTLE_SYM
16091 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
16092 #undef TARGET_LITTLE_NAME
16093 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
16094 #undef TARGET_BIG_SYM
16095 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
16096 #undef TARGET_BIG_NAME
16097 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
16098
16099 /* Like elf32_arm_link_hash_table_create -- but overrides
16100 appropriately for Symbian OS. */
16101
16102 static struct bfd_link_hash_table *
16103 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
16104 {
16105 struct bfd_link_hash_table *ret;
16106
16107 ret = elf32_arm_link_hash_table_create (abfd);
16108 if (ret)
16109 {
16110 struct elf32_arm_link_hash_table *htab
16111 = (struct elf32_arm_link_hash_table *)ret;
16112 /* There is no PLT header for Symbian OS. */
16113 htab->plt_header_size = 0;
16114 /* The PLT entries are each one instruction and one word. */
16115 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
16116 htab->symbian_p = 1;
16117 /* Symbian uses armv5t or above, so use_blx is always true. */
16118 htab->use_blx = 1;
16119 htab->root.is_relocatable_executable = 1;
16120 }
16121 return ret;
16122 }
16123
16124 static const struct bfd_elf_special_section
16125 elf32_arm_symbian_special_sections[] =
16126 {
16127 /* In a BPABI executable, the dynamic linking sections do not go in
16128 the loadable read-only segment. The post-linker may wish to
16129 refer to these sections, but they are not part of the final
16130 program image. */
16131 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
16132 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
16133 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
16134 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
16135 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
16136 /* These sections do not need to be writable as the SymbianOS
16137 postlinker will arrange things so that no dynamic relocation is
16138 required. */
16139 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
16140 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
16141 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
16142 { NULL, 0, 0, 0, 0 }
16143 };
16144
16145 static void
16146 elf32_arm_symbian_begin_write_processing (bfd *abfd,
16147 struct bfd_link_info *link_info)
16148 {
16149 /* BPABI objects are never loaded directly by an OS kernel; they are
16150 processed by a postlinker first, into an OS-specific format. If
16151 the D_PAGED bit is set on the file, BFD will align segments on
16152 page boundaries, so that an OS can directly map the file. With
16153 BPABI objects, that just results in wasted space. In addition,
16154 because we clear the D_PAGED bit, map_sections_to_segments will
16155 recognize that the program headers should not be mapped into any
16156 loadable segment. */
16157 abfd->flags &= ~D_PAGED;
16158 elf32_arm_begin_write_processing (abfd, link_info);
16159 }
16160
16161 static bfd_boolean
16162 elf32_arm_symbian_modify_segment_map (bfd *abfd,
16163 struct bfd_link_info *info)
16164 {
16165 struct elf_segment_map *m;
16166 asection *dynsec;
16167
16168 /* BPABI shared libraries and executables should have a PT_DYNAMIC
16169 segment. However, because the .dynamic section is not marked
16170 with SEC_LOAD, the generic ELF code will not create such a
16171 segment. */
16172 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
16173 if (dynsec)
16174 {
16175 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
16176 if (m->p_type == PT_DYNAMIC)
16177 break;
16178
16179 if (m == NULL)
16180 {
16181 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
16182 m->next = elf_tdata (abfd)->segment_map;
16183 elf_tdata (abfd)->segment_map = m;
16184 }
16185 }
16186
16187 /* Also call the generic arm routine. */
16188 return elf32_arm_modify_segment_map (abfd, info);
16189 }
16190
16191 /* Return address for Ith PLT stub in section PLT, for relocation REL
16192 or (bfd_vma) -1 if it should not be included. */
16193
16194 static bfd_vma
16195 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
16196 const arelent *rel ATTRIBUTE_UNUSED)
16197 {
16198 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
16199 }
16200
16201
16202 #undef elf32_bed
16203 #define elf32_bed elf32_arm_symbian_bed
16204
16205 /* The dynamic sections are not allocated on SymbianOS; the postlinker
16206 will process them and then discard them. */
16207 #undef ELF_DYNAMIC_SEC_FLAGS
16208 #define ELF_DYNAMIC_SEC_FLAGS \
16209 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
16210
16211 #undef elf_backend_emit_relocs
16212
16213 #undef bfd_elf32_bfd_link_hash_table_create
16214 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
16215 #undef elf_backend_special_sections
16216 #define elf_backend_special_sections elf32_arm_symbian_special_sections
16217 #undef elf_backend_begin_write_processing
16218 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
16219 #undef elf_backend_final_write_processing
16220 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16221
16222 #undef elf_backend_modify_segment_map
16223 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
16224
16225 /* There is no .got section for BPABI objects, and hence no header. */
16226 #undef elf_backend_got_header_size
16227 #define elf_backend_got_header_size 0
16228
16229 /* Similarly, there is no .got.plt section. */
16230 #undef elf_backend_want_got_plt
16231 #define elf_backend_want_got_plt 0
16232
16233 #undef elf_backend_plt_sym_val
16234 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
16235
16236 #undef elf_backend_may_use_rel_p
16237 #define elf_backend_may_use_rel_p 1
16238 #undef elf_backend_may_use_rela_p
16239 #define elf_backend_may_use_rela_p 0
16240 #undef elf_backend_default_use_rela_p
16241 #define elf_backend_default_use_rela_p 0
16242 #undef elf_backend_want_plt_sym
16243 #define elf_backend_want_plt_sym 0
16244 #undef ELF_MAXPAGESIZE
16245 #define ELF_MAXPAGESIZE 0x8000
16246
16247 #include "elf32-target.h"