1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME.  HTAB is the
33 bfd's elf32_arm_link_hash_table.  */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry.  HTAB is the bfd's
38 elf32_arm_link_hash_table.  */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in.  HTAB is the bfd's
45 elf32_arm_link_hash_table.  */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out.  HTAB is the bfd's
52 elf32_arm_link_hash_table.  */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
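
/* Illustrative sketch, not part of the build: the use_rel-dependent
   helpers above are typically combined when walking raw relocation
   records, swapping each record in and then advancing by the per-entry
   size, with any dynamic relocations emitted into
   RELOC_SECTION (htab, name), i.e. ".rel" + name for REL targets and
   ".rela" + name otherwise.  The variables htab, abfd and erel are
   assumptions made only for this example.

     Elf_Internal_Rela rel;
     SWAP_RELOC_IN (htab) (abfd, erel, &rel);
     erel += RELOC_SIZE (htab);
*/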
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static struct elf_backend_data elf32_arm_vxworks_bed;
65
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
68 asection *sec,
69 bfd_byte *contents);
70
71 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
72 R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24
73 HOWTO in that slot (an illustrative check follows the table). */
74
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
79 0, /* rightshift */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
81 0, /* bitsize */
82 FALSE, /* pc_relative */
83 0, /* bitpos */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
88 0, /* src_mask */
89 0, /* dst_mask */
90 FALSE), /* pcrel_offset */
91
92 HOWTO (R_ARM_PC24, /* type */
93 2, /* rightshift */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
95 24, /* bitsize */
96 TRUE, /* pc_relative */
97 0, /* bitpos */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
105
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120
121 /* standard 32bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
123 0, /* rightshift */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
125 32, /* bitsize */
126 TRUE, /* pc_relative */
127 0, /* bitpos */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
135
136 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
138 0, /* rightshift */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
140 32, /* bitsize */
141 TRUE, /* pc_relative */
142 0, /* bitpos */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
150
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
153 0, /* rightshift */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
155 16, /* bitsize */
156 FALSE, /* pc_relative */
157 0, /* bitpos */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
165
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
168 0, /* rightshift */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
170 12, /* bitsize */
171 FALSE, /* pc_relative */
172 0, /* bitpos */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
180
181 HOWTO (R_ARM_THM_ABS5, /* type */
182 6, /* rightshift */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
184 5, /* bitsize */
185 FALSE, /* pc_relative */
186 0, /* bitpos */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
194
195 /* 8 bit absolute */
196 HOWTO (R_ARM_ABS8, /* type */
197 0, /* rightshift */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
199 8, /* bitsize */
200 FALSE, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
209
210 HOWTO (R_ARM_SBREL32, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 FALSE, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
223
224 HOWTO (R_ARM_THM_CALL, /* type */
225 1, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 25, /* bitsize */
228 TRUE, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
237
238 HOWTO (R_ARM_THM_PC8, /* type */
239 1, /* rightshift */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
241 8, /* bitsize */
242 TRUE, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
251
252 HOWTO (R_ARM_BREL_ADJ, /* type */
253 1, /* rightshift */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 FALSE, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
265
266 HOWTO (R_ARM_SWI24, /* type */
267 0, /* rightshift */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
269 0, /* bitsize */
270 FALSE, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
279
280 HOWTO (R_ARM_THM_SWI8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 0, /* bitsize */
284 FALSE, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
293
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
296 2, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 25, /* bitsize */
299 TRUE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
308
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
311 2, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 22, /* bitsize */
314 TRUE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
323
324 /* Dynamic TLS relocations. */
325
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 FALSE, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
339
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 FALSE, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
353
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
355 0, /* rightshift */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
357 32, /* bitsize */
358 FALSE, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
367
368 /* Relocs used in ARM Linux */
369
370 HOWTO (R_ARM_COPY, /* type */
371 0, /* rightshift */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
373 32, /* bitsize */
374 FALSE, /* pc_relative */
375 0, /* bitpos */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
383
384 HOWTO (R_ARM_GLOB_DAT, /* type */
385 0, /* rightshift */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
387 32, /* bitsize */
388 FALSE, /* pc_relative */
389 0, /* bitpos */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
397
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
399 0, /* rightshift */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
401 32, /* bitsize */
402 FALSE, /* pc_relative */
403 0, /* bitpos */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
411
412 HOWTO (R_ARM_RELATIVE, /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 32, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 HOWTO (R_ARM_GOTOFF32, /* type */
427 0, /* rightshift */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
429 32, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 HOWTO (R_ARM_GOTPC, /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 32, /* bitsize */
444 TRUE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
453
454 HOWTO (R_ARM_GOT32, /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 32, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 HOWTO (R_ARM_PLT32, /* type */
469 2, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 24, /* bitsize */
472 TRUE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
481
482 HOWTO (R_ARM_CALL, /* type */
483 2, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 24, /* bitsize */
486 TRUE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
495
496 HOWTO (R_ARM_JUMP24, /* type */
497 2, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 24, /* bitsize */
500 TRUE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
509
510 HOWTO (R_ARM_THM_JUMP24, /* type */
511 1, /* rightshift */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
513 24, /* bitsize */
514 TRUE, /* pc_relative */
515 0, /* bitpos */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
523
524 HOWTO (R_ARM_BASE_ABS, /* type */
525 0, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 32, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
539 0, /* rightshift */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
541 12, /* bitsize */
542 TRUE, /* pc_relative */
543 0, /* bitpos */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
551
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
553 0, /* rightshift */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
555 12, /* bitsize */
556 TRUE, /* pc_relative */
557 8, /* bitpos */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
565
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
567 0, /* rightshift */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
569 12, /* bitsize */
570 TRUE, /* pc_relative */
571 16, /* bitpos */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
579
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
581 0, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 12, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 8, /* bitsize */
598 FALSE, /* pc_relative */
599 12, /* bitpos */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
609 0, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 8, /* bitsize */
612 FALSE, /* pc_relative */
613 20, /* bitpos */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
621
622 HOWTO (R_ARM_TARGET1, /* type */
623 0, /* rightshift */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
625 32, /* bitsize */
626 FALSE, /* pc_relative */
627 0, /* bitpos */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
635
636 HOWTO (R_ARM_ROSEGREL32, /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 32, /* bitsize */
640 FALSE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
649
650 HOWTO (R_ARM_V4BX, /* type */
651 0, /* rightshift */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
653 32, /* bitsize */
654 FALSE, /* pc_relative */
655 0, /* bitpos */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
663
664 HOWTO (R_ARM_TARGET2, /* type */
665 0, /* rightshift */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
667 32, /* bitsize */
668 FALSE, /* pc_relative */
669 0, /* bitpos */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
677
678 HOWTO (R_ARM_PREL31, /* type */
679 0, /* rightshift */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
681 31, /* bitsize */
682 TRUE, /* pc_relative */
683 0, /* bitpos */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
691
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
693 0, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 16, /* bitsize */
696 FALSE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
705
706 HOWTO (R_ARM_MOVT_ABS, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 16, /* bitsize */
710 FALSE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
719
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
721 0, /* rightshift */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
723 16, /* bitsize */
724 TRUE, /* pc_relative */
725 0, /* bitpos */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
733
734 HOWTO (R_ARM_MOVT_PREL, /* type */
735 0, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 16, /* bitsize */
738 TRUE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
747
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
749 0, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 16, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
763 0, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 16, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
777 0, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 16, /* bitsize */
780 TRUE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
789
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
791 0, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 16, /* bitsize */
794 TRUE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
803
804 HOWTO (R_ARM_THM_JUMP19, /* type */
805 1, /* rightshift */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
807 19, /* bitsize */
808 TRUE, /* pc_relative */
809 0, /* bitpos */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
817
818 HOWTO (R_ARM_THM_JUMP6, /* type */
819 1, /* rightshift */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
821 6, /* bitsize */
822 TRUE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
831
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095 (base) by altering ADDW to SUBW or vice
834 versa. */
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
836 0, /* rightshift */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
838 13, /* bitsize */
839 TRUE, /* pc_relative */
840 0, /* bitpos */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
848
849 HOWTO (R_ARM_THM_PC12, /* type */
850 0, /* rightshift */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
852 13, /* bitsize */
853 TRUE, /* pc_relative */
854 0, /* bitpos */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
862
863 HOWTO (R_ARM_ABS32_NOI, /* type */
864 0, /* rightshift */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
866 32, /* bitsize */
867 FALSE, /* pc_relative */
868 0, /* bitpos */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
876
877 HOWTO (R_ARM_REL32_NOI, /* type */
878 0, /* rightshift */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
880 32, /* bitsize */
881 TRUE, /* pc_relative */
882 0, /* bitpos */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
890
891 /* Group relocations. */
892
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
894 0, /* rightshift */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
896 32, /* bitsize */
897 TRUE, /* pc_relative */
898 0, /* bitpos */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
906
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
908 0, /* rightshift */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
910 32, /* bitsize */
911 TRUE, /* pc_relative */
912 0, /* bitpos */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
920
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
922 0, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 32, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
936 0, /* rightshift */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
938 32, /* bitsize */
939 TRUE, /* pc_relative */
940 0, /* bitpos */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
948
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
950 0, /* rightshift */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
952 32, /* bitsize */
953 TRUE, /* pc_relative */
954 0, /* bitpos */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
962
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
964 0, /* rightshift */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
966 32, /* bitsize */
967 TRUE, /* pc_relative */
968 0, /* bitpos */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
976
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
978 0, /* rightshift */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
980 32, /* bitsize */
981 TRUE, /* pc_relative */
982 0, /* bitpos */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
990
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
992 0, /* rightshift */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
994 32, /* bitsize */
995 TRUE, /* pc_relative */
996 0, /* bitpos */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1004
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1006 0, /* rightshift */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1008 32, /* bitsize */
1009 TRUE, /* pc_relative */
1010 0, /* bitpos */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1018
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 32, /* bitsize */
1023 TRUE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1032
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1034 0, /* rightshift */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 32, /* bitsize */
1037 TRUE, /* pc_relative */
1038 0, /* bitpos */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1046
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 32, /* bitsize */
1051 TRUE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1060
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 32, /* bitsize */
1065 TRUE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1074
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1076 0, /* rightshift */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1078 32, /* bitsize */
1079 TRUE, /* pc_relative */
1080 0, /* bitpos */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1088
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1090 0, /* rightshift */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1092 32, /* bitsize */
1093 TRUE, /* pc_relative */
1094 0, /* bitpos */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1102
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 32, /* bitsize */
1107 TRUE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1116
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1118 0, /* rightshift */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1120 32, /* bitsize */
1121 TRUE, /* pc_relative */
1122 0, /* bitpos */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1130
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1132 0, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 32, /* bitsize */
1135 TRUE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1144
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1146 0, /* rightshift */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1148 32, /* bitsize */
1149 TRUE, /* pc_relative */
1150 0, /* bitpos */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1158
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1160 0, /* rightshift */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1162 32, /* bitsize */
1163 TRUE, /* pc_relative */
1164 0, /* bitpos */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1172
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1174 0, /* rightshift */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1176 32, /* bitsize */
1177 TRUE, /* pc_relative */
1178 0, /* bitpos */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1186
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1188 0, /* rightshift */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1190 32, /* bitsize */
1191 TRUE, /* pc_relative */
1192 0, /* bitpos */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1200
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1202 0, /* rightshift */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 32, /* bitsize */
1205 TRUE, /* pc_relative */
1206 0, /* bitpos */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1214
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1216 0, /* rightshift */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 32, /* bitsize */
1219 TRUE, /* pc_relative */
1220 0, /* bitpos */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1228
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 32, /* bitsize */
1233 TRUE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1242
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1244 0, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 32, /* bitsize */
1247 TRUE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1256
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 32, /* bitsize */
1261 TRUE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1270
1271 /* End of group relocations. */
1272
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1274 0, /* rightshift */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1276 16, /* bitsize */
1277 FALSE, /* pc_relative */
1278 0, /* bitpos */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1286
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1288 0, /* rightshift */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1290 16, /* bitsize */
1291 FALSE, /* pc_relative */
1292 0, /* bitpos */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1300
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1302 0, /* rightshift */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1304 16, /* bitsize */
1305 FALSE, /* pc_relative */
1306 0, /* bitpos */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1314
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1316 0, /* rightshift */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1318 16, /* bitsize */
1319 FALSE, /* pc_relative */
1320 0, /* bitpos */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1328
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1330 0, /* rightshift */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1332 16, /* bitsize */
1333 FALSE, /* pc_relative */
1334 0, /* bitpos */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 EMPTY_HOWTO (90), /* Unallocated. */
1358 EMPTY_HOWTO (91),
1359 EMPTY_HOWTO (92),
1360 EMPTY_HOWTO (93),
1361
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 32, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 32, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 32, /* bitsize */
1394 TRUE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1403
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 12, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 12, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1431
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1433
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1436 0, /* rightshift */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1438 0, /* bitsize */
1439 FALSE, /* pc_relative */
1440 0, /* bitpos */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1445 0, /* src_mask */
1446 0, /* dst_mask */
1447 FALSE), /* pcrel_offset */
1448
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1451 0, /* rightshift */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1453 0, /* bitsize */
1454 FALSE, /* pc_relative */
1455 0, /* bitpos */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1460 0, /* src_mask */
1461 0, /* dst_mask */
1462 FALSE), /* pcrel_offset */
1463
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1465 1, /* rightshift */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1467 11, /* bitsize */
1468 TRUE, /* pc_relative */
1469 0, /* bitpos */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1477
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1479 1, /* rightshift */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1481 8, /* bitsize */
1482 TRUE, /* pc_relative */
1483 0, /* bitpos */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1491
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1494 0, /* rightshift */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1496 32, /* bitsize */
1497 FALSE, /* pc_relative */
1498 0, /* bitpos */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1506
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1508 0, /* rightshift */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1510 32, /* bitsize */
1511 FALSE, /* pc_relative */
1512 0, /* bitpos */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1520
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1522 0, /* rightshift */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1524 32, /* bitsize */
1525 FALSE, /* pc_relative */
1526 0, /* bitpos */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1534
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1536 0, /* rightshift */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1538 32, /* bitsize */
1539 FALSE, /* pc_relative */
1540 0, /* bitpos */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1548
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1550 0, /* rightshift */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1552 32, /* bitsize */
1553 FALSE, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1562
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1564 0, /* rightshift */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1566 12, /* bitsize */
1567 FALSE, /* pc_relative */
1568 0, /* bitpos */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1576
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1578 0, /* rightshift */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1580 12, /* bitsize */
1581 FALSE, /* pc_relative */
1582 0, /* bitpos */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1590
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1592 0, /* rightshift */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1594 12, /* bitsize */
1595 FALSE, /* pc_relative */
1596 0, /* bitpos */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1604 };
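
/* Illustrative sketch, not part of the build: the note before this table
   requires that slot N holds the HOWTO for relocation type N, so that a
   relocation number can be used directly as an index.  A hypothetical
   self-check along these lines would catch an entry that drifted out of
   place; the loop variable is an assumption made only for this example.

     unsigned int i;
     for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
       if (elf32_arm_howto_table_1[i].type != i)
         abort ();
*/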
1605
1606 /* 112-127 private relocations
1607 128 R_ARM_ME_TOO, obsolete
1608 129-255 unallocated in AAELF.
1609
1610 249-255 extended, currently unused, relocations: */
1611
1612 static reloc_howto_type elf32_arm_howto_table_2[4] =
1613 {
1614 HOWTO (R_ARM_RREL32, /* type */
1615 0, /* rightshift */
1616 0, /* size (0 = byte, 1 = short, 2 = long) */
1617 0, /* bitsize */
1618 FALSE, /* pc_relative */
1619 0, /* bitpos */
1620 complain_overflow_dont,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_RREL32", /* name */
1623 FALSE, /* partial_inplace */
1624 0, /* src_mask */
1625 0, /* dst_mask */
1626 FALSE), /* pcrel_offset */
1627
1628 HOWTO (R_ARM_RABS32, /* type */
1629 0, /* rightshift */
1630 0, /* size (0 = byte, 1 = short, 2 = long) */
1631 0, /* bitsize */
1632 FALSE, /* pc_relative */
1633 0, /* bitpos */
1634 complain_overflow_dont,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_RABS32", /* name */
1637 FALSE, /* partial_inplace */
1638 0, /* src_mask */
1639 0, /* dst_mask */
1640 FALSE), /* pcrel_offset */
1641
1642 HOWTO (R_ARM_RPC24, /* type */
1643 0, /* rightshift */
1644 0, /* size (0 = byte, 1 = short, 2 = long) */
1645 0, /* bitsize */
1646 FALSE, /* pc_relative */
1647 0, /* bitpos */
1648 complain_overflow_dont,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_RPC24", /* name */
1651 FALSE, /* partial_inplace */
1652 0, /* src_mask */
1653 0, /* dst_mask */
1654 FALSE), /* pcrel_offset */
1655
1656 HOWTO (R_ARM_RBASE, /* type */
1657 0, /* rightshift */
1658 0, /* size (0 = byte, 1 = short, 2 = long) */
1659 0, /* bitsize */
1660 FALSE, /* pc_relative */
1661 0, /* bitpos */
1662 complain_overflow_dont,/* complain_on_overflow */
1663 bfd_elf_generic_reloc, /* special_function */
1664 "R_ARM_RBASE", /* name */
1665 FALSE, /* partial_inplace */
1666 0, /* src_mask */
1667 0, /* dst_mask */
1668 FALSE) /* pcrel_offset */
1669 };
1670
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1673 {
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1676
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1680
1681 return NULL;
1682 }
1683
1684 static void
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1687 {
1688 unsigned int r_type;
1689
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 }
1693
1694 struct elf32_arm_reloc_map
1695 {
1696 bfd_reloc_code_real_type bfd_reloc_val;
1697 unsigned char elf_reloc_val;
1698 };
1699
1700 /* All entries in this list must also be present in elf32_arm_howto_table_1. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1702 {
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1733 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 };
1782
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1786 {
1787 unsigned int i;
1788
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1792
1793 return NULL;
1794 }
1795
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1798 const char *r_name)
1799 {
1800 unsigned int i;
1801
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1806
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1811
1812 return NULL;
1813 }
1814
1815 /* Support for core dump NOTE sections. */
1816
1817 static bfd_boolean
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1819 {
1820 int offset;
1821 size_t size;
1822
1823 switch (note->descsz)
1824 {
1825 default:
1826 return FALSE;
1827
1828 case 148: /* Linux/ARM 32-bit. */
1829 /* pr_cursig */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831
1832 /* pr_pid */
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1834
1835 /* pr_reg */
1836 offset = 72;
1837 size = 72;
1838
1839 break;
1840 }
1841
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1845 }
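
/* Informational note (offsets assumed from the Linux/ARM elf_prstatus
   layout): within the 148-byte note handled above, pr_cursig sits at
   offset 12, pr_pid at offset 24, and the 72-byte register block
   (18 words) starts at offset 72, which is what the ".reg"
   pseudosection above is carved out of.  */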
1846
1847 static bfd_boolean
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 {
1850 switch (note->descsz)
1851 {
1852 default:
1853 return FALSE;
1854
1855 case 124: /* Linux/ARM elf_prpsinfo. */
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 }
1861
1862   /* Note that, for some reason, a spurious space is tacked
1863      onto the end of the args by some implementations (at least
1864      one, anyway), so strip it off if it exists.  */
1865 {
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1868
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1871 }
1872
1873 return TRUE;
1874 }
1875
1876 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1877 #define TARGET_LITTLE_NAME "elf32-littlearm"
1878 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1879 #define TARGET_BIG_NAME "elf32-bigarm"
1880
1881 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1882 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883
1884 typedef unsigned long int insn32;
1885 typedef unsigned short int insn16;
1886
1887 /* In lieu of proper flags, assume all EABIv4 or later objects are
1888 interworkable. */
1889 #define INTERWORK_FLAG(abfd) \
1890 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1891 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1892 || ((abfd)->flags & BFD_LINKER_CREATED))
1893
1894 /* The linker script knows the section names for placement.
1895    The entry_names are used to do simple name mangling on the stubs.
1896    Given a function name and its type, the stub can be found.  The
1897    name can be changed.  The only requirement is that the %s be present.  */
1898 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1899 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900
1901 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1902 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903
1904 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1905 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906
1907 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1908 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909
1910 #define STUB_ENTRY_NAME "__%s_veneer"
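
/* For illustration only (this comment adds no behaviour): with the format
   strings above, a call to a function "foo" would use glue entries named
   "__foo_from_thumb" or "__foo_from_arm", and a long-branch stub would be
   named "__foo_veneer".  */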
1911
1912 /* The name of the dynamic interpreter. This is put in the .interp
1913 section. */
1914 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1915
1916 #ifdef FOUR_WORD_PLT
1917
1918 /* The first entry in a procedure linkage table looks like
1919 this. It is set up so that any shared library function that is
1920 called before the relocation has been set up calls the dynamic
1921 linker first. */
1922 static const bfd_vma elf32_arm_plt0_entry [] =
1923 {
1924 0xe52de004, /* str lr, [sp, #-4]! */
1925 0xe59fe010, /* ldr lr, [pc, #16] */
1926 0xe08fe00e, /* add lr, pc, lr */
1927 0xe5bef008, /* ldr pc, [lr, #8]! */
1928 };
1929
1930 /* Subsequent entries in a procedure linkage table look like
1931 this. */
1932 static const bfd_vma elf32_arm_plt_entry [] =
1933 {
1934 0xe28fc600, /* add ip, pc, #NN */
1935 0xe28cca00, /* add ip, ip, #NN */
1936 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1937 0x00000000, /* unused */
1938 };
1939
1940 #else
1941
1942 /* The first entry in a procedure linkage table looks like
1943 this. It is set up so that any shared library function that is
1944 called before the relocation has been set up calls the dynamic
1945 linker first. */
1946 static const bfd_vma elf32_arm_plt0_entry [] =
1947 {
1948 0xe52de004, /* str lr, [sp, #-4]! */
1949 0xe59fe004, /* ldr lr, [pc, #4] */
1950 0xe08fe00e, /* add lr, pc, lr */
1951 0xe5bef008, /* ldr pc, [lr, #8]! */
1952 0x00000000, /* &GOT[0] - . */
1953 };
1954
1955 /* Subsequent entries in a procedure linkage table look like
1956 this. */
1957 static const bfd_vma elf32_arm_plt_entry [] =
1958 {
1959 0xe28fc600, /* add ip, pc, #0xNN00000 */
1960 0xe28cca00, /* add ip, ip, #0xNN000 */
1961 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1962 };
1963
1964 #endif
1965
1966 /* The format of the first entry in the procedure linkage table
1967 for a VxWorks executable. */
1968 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1969 {
1970 0xe52dc008, /* str ip,[sp,#-8]! */
1971 0xe59fc000, /* ldr ip,[pc] */
1972 0xe59cf008, /* ldr pc,[ip,#8] */
1973 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1974 };
1975
1976 /* The format of subsequent entries in a VxWorks executable. */
1977 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1978 {
1979 0xe59fc000, /* ldr ip,[pc] */
1980 0xe59cf000, /* ldr pc,[ip] */
1981 0x00000000, /* .long @got */
1982 0xe59fc000, /* ldr ip,[pc] */
1983 0xea000000, /* b _PLT */
1984 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1985 };
1986
1987 /* The format of entries in a VxWorks shared library. */
1988 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1989 {
1990 0xe59fc000, /* ldr ip,[pc] */
1991 0xe79cf009, /* ldr pc,[ip,r9] */
1992 0x00000000, /* .long @got */
1993 0xe59fc000, /* ldr ip,[pc] */
1994 0xe599f008, /* ldr pc,[r9,#8] */
1995 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1996 };
1997
1998 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1999 #define PLT_THUMB_STUB_SIZE 4
2000 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2001 {
2002 0x4778, /* bx pc */
2003 0x46c0 /* nop */
2004 };
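
/* Informational note (assuming, as elsewhere in this file, that the stub
   is placed immediately before its ARM PLT entry): "bx pc" reads the
   Thumb PC as the address of the instruction plus 4, i.e. the word just
   past this 4-byte stub, where the ARM-mode PLT entry starts; since that
   address has bit 0 clear, the bx switches the processor to ARM state.  */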
2005
2006 /* The entries in a PLT when using a DLL-based target with multiple
2007 address spaces. */
2008 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2009 {
2010 0xe51ff004, /* ldr pc, [pc, #-4] */
2011 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 };
2013
2014 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2015 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2016 #define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) - 2 + 4)
2017 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2018 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2019 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
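
/* A worked reading of the limits above (informational, not used by the
   code): an ARM B/BL encodes a signed 24-bit word offset, so its maximum
   forward reach is ((1 << 23) - 1) << 2 bytes measured from PC + 8; a
   Thumb-1 BL encodes a 22-bit signed halfword offset and a Thumb-2
   B.W/BL a 24-bit signed halfword offset, both measured from PC + 4,
   hence the +4 terms.  */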
2020
2021 enum stub_insn_type
2022 {
2023 THUMB16_TYPE = 1,
2024 THUMB32_TYPE,
2025 ARM_TYPE,
2026 DATA_TYPE
2027 };
2028
2029 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2030 /* A bit of a hack: a Thumb conditional branch whose proper condition
2031    is inserted later, in arm_build_one_stub().  */
2032 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2033 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2034 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2035 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2036 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2037 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2038
2039 typedef struct
2040 {
2041 bfd_vma data;
2042 enum stub_insn_type type;
2043 unsigned int r_type;
2044 int reloc_addend;
2045 } insn_sequence;
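
/* Informational note: each insn_sequence entry describes one element of a
   stub template below: a 2-byte Thumb instruction, a 4-byte Thumb-2 or
   ARM instruction, or a 4-byte literal word, optionally tagged with a
   relocation type and addend that arm_build_one_stub () applies when the
   stub is emitted.  */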
2046
2047 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2048 to reach the stub if necessary. */
2049 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 {
2051 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2052 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2053 };
2054
2055 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2056 available. */
2057 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 {
2059 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2060 ARM_INSN(0xe12fff1c), /* bx ip */
2061 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2062 };
2063
2064 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2065 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 {
2067 THUMB16_INSN(0xb401), /* push {r0} */
2068 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2069 THUMB16_INSN(0x4684), /* mov ip, r0 */
2070 THUMB16_INSN(0xbc01), /* pop {r0} */
2071 THUMB16_INSN(0x4760), /* bx ip */
2072 THUMB16_INSN(0xbf00), /* nop */
2073 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2074 };
2075
2076 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2077 allowed. */
2078 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 {
2080 THUMB16_INSN(0x4778), /* bx pc */
2081 THUMB16_INSN(0x46c0), /* nop */
2082 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2083 ARM_INSN(0xe12fff1c), /* bx ip */
2084 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2085 };
2086
2087 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2088 available. */
2089 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 {
2091 THUMB16_INSN(0x4778), /* bx pc */
2092 THUMB16_INSN(0x46c0), /* nop */
2093 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2094 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2095 };
2096
2097 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2098 one, when the destination is close enough. */
2099 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 {
2101 THUMB16_INSN(0x4778), /* bx pc */
2102 THUMB16_INSN(0x46c0), /* nop */
2103 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2104 };
2105
2106 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2107 blx to reach the stub if necessary. */
2108 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 {
2110 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2111 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2112 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2113 };
2114
2115 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2116    blx to reach the stub if necessary.  We cannot add directly into pc;
2117    doing so is not guaranteed to switch mode (the behaviour differs
2118    between ARMv6 and ARMv7).  */
2119 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 {
2121 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2122 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2123 ARM_INSN(0xe12fff1c), /* bx ip */
2124 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2125 };
2126
2127 /* V4T ARM -> Thumb long branch stub, PIC.  */
2128 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 {
2130 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2131 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2132 ARM_INSN(0xe12fff1c), /* bx ip */
2133 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2134 };
2135
2136 /* V4T Thumb -> ARM long branch stub, PIC. */
2137 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 {
2139 THUMB16_INSN(0x4778), /* bx pc */
2140 THUMB16_INSN(0x46c0), /* nop */
2141 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2142 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2143   DATA_WORD(0, R_ARM_REL32, -4),     /* dcd   R_ARM_REL32(X-4) */
2144 };
2145
2146 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2147 architectures. */
2148 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 {
2150 THUMB16_INSN(0xb401), /* push {r0} */
2151 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2152 THUMB16_INSN(0x46fc), /* mov ip, pc */
2153 THUMB16_INSN(0x4484), /* add ip, r0 */
2154 THUMB16_INSN(0xbc01), /* pop {r0} */
2155 THUMB16_INSN(0x4760), /* bx ip */
2156 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2157 };
2158
2159 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2160 allowed. */
2161 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 {
2163 THUMB16_INSN(0x4778), /* bx pc */
2164 THUMB16_INSN(0x46c0), /* nop */
2165 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2166 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2167 ARM_INSN(0xe12fff1c), /* bx ip */
2168 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2169 };
2170
2171 /* Cortex-A8 erratum-workaround stubs. */
2172
2173 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2174 can't use a conditional branch to reach this stub). */
2175
2176 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 {
2178 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2179 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2180 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2181 };
2182
2183 /* Stub used for b.w and bl.w instructions. */
2184
2185 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 {
2187 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2188 };
2189
2190 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 {
2192 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2193 };
2194
2195 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2196 instruction (which switches to ARM mode) to point to this stub. Jump to the
2197 real destination using an ARM-mode branch. */
2198
2199 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 {
2201 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2202 };
2203
2204 /* Section name for stubs is the associated section name plus this
2205 string. */
2206 #define STUB_SUFFIX ".stub"
2207
2208 /* One entry per long/short branch stub defined above. */
2209 #define DEF_STUBS \
2210 DEF_STUB(long_branch_any_any) \
2211 DEF_STUB(long_branch_v4t_arm_thumb) \
2212 DEF_STUB(long_branch_thumb_only) \
2213 DEF_STUB(long_branch_v4t_thumb_thumb) \
2214 DEF_STUB(long_branch_v4t_thumb_arm) \
2215 DEF_STUB(short_branch_v4t_thumb_arm) \
2216 DEF_STUB(long_branch_any_arm_pic) \
2217 DEF_STUB(long_branch_any_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2220 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2221 DEF_STUB(long_branch_thumb_only_pic) \
2222 DEF_STUB(a8_veneer_b_cond) \
2223 DEF_STUB(a8_veneer_b) \
2224 DEF_STUB(a8_veneer_bl) \
2225 DEF_STUB(a8_veneer_blx)
2226
2227 #define DEF_STUB(x) arm_stub_##x,
2228 enum elf32_arm_stub_type {
2229 arm_stub_none,
2230 DEF_STUBS
2231   /* Note the first a8_veneer type.  */
2232 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2233 };
2234 #undef DEF_STUB
2235
2236 typedef struct
2237 {
2238 const insn_sequence* template_sequence;
2239 int template_size;
2240 } stub_def;
2241
2242 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2243 static const stub_def stub_definitions[] = {
2244 {NULL, 0},
2245 DEF_STUBS
2246 };
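
/* Informational note: with the two DEF_STUB definitions above, DEF_STUBS
   expands once into the enumerators arm_stub_long_branch_any_any,
   arm_stub_long_branch_v4t_arm_thumb, ... and once into matching
   {template, template-size} pairs, so stub_definitions[] can be indexed
   directly by enum elf32_arm_stub_type (slot 0 being arm_stub_none).  */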
2247
2248 struct elf32_arm_stub_hash_entry
2249 {
2250 /* Base hash table entry structure. */
2251 struct bfd_hash_entry root;
2252
2253 /* The stub section. */
2254 asection *stub_sec;
2255
2256 /* Offset within stub_sec of the beginning of this stub. */
2257 bfd_vma stub_offset;
2258
2259 /* Given the symbol's value and its section we can determine its final
2260 value when building the stubs (so the stub knows where to jump). */
2261 bfd_vma target_value;
2262 asection *target_section;
2263
2264 /* Offset to apply to relocation referencing target_value. */
2265 bfd_vma target_addend;
2266
2267 /* The instruction which caused this stub to be generated (only valid for
2268 Cortex-A8 erratum workaround stubs at present). */
2269 unsigned long orig_insn;
2270
2271 /* The stub type. */
2272 enum elf32_arm_stub_type stub_type;
2273 /* Its encoding size in bytes. */
2274 int stub_size;
2275 /* Its template. */
2276 const insn_sequence *stub_template;
2277 /* The size of the template (number of entries). */
2278 int stub_template_size;
2279
2280 /* The symbol table entry, if any, that this was derived from. */
2281 struct elf32_arm_link_hash_entry *h;
2282
2283 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2284 unsigned char st_type;
2285
2286 /* Where this stub is being called from, or, in the case of combined
2287 stub sections, the first input section in the group. */
2288 asection *id_sec;
2289
2290 /* The name for the local symbol at the start of this stub. The
2291 stub name in the hash table has to be unique; this does not, so
2292 it can be friendlier. */
2293 char *output_name;
2294 };
2295
2296 /* Used to build a map of a section. This is required for mixed-endian
2297 code/data. */
2298
2299 typedef struct elf32_elf_section_map
2300 {
2301 bfd_vma vma;
2302 char type;
2303 }
2304 elf32_arm_section_map;
2305
2306 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2307
2308 typedef enum
2309 {
2310 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2311 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2312 VFP11_ERRATUM_ARM_VENEER,
2313 VFP11_ERRATUM_THUMB_VENEER
2314 }
2315 elf32_vfp11_erratum_type;
2316
2317 typedef struct elf32_vfp11_erratum_list
2318 {
2319 struct elf32_vfp11_erratum_list *next;
2320 bfd_vma vma;
2321 union
2322 {
2323 struct
2324 {
2325 struct elf32_vfp11_erratum_list *veneer;
2326 unsigned int vfp_insn;
2327 } b;
2328 struct
2329 {
2330 struct elf32_vfp11_erratum_list *branch;
2331 unsigned int id;
2332 } v;
2333 } u;
2334 elf32_vfp11_erratum_type type;
2335 }
2336 elf32_vfp11_erratum_list;
2337
2338 typedef enum
2339 {
2340 DELETE_EXIDX_ENTRY,
2341 INSERT_EXIDX_CANTUNWIND_AT_END
2342 }
2343 arm_unwind_edit_type;
2344
2345 /* A (sorted) list of edits to apply to an unwind table. */
2346 typedef struct arm_unwind_table_edit
2347 {
2348 arm_unwind_edit_type type;
2349 /* Note: we sometimes want to insert an unwind entry corresponding to a
2350 section different from the one we're currently writing out, so record the
2351 (text) section this edit relates to here. */
2352 asection *linked_section;
2353 unsigned int index;
2354 struct arm_unwind_table_edit *next;
2355 }
2356 arm_unwind_table_edit;
2357
2358 typedef struct _arm_elf_section_data
2359 {
2360 /* Information about mapping symbols. */
2361 struct bfd_elf_section_data elf;
2362 unsigned int mapcount;
2363 unsigned int mapsize;
2364 elf32_arm_section_map *map;
2365 /* Information about CPU errata. */
2366 unsigned int erratumcount;
2367 elf32_vfp11_erratum_list *erratumlist;
2368 /* Information about unwind tables. */
2369 union
2370 {
2371 /* Unwind info attached to a text section. */
2372 struct
2373 {
2374 asection *arm_exidx_sec;
2375 } text;
2376
2377 /* Unwind info attached to an .ARM.exidx section. */
2378 struct
2379 {
2380 arm_unwind_table_edit *unwind_edit_list;
2381 arm_unwind_table_edit *unwind_edit_tail;
2382 } exidx;
2383 } u;
2384 }
2385 _arm_elf_section_data;
2386
2387 #define elf32_arm_section_data(sec) \
2388 ((_arm_elf_section_data *) elf_section_data (sec))
2389
2390 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2391 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2392    so may be created multiple times: we use an array of these entries whilst
2393    relaxing, which we can refresh easily, and then create stubs for each
2394    potentially erratum-triggering instruction once we've settled on a solution.  */
2395
2396 struct a8_erratum_fix {
2397 bfd *input_bfd;
2398 asection *section;
2399 bfd_vma offset;
2400 bfd_vma addend;
2401 unsigned long orig_insn;
2402 char *stub_name;
2403 enum elf32_arm_stub_type stub_type;
2404 };
2405
2406 /* A table of relocs applied to branches which might trigger Cortex-A8
2407 erratum. */
2408
2409 struct a8_erratum_reloc {
2410 bfd_vma from;
2411 bfd_vma destination;
2412 unsigned int r_type;
2413 unsigned char st_type;
2414 const char *sym_name;
2415 bfd_boolean non_a8_stub;
2416 };
2417
2418 /* The size of the thread control block. */
2419 #define TCB_SIZE 8
2420
2421 struct elf_arm_obj_tdata
2422 {
2423 struct elf_obj_tdata root;
2424
2425 /* tls_type for each local got entry. */
2426 char *local_got_tls_type;
2427
2428 /* Zero to warn when linking objects with incompatible enum sizes. */
2429 int no_enum_size_warning;
2430
2431 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2432 int no_wchar_size_warning;
2433 };
2434
2435 #define elf_arm_tdata(bfd) \
2436 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2437
2438 #define elf32_arm_local_got_tls_type(bfd) \
2439 (elf_arm_tdata (bfd)->local_got_tls_type)
2440
2441 #define is_arm_elf(bfd) \
2442 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2443 && elf_tdata (bfd) != NULL \
2444 && elf_object_id (bfd) == ARM_ELF_TDATA)
2445
2446 static bfd_boolean
2447 elf32_arm_mkobject (bfd *abfd)
2448 {
2449 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2450 ARM_ELF_TDATA);
2451 }
2452
2453 /* The ARM linker needs to keep track of the number of relocs that it
2454 decides to copy in check_relocs for each symbol. This is so that
2455 it can discard PC relative relocs if it doesn't need them when
2456 linking with -Bsymbolic. We store the information in a field
2457 extending the regular ELF linker hash table. */
2458
2459 /* This structure keeps track of the number of relocs we have copied
2460 for a given symbol. */
2461 struct elf32_arm_relocs_copied
2462 {
2463 /* Next section. */
2464 struct elf32_arm_relocs_copied * next;
2465 /* A section in dynobj. */
2466 asection * section;
2467 /* Number of relocs copied in this section. */
2468 bfd_size_type count;
2469 /* Number of PC-relative relocs copied in this section. */
2470 bfd_size_type pc_count;
2471 };
2472
2473 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2474
2475 /* Arm ELF linker hash entry. */
2476 struct elf32_arm_link_hash_entry
2477 {
2478 struct elf_link_hash_entry root;
2479
2480 /* Number of PC relative relocs copied for this symbol. */
2481 struct elf32_arm_relocs_copied * relocs_copied;
2482
2483 /* We reference count Thumb references to a PLT entry separately,
2484 so that we can emit the Thumb trampoline only if needed. */
2485 bfd_signed_vma plt_thumb_refcount;
2486
2487 /* Some references from Thumb code may be eliminated by BL->BLX
2488 conversion, so record them separately. */
2489 bfd_signed_vma plt_maybe_thumb_refcount;
2490
2491 /* Since PLT entries have variable size if the Thumb prologue is
2492 used, we need to record the index into .got.plt instead of
2493 recomputing it from the PLT offset. */
2494 bfd_signed_vma plt_got_offset;
2495
2496 #define GOT_UNKNOWN 0
2497 #define GOT_NORMAL 1
2498 #define GOT_TLS_GD 2
2499 #define GOT_TLS_IE 4
2500 unsigned char tls_type;
2501
2502 /* The symbol marking the real symbol location for exported thumb
2503 symbols with Arm stubs. */
2504 struct elf_link_hash_entry *export_glue;
2505
2506 /* A pointer to the most recently used stub hash entry against this
2507 symbol. */
2508 struct elf32_arm_stub_hash_entry *stub_cache;
2509 };
2510
2511 /* Traverse an arm ELF linker hash table. */
2512 #define elf32_arm_link_hash_traverse(table, func, info) \
2513 (elf_link_hash_traverse \
2514 (&(table)->root, \
2515 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2516 (info)))
2517
2518 /* Get the ARM elf linker hash table from a link_info structure. */
2519 #define elf32_arm_hash_table(info) \
2520 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2521
2522 #define arm_stub_hash_lookup(table, string, create, copy) \
2523 ((struct elf32_arm_stub_hash_entry *) \
2524 bfd_hash_lookup ((table), (string), (create), (copy)))
2525
2526 /* ARM ELF linker hash table. */
2527 struct elf32_arm_link_hash_table
2528 {
2529 /* The main hash table. */
2530 struct elf_link_hash_table root;
2531
2532 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2533 bfd_size_type thumb_glue_size;
2534
2535 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2536 bfd_size_type arm_glue_size;
2537
2538 /* The size in bytes of section containing the ARMv4 BX veneers. */
2539   /* The size in bytes of the section containing the ARMv4 BX veneers.  */
2540
2541 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2542 veneer has been populated. */
2543 bfd_vma bx_glue_offset[15];
2544
2545 /* The size in bytes of the section containing glue for VFP11 erratum
2546 veneers. */
2547 bfd_size_type vfp11_erratum_glue_size;
2548
2549 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2550 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2551 elf32_arm_write_section(). */
2552 struct a8_erratum_fix *a8_erratum_fixes;
2553 unsigned int num_a8_erratum_fixes;
2554
2555 /* An arbitrary input BFD chosen to hold the glue sections. */
2556 bfd * bfd_of_glue_owner;
2557
2558 /* Nonzero to output a BE8 image. */
2559 int byteswap_code;
2560
2561 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2562 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2563 int target1_is_rel;
2564
2565 /* The relocation to use for R_ARM_TARGET2 relocations. */
2566 int target2_reloc;
2567
2568 /* 0 = Ignore R_ARM_V4BX.
2569 1 = Convert BX to MOV PC.
2570      2 = Generate v4 interworking stubs.  */
2571 int fix_v4bx;
2572
2573 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2574 int fix_cortex_a8;
2575
2576 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2577 int use_blx;
2578
2579 /* What sort of code sequences we should look for which may trigger the
2580 VFP11 denorm erratum. */
2581 bfd_arm_vfp11_fix vfp11_fix;
2582
2583 /* Global counter for the number of fixes we have emitted. */
2584 int num_vfp11_fixes;
2585
2586 /* Nonzero to force PIC branch veneers. */
2587 int pic_veneer;
2588
2589 /* The number of bytes in the initial entry in the PLT. */
2590 bfd_size_type plt_header_size;
2591
2592   /* The number of bytes in the subsequent PLT entries.  */
2593 bfd_size_type plt_entry_size;
2594
2595 /* True if the target system is VxWorks. */
2596 int vxworks_p;
2597
2598 /* True if the target system is Symbian OS. */
2599 int symbian_p;
2600
2601 /* True if the target uses REL relocations. */
2602 int use_rel;
2603
2604 /* Short-cuts to get to dynamic linker sections. */
2605 asection *sgot;
2606 asection *sgotplt;
2607 asection *srelgot;
2608 asection *splt;
2609 asection *srelplt;
2610 asection *sdynbss;
2611 asection *srelbss;
2612
2613 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2614 asection *srelplt2;
2615
2616 /* Data for R_ARM_TLS_LDM32 relocations. */
2617 union
2618 {
2619 bfd_signed_vma refcount;
2620 bfd_vma offset;
2621 } tls_ldm_got;
2622
2623 /* Small local sym cache. */
2624 struct sym_cache sym_cache;
2625
2626 /* For convenience in allocate_dynrelocs. */
2627 bfd * obfd;
2628
2629 /* The stub hash table. */
2630 struct bfd_hash_table stub_hash_table;
2631
2632 /* Linker stub bfd. */
2633 bfd *stub_bfd;
2634
2635 /* Linker call-backs. */
2636 asection * (*add_stub_section) (const char *, asection *);
2637 void (*layout_sections_again) (void);
2638
2639 /* Array to keep track of which stub sections have been created, and
2640 information on stub grouping. */
2641 struct map_stub
2642 {
2643 /* This is the section to which stubs in the group will be
2644 attached. */
2645 asection *link_sec;
2646 /* The stub section. */
2647 asection *stub_sec;
2648 } *stub_group;
2649
2650 /* Assorted information used by elf32_arm_size_stubs. */
2651 unsigned int bfd_count;
2652 int top_index;
2653 asection **input_list;
2654 };
2655
2656 /* Create an entry in an ARM ELF linker hash table. */
2657
2658 static struct bfd_hash_entry *
2659 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2660 struct bfd_hash_table * table,
2661 const char * string)
2662 {
2663 struct elf32_arm_link_hash_entry * ret =
2664 (struct elf32_arm_link_hash_entry *) entry;
2665
2666 /* Allocate the structure if it has not already been allocated by a
2667 subclass. */
2668 if (ret == NULL)
2669 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2670 if (ret == NULL)
2671 return (struct bfd_hash_entry *) ret;
2672
2673 /* Call the allocation method of the superclass. */
2674 ret = ((struct elf32_arm_link_hash_entry *)
2675 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2676 table, string));
2677 if (ret != NULL)
2678 {
2679 ret->relocs_copied = NULL;
2680 ret->tls_type = GOT_UNKNOWN;
2681 ret->plt_thumb_refcount = 0;
2682 ret->plt_maybe_thumb_refcount = 0;
2683 ret->plt_got_offset = -1;
2684 ret->export_glue = NULL;
2685
2686 ret->stub_cache = NULL;
2687 }
2688
2689 return (struct bfd_hash_entry *) ret;
2690 }
2691
2692 /* Initialize an entry in the stub hash table. */
2693
2694 static struct bfd_hash_entry *
2695 stub_hash_newfunc (struct bfd_hash_entry *entry,
2696 struct bfd_hash_table *table,
2697 const char *string)
2698 {
2699 /* Allocate the structure if it has not already been allocated by a
2700 subclass. */
2701 if (entry == NULL)
2702 {
2703 entry = bfd_hash_allocate (table,
2704 sizeof (struct elf32_arm_stub_hash_entry));
2705 if (entry == NULL)
2706 return entry;
2707 }
2708
2709 /* Call the allocation method of the superclass. */
2710 entry = bfd_hash_newfunc (entry, table, string);
2711 if (entry != NULL)
2712 {
2713 struct elf32_arm_stub_hash_entry *eh;
2714
2715 /* Initialize the local fields. */
2716 eh = (struct elf32_arm_stub_hash_entry *) entry;
2717 eh->stub_sec = NULL;
2718 eh->stub_offset = 0;
2719 eh->target_value = 0;
2720 eh->target_section = NULL;
2721 eh->target_addend = 0;
2722 eh->orig_insn = 0;
2723 eh->stub_type = arm_stub_none;
2724 eh->stub_size = 0;
2725 eh->stub_template = NULL;
2726 eh->stub_template_size = 0;
2727 eh->h = NULL;
2728 eh->id_sec = NULL;
2729 eh->output_name = NULL;
2730 }
2731
2732 return entry;
2733 }
2734
2735 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2736 shortcuts to them in our hash table. */
2737
2738 static bfd_boolean
2739 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2740 {
2741 struct elf32_arm_link_hash_table *htab;
2742
2743 htab = elf32_arm_hash_table (info);
2744 /* BPABI objects never have a GOT, or associated sections. */
2745 if (htab->symbian_p)
2746 return TRUE;
2747
2748 if (! _bfd_elf_create_got_section (dynobj, info))
2749 return FALSE;
2750
2751 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2752 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2753 if (!htab->sgot || !htab->sgotplt)
2754 abort ();
2755
2756 htab->srelgot = bfd_get_section_by_name (dynobj,
2757 RELOC_SECTION (htab, ".got"));
2758 if (htab->srelgot == NULL)
2759 return FALSE;
2760 return TRUE;
2761 }
2762
2763 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2764 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2765 hash table. */
2766
2767 static bfd_boolean
2768 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2769 {
2770 struct elf32_arm_link_hash_table *htab;
2771
2772 htab = elf32_arm_hash_table (info);
2773 if (!htab->sgot && !create_got_section (dynobj, info))
2774 return FALSE;
2775
2776 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2777 return FALSE;
2778
2779 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2780 htab->srelplt = bfd_get_section_by_name (dynobj,
2781 RELOC_SECTION (htab, ".plt"));
2782 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2783 if (!info->shared)
2784 htab->srelbss = bfd_get_section_by_name (dynobj,
2785 RELOC_SECTION (htab, ".bss"));
2786
2787 if (htab->vxworks_p)
2788 {
2789 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2790 return FALSE;
2791
2792 if (info->shared)
2793 {
2794 htab->plt_header_size = 0;
2795 htab->plt_entry_size
2796 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2797 }
2798 else
2799 {
2800 htab->plt_header_size
2801 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2802 htab->plt_entry_size
2803 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2804 }
2805 }
2806
2807 if (!htab->splt
2808 || !htab->srelplt
2809 || !htab->sdynbss
2810 || (!info->shared && !htab->srelbss))
2811 abort ();
2812
2813 return TRUE;
2814 }
2815
2816 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2817
2818 static void
2819 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2820 struct elf_link_hash_entry *dir,
2821 struct elf_link_hash_entry *ind)
2822 {
2823 struct elf32_arm_link_hash_entry *edir, *eind;
2824
2825 edir = (struct elf32_arm_link_hash_entry *) dir;
2826 eind = (struct elf32_arm_link_hash_entry *) ind;
2827
2828 if (eind->relocs_copied != NULL)
2829 {
2830 if (edir->relocs_copied != NULL)
2831 {
2832 struct elf32_arm_relocs_copied **pp;
2833 struct elf32_arm_relocs_copied *p;
2834
2835 /* Add reloc counts against the indirect sym to the direct sym
2836 list. Merge any entries against the same section. */
2837 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2838 {
2839 struct elf32_arm_relocs_copied *q;
2840
2841 for (q = edir->relocs_copied; q != NULL; q = q->next)
2842 if (q->section == p->section)
2843 {
2844 q->pc_count += p->pc_count;
2845 q->count += p->count;
2846 *pp = p->next;
2847 break;
2848 }
2849 if (q == NULL)
2850 pp = &p->next;
2851 }
2852 *pp = edir->relocs_copied;
2853 }
2854
2855 edir->relocs_copied = eind->relocs_copied;
2856 eind->relocs_copied = NULL;
2857 }
2858
2859 if (ind->root.type == bfd_link_hash_indirect)
2860 {
2861 /* Copy over PLT info. */
2862 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2863 eind->plt_thumb_refcount = 0;
2864 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2865 eind->plt_maybe_thumb_refcount = 0;
2866
2867 if (dir->got.refcount <= 0)
2868 {
2869 edir->tls_type = eind->tls_type;
2870 eind->tls_type = GOT_UNKNOWN;
2871 }
2872 }
2873
2874 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2875 }
2876
2877 /* Create an ARM elf linker hash table. */
2878
2879 static struct bfd_link_hash_table *
2880 elf32_arm_link_hash_table_create (bfd *abfd)
2881 {
2882 struct elf32_arm_link_hash_table *ret;
2883 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2884
2885 ret = bfd_malloc (amt);
2886 if (ret == NULL)
2887 return NULL;
2888
2889 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2890 elf32_arm_link_hash_newfunc,
2891 sizeof (struct elf32_arm_link_hash_entry)))
2892 {
2893 free (ret);
2894 return NULL;
2895 }
2896
2897 ret->sgot = NULL;
2898 ret->sgotplt = NULL;
2899 ret->srelgot = NULL;
2900 ret->splt = NULL;
2901 ret->srelplt = NULL;
2902 ret->sdynbss = NULL;
2903 ret->srelbss = NULL;
2904 ret->srelplt2 = NULL;
2905 ret->thumb_glue_size = 0;
2906 ret->arm_glue_size = 0;
2907 ret->bx_glue_size = 0;
2908 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2909 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2910 ret->vfp11_erratum_glue_size = 0;
2911 ret->num_vfp11_fixes = 0;
2912 ret->fix_cortex_a8 = 0;
2913 ret->bfd_of_glue_owner = NULL;
2914 ret->byteswap_code = 0;
2915 ret->target1_is_rel = 0;
2916 ret->target2_reloc = R_ARM_NONE;
2917 #ifdef FOUR_WORD_PLT
2918 ret->plt_header_size = 16;
2919 ret->plt_entry_size = 16;
2920 #else
2921 ret->plt_header_size = 20;
2922 ret->plt_entry_size = 12;
2923 #endif
2924 ret->fix_v4bx = 0;
2925 ret->use_blx = 0;
2926 ret->vxworks_p = 0;
2927 ret->symbian_p = 0;
2928 ret->use_rel = 1;
2929 ret->sym_cache.abfd = NULL;
2930 ret->obfd = abfd;
2931 ret->tls_ldm_got.refcount = 0;
2932 ret->stub_bfd = NULL;
2933 ret->add_stub_section = NULL;
2934 ret->layout_sections_again = NULL;
2935 ret->stub_group = NULL;
2936 ret->bfd_count = 0;
2937 ret->top_index = 0;
2938 ret->input_list = NULL;
2939
2940 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2941 sizeof (struct elf32_arm_stub_hash_entry)))
2942 {
2943 free (ret);
2944 return NULL;
2945 }
2946
2947 return &ret->root.root;
2948 }
2949
2950 /* Free the derived linker hash table. */
2951
2952 static void
2953 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2954 {
2955 struct elf32_arm_link_hash_table *ret
2956 = (struct elf32_arm_link_hash_table *) hash;
2957
2958 bfd_hash_table_free (&ret->stub_hash_table);
2959 _bfd_generic_link_hash_table_free (hash);
2960 }
2961
2962 /* Determine if we're dealing with a Thumb-only architecture.  */
2963
2964 static bfd_boolean
2965 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2966 {
2967 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2968 Tag_CPU_arch);
2969 int profile;
2970
2971 if (arch != TAG_CPU_ARCH_V7)
2972 return FALSE;
2973
2974 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2975 Tag_CPU_arch_profile);
2976
2977 return profile == 'M';
2978 }
2979
2980 /* Determine if we're dealing with a Thumb-2 object. */
2981
2982 static bfd_boolean
2983 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2984 {
2985 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2986 Tag_CPU_arch);
2987 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2988 }
2989
2990 /* Determine what kind of NOPs are available. */
2991
2992 static bfd_boolean
2993 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
2994 {
2995 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2996 Tag_CPU_arch);
2997 return arch == TAG_CPU_ARCH_V6T2
2998 || arch == TAG_CPU_ARCH_V6K
2999 || arch == TAG_CPU_ARCH_V7;
3000 }
3001
3002 static bfd_boolean
3003 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3004 {
3005 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3006 Tag_CPU_arch);
3007 return arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7;
3008 }
3009
3010 static bfd_boolean
3011 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3012 {
3013 switch (stub_type)
3014 {
3015 case arm_stub_long_branch_thumb_only:
3016 case arm_stub_long_branch_v4t_thumb_arm:
3017 case arm_stub_short_branch_v4t_thumb_arm:
3018 case arm_stub_long_branch_v4t_thumb_arm_pic:
3019 case arm_stub_long_branch_thumb_only_pic:
3020 return TRUE;
3021 case arm_stub_none:
3022 BFD_FAIL ();
3023 return FALSE;
3024 break;
3025 default:
3026 return FALSE;
3027 }
3028 }
3029
3030 /* Determine the type of stub needed, if any, for a call. */
3031
3032 static enum elf32_arm_stub_type
3033 arm_type_of_stub (struct bfd_link_info *info,
3034 asection *input_sec,
3035 const Elf_Internal_Rela *rel,
3036 unsigned char st_type,
3037 struct elf32_arm_link_hash_entry *hash,
3038 bfd_vma destination,
3039 asection *sym_sec,
3040 bfd *input_bfd,
3041 const char *name)
3042 {
3043 bfd_vma location;
3044 bfd_signed_vma branch_offset;
3045 unsigned int r_type;
3046 struct elf32_arm_link_hash_table * globals;
3047 int thumb2;
3048 int thumb_only;
3049 enum elf32_arm_stub_type stub_type = arm_stub_none;
3050 int use_plt = 0;
3051
3052   /* We don't know the actual type of the destination if it is of
3053      type STT_SECTION: give up.  */
3054 if (st_type == STT_SECTION)
3055 return stub_type;
3056
3057 globals = elf32_arm_hash_table (info);
3058
3059 thumb_only = using_thumb_only (globals);
3060
3061 thumb2 = using_thumb2 (globals);
3062
3063 /* Determine where the call point is. */
3064 location = (input_sec->output_offset
3065 + input_sec->output_section->vma
3066 + rel->r_offset);
3067
3068 branch_offset = (bfd_signed_vma)(destination - location);
3069
3070 r_type = ELF32_R_TYPE (rel->r_info);
3071
3072 /* Keep a simpler condition, for the sake of clarity. */
3073 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3074 {
3075 use_plt = 1;
3076 /* Note when dealing with PLT entries: the main PLT stub is in
3077 ARM mode, so if the branch is in Thumb mode, another
3078 Thumb->ARM stub will be inserted later just before the ARM
3079 PLT stub. We don't take this extra distance into account
3080 here, because if a long branch stub is needed, we'll add a
3081 Thumb->Arm one and branch directly to the ARM PLT entry
3082 because it avoids spreading offset corrections in several
3083 places. */
3084 }
3085
3086 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3087 {
3088 /* Handle cases where:
3089 - this call goes too far (different Thumb/Thumb2 max
3090 distance)
3091 - it's a Thumb->Arm call and blx is not available, or it's a
3092 Thumb->Arm branch (not bl). A stub is needed in this case,
3093 but only if this call is not through a PLT entry. Indeed,
3094 PLT stubs handle mode switching already.
3095 */
3096 if ((!thumb2
3097 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3098 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3099 || (thumb2
3100 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3101 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3102 || ((st_type != STT_ARM_TFUNC)
3103 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3104 || (r_type == R_ARM_THM_JUMP24))
3105 && !use_plt))
3106 {
3107 if (st_type == STT_ARM_TFUNC)
3108 {
3109 /* Thumb to thumb. */
3110 if (!thumb_only)
3111 {
3112 stub_type = (info->shared | globals->pic_veneer)
3113 /* PIC stubs. */
3114 ? ((globals->use_blx
3115 	      && (r_type == R_ARM_THM_CALL))
3116 /* V5T and above. Stub starts with ARM code, so
3117 we must be able to switch mode before
3118 reaching it, which is only possible for 'bl'
3119 (ie R_ARM_THM_CALL relocation). */
3120 ? arm_stub_long_branch_any_thumb_pic
3121 /* On V4T, use Thumb code only. */
3122 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3123
3124 /* non-PIC stubs. */
3125 : ((globals->use_blx
3126 	         && (r_type == R_ARM_THM_CALL))
3127 /* V5T and above. */
3128 ? arm_stub_long_branch_any_any
3129 /* V4T. */
3130 : arm_stub_long_branch_v4t_thumb_thumb);
3131 }
3132 else
3133 {
3134 stub_type = (info->shared | globals->pic_veneer)
3135 /* PIC stub. */
3136 ? arm_stub_long_branch_thumb_only_pic
3137 /* non-PIC stub. */
3138 : arm_stub_long_branch_thumb_only;
3139 }
3140 }
3141 else
3142 {
3143 /* Thumb to arm. */
3144 if (sym_sec != NULL
3145 && sym_sec->owner != NULL
3146 && !INTERWORK_FLAG (sym_sec->owner))
3147 {
3148 (*_bfd_error_handler)
3149 (_("%B(%s): warning: interworking not enabled.\n"
3150 " first occurrence: %B: Thumb call to ARM"),
3151 sym_sec->owner, input_bfd, name);
3152 }
3153
3154 stub_type = (info->shared | globals->pic_veneer)
3155 /* PIC stubs. */
3156 ? ((globals->use_blx
3157 	       && (r_type == R_ARM_THM_CALL))
3158 /* V5T and above. */
3159 ? arm_stub_long_branch_any_arm_pic
3160 /* V4T PIC stub. */
3161 : arm_stub_long_branch_v4t_thumb_arm_pic)
3162
3163 /* non-PIC stubs. */
3164 : ((globals->use_blx
3165 	       && (r_type == R_ARM_THM_CALL))
3166 /* V5T and above. */
3167 ? arm_stub_long_branch_any_any
3168 /* V4T. */
3169 : arm_stub_long_branch_v4t_thumb_arm);
3170
3171 /* Handle v4t short branches. */
3172 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3173 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3174 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3175 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3176 }
3177 }
3178 }
3179 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3180 {
3181 if (st_type == STT_ARM_TFUNC)
3182 {
3183 /* Arm to thumb. */
3184
3185 if (sym_sec != NULL
3186 && sym_sec->owner != NULL
3187 && !INTERWORK_FLAG (sym_sec->owner))
3188 {
3189 (*_bfd_error_handler)
3190 (_("%B(%s): warning: interworking not enabled.\n"
3191 " first occurrence: %B: ARM call to Thumb"),
3192 sym_sec->owner, input_bfd, name);
3193 }
3194
3195 	  /* We have an extra 2-byte reach because of
3196 	     the mode change (bit 24 (H) of the BLX encoding).  */
3197 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3198 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3199 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3200 || (r_type == R_ARM_JUMP24)
3201 || (r_type == R_ARM_PLT32))
3202 {
3203 stub_type = (info->shared | globals->pic_veneer)
3204 /* PIC stubs. */
3205 ? ((globals->use_blx)
3206 /* V5T and above. */
3207 ? arm_stub_long_branch_any_thumb_pic
3208 /* V4T stub. */
3209 : arm_stub_long_branch_v4t_arm_thumb_pic)
3210
3211 /* non-PIC stubs. */
3212 : ((globals->use_blx)
3213 /* V5T and above. */
3214 ? arm_stub_long_branch_any_any
3215 /* V4T. */
3216 : arm_stub_long_branch_v4t_arm_thumb);
3217 }
3218 }
3219 else
3220 {
3221 /* Arm to arm. */
3222 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3223 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3224 {
3225 stub_type = (info->shared | globals->pic_veneer)
3226 /* PIC stubs. */
3227 ? arm_stub_long_branch_any_arm_pic
3228 /* non-PIC stubs. */
3229 : arm_stub_long_branch_any_any;
3230 }
3231 }
3232 }
3233
3234 return stub_type;
3235 }
3236
3237 /* Build a name for an entry in the stub hash table. */
3238
3239 static char *
3240 elf32_arm_stub_name (const asection *input_section,
3241 const asection *sym_sec,
3242 const struct elf32_arm_link_hash_entry *hash,
3243 const Elf_Internal_Rela *rel)
3244 {
3245 char *stub_name;
3246 bfd_size_type len;
3247
3248 if (hash)
3249 {
3250 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3251 stub_name = bfd_malloc (len);
3252 if (stub_name != NULL)
3253 sprintf (stub_name, "%08x_%s+%x",
3254 input_section->id & 0xffffffff,
3255 hash->root.root.root.string,
3256 (int) rel->r_addend & 0xffffffff);
3257 }
3258 else
3259 {
3260 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3261 stub_name = bfd_malloc (len);
3262 if (stub_name != NULL)
3263 sprintf (stub_name, "%08x_%x:%x+%x",
3264 input_section->id & 0xffffffff,
3265 sym_sec->id & 0xffffffff,
3266 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3267 (int) rel->r_addend & 0xffffffff);
3268 }
3269
3270 return stub_name;
3271 }
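
/* For illustration only: with the formats above, a stub for a call to a
   global symbol "printf" from the section group whose first section has
   id 0x2a and with a zero addend would be named "0000002a_printf+0",
   while a stub for a local symbol is named from the section and symbol
   indices instead.  */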
3272
3273 /* Look up an entry in the stub hash. Stub entries are cached because
3274 creating the stub name takes a bit of time. */
3275
3276 static struct elf32_arm_stub_hash_entry *
3277 elf32_arm_get_stub_entry (const asection *input_section,
3278 const asection *sym_sec,
3279 struct elf_link_hash_entry *hash,
3280 const Elf_Internal_Rela *rel,
3281 struct elf32_arm_link_hash_table *htab)
3282 {
3283 struct elf32_arm_stub_hash_entry *stub_entry;
3284 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3285 const asection *id_sec;
3286
3287 if ((input_section->flags & SEC_CODE) == 0)
3288 return NULL;
3289
3290 /* If this input section is part of a group of sections sharing one
3291 stub section, then use the id of the first section in the group.
3292 Stub names need to include a section id, as there may well be
3293      more than one stub used to reach, say, printf, and we need to
3294 distinguish between them. */
3295 id_sec = htab->stub_group[input_section->id].link_sec;
3296
3297 if (h != NULL && h->stub_cache != NULL
3298 && h->stub_cache->h == h
3299 && h->stub_cache->id_sec == id_sec)
3300 {
3301 stub_entry = h->stub_cache;
3302 }
3303 else
3304 {
3305 char *stub_name;
3306
3307 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3308 if (stub_name == NULL)
3309 return NULL;
3310
3311 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3312 stub_name, FALSE, FALSE);
3313 if (h != NULL)
3314 h->stub_cache = stub_entry;
3315
3316 free (stub_name);
3317 }
3318
3319 return stub_entry;
3320 }
3321
3322 /* Find or create a stub section. Returns a pointer to the stub section, and
3323 the section to which the stub section will be attached (in *LINK_SEC_P).
3324 LINK_SEC_P may be NULL. */
3325
3326 static asection *
3327 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3328 struct elf32_arm_link_hash_table *htab)
3329 {
3330 asection *link_sec;
3331 asection *stub_sec;
3332
3333 link_sec = htab->stub_group[section->id].link_sec;
3334 stub_sec = htab->stub_group[section->id].stub_sec;
3335 if (stub_sec == NULL)
3336 {
3337 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3338 if (stub_sec == NULL)
3339 {
3340 size_t namelen;
3341 bfd_size_type len;
3342 char *s_name;
3343
3344 namelen = strlen (link_sec->name);
3345 len = namelen + sizeof (STUB_SUFFIX);
3346 s_name = bfd_alloc (htab->stub_bfd, len);
3347 if (s_name == NULL)
3348 return NULL;
3349
3350 memcpy (s_name, link_sec->name, namelen);
3351 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3352 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3353 if (stub_sec == NULL)
3354 return NULL;
3355 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3356 }
3357 htab->stub_group[section->id].stub_sec = stub_sec;
3358 }
3359
3360 if (link_sec_p)
3361 *link_sec_p = link_sec;
3362
3363 return stub_sec;
3364 }
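
/* For illustration only: with STUB_SUFFIX defined as ".stub" above, stubs
   grouped with an input ".text" section end up in a stub section named
   ".text.stub", attached to that group's leader section.  */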
3365
3366 /* Add a new stub entry to the stub hash. Not all fields of the new
3367 stub entry are initialised. */
3368
3369 static struct elf32_arm_stub_hash_entry *
3370 elf32_arm_add_stub (const char *stub_name,
3371 asection *section,
3372 struct elf32_arm_link_hash_table *htab)
3373 {
3374 asection *link_sec;
3375 asection *stub_sec;
3376 struct elf32_arm_stub_hash_entry *stub_entry;
3377
3378 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3379 if (stub_sec == NULL)
3380 return NULL;
3381
3382 /* Enter this entry into the linker stub hash table. */
3383 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3384 TRUE, FALSE);
3385 if (stub_entry == NULL)
3386 {
3387 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3388 section->owner,
3389 stub_name);
3390 return NULL;
3391 }
3392
3393 stub_entry->stub_sec = stub_sec;
3394 stub_entry->stub_offset = 0;
3395 stub_entry->id_sec = link_sec;
3396
3397 return stub_entry;
3398 }
3399
3400 /* Store an Arm insn into an output section not processed by
3401 elf32_arm_write_section. */
3402
3403 static void
3404 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3405 bfd * output_bfd, bfd_vma val, void * ptr)
3406 {
3407 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3408 bfd_putl32 (val, ptr);
3409 else
3410 bfd_putb32 (val, ptr);
3411 }
3412
3413 /* Store a 16-bit Thumb insn into an output section not processed by
3414 elf32_arm_write_section. */
3415
3416 static void
3417 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3418 bfd * output_bfd, bfd_vma val, void * ptr)
3419 {
3420 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3421 bfd_putl16 (val, ptr);
3422 else
3423 bfd_putb16 (val, ptr);
3424 }
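
/* Informational note: the two helpers above support BE8 images, where
   instructions are stored little-endian even though data (and the output
   BFD) are big-endian; hence an instruction is written little-endian
   whenever byteswap_code does not match the output's little-endian flag,
   and big-endian otherwise.  */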
3425
3426 static bfd_reloc_status_type elf32_arm_final_link_relocate
3427 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3428 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3429 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3430
3431 static bfd_boolean
3432 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3433 void * in_arg)
3434 {
3435 #define MAXRELOCS 2
3436 struct elf32_arm_stub_hash_entry *stub_entry;
3437 struct bfd_link_info *info;
3438 struct elf32_arm_link_hash_table *htab;
3439 asection *stub_sec;
3440 bfd *stub_bfd;
3441 bfd_vma stub_addr;
3442 bfd_byte *loc;
3443 bfd_vma sym_value;
3444 int template_size;
3445 int size;
3446 const insn_sequence *template_sequence;
3447 int i;
3448 struct elf32_arm_link_hash_table * globals;
3449 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3450 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3451 int nrelocs = 0;
3452
3453 /* Massage our args to the form they really have. */
3454 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3455 info = (struct bfd_link_info *) in_arg;
3456
3457 globals = elf32_arm_hash_table (info);
3458
3459 htab = elf32_arm_hash_table (info);
3460 stub_sec = stub_entry->stub_sec;
3461
3462 if ((htab->fix_cortex_a8 < 0)
3463 != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
3464 /* We have to do the a8 fixes last, as they are less aligned than
3465 the other veneers. */
3466 return TRUE;
3467
3468 /* Make a note of the offset within the stubs for this entry. */
3469 stub_entry->stub_offset = stub_sec->size;
3470 loc = stub_sec->contents + stub_entry->stub_offset;
3471
3472 stub_bfd = stub_sec->owner;
3473
3474 /* This is the address of the start of the stub. */
3475 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3476 + stub_entry->stub_offset;
3477
3478 /* This is the address of the stub destination. */
3479 sym_value = (stub_entry->target_value
3480 + stub_entry->target_section->output_offset
3481 + stub_entry->target_section->output_section->vma);
3482
3483 template_sequence = stub_entry->stub_template;
3484 template_size = stub_entry->stub_template_size;
3485
3486 size = 0;
3487 for (i = 0; i < template_size; i++)
3488 {
3489 switch (template_sequence[i].type)
3490 {
3491 case THUMB16_TYPE:
3492 {
3493 bfd_vma data = (bfd_vma) template_sequence[i].data;
3494 if (template_sequence[i].reloc_addend != 0)
3495 {
3496 /* We've borrowed the reloc_addend field to mean we should
3497 insert a condition code into this (Thumb-1 branch)
3498 instruction. See THUMB16_BCOND_INSN. */
3499 BFD_ASSERT ((data & 0xff00) == 0xd000);
3500 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3501 }
3502 put_thumb_insn (globals, stub_bfd, data, loc + size);
3503 size += 2;
3504 }
3505 break;
3506
3507 case THUMB32_TYPE:
3508 put_thumb_insn (globals, stub_bfd,
3509 (template_sequence[i].data >> 16) & 0xffff,
3510 loc + size);
3511 put_thumb_insn (globals, stub_bfd, template_sequence[i].data & 0xffff,
3512 loc + size + 2);
3513 if (template_sequence[i].r_type != R_ARM_NONE)
3514 {
3515 stub_reloc_idx[nrelocs] = i;
3516 stub_reloc_offset[nrelocs++] = size;
3517 }
3518 size += 4;
3519 break;
3520
3521 case ARM_TYPE:
3522 put_arm_insn (globals, stub_bfd, template_sequence[i].data,
3523 loc + size);
3524 /* Handle cases where the target is encoded within the
3525 instruction. */
3526 if (template_sequence[i].r_type == R_ARM_JUMP24)
3527 {
3528 stub_reloc_idx[nrelocs] = i;
3529 stub_reloc_offset[nrelocs++] = size;
3530 }
3531 size += 4;
3532 break;
3533
3534 case DATA_TYPE:
3535 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3536 stub_reloc_idx[nrelocs] = i;
3537 stub_reloc_offset[nrelocs++] = size;
3538 size += 4;
3539 break;
3540
3541 default:
3542 BFD_FAIL ();
3543 return FALSE;
3544 }
3545 }
3546
3547 stub_sec->size += size;
3548
3549 /* Stub size has already been computed in arm_size_one_stub. Check
3550 consistency. */
3551 BFD_ASSERT (size == stub_entry->stub_size);
3552
3553 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3554 if (stub_entry->st_type == STT_ARM_TFUNC)
3555 sym_value |= 1;
3556
3557 /* Assume each stub has at least one and at most MAXRELOCS entries to
3558 relocate. */
3559 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3560
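/* Resolve the relocations recorded above while the template was emitted.
Thumb branch relocations need the ARM-specific relocator below; anything
else goes through the generic relocator. */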
3561 for (i = 0; i < nrelocs; i++)
3562 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3563 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3564 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3565 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3566 {
3567 Elf_Internal_Rela rel;
3568 bfd_boolean unresolved_reloc;
3569 char *error_message;
3570 int sym_flags
3571 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3572 ? STT_ARM_TFUNC : 0;
3573 bfd_vma points_to = sym_value + stub_entry->target_addend;
3574
3575 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3576 rel.r_info = ELF32_R_INFO (0,
3577 template_sequence[stub_reloc_idx[i]].r_type);
3578 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3579
3580 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3581 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3582 template should refer back to the instruction after the original
3583 branch. */
3584 points_to = sym_value;
3585
3586 /* There may be unintended consequences if this is not true. */
3587 BFD_ASSERT (stub_entry->h == NULL);
3588
3589 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3590 properly. We should probably use this function unconditionally,
3591 rather than only for certain relocations listed in the enclosing
3592 conditional, for the sake of consistency. */
3593 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3594 (template_sequence[stub_reloc_idx[i]].r_type),
3595 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3596 points_to, info, stub_entry->target_section, "", sym_flags,
3597 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3598 &error_message);
3599 }
3600 else
3601 {
3602 _bfd_final_link_relocate (elf32_arm_howto_from_type
3603 (template_sequence[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3604 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3605 sym_value + stub_entry->target_addend,
3606 template_sequence[stub_reloc_idx[i]].reloc_addend);
3607 }
3608
3609 return TRUE;
3610 #undef MAXRELOCS
3611 }
3612
3613 /* Calculate the template, template size and instruction size for a stub.
3614 Return value is the instruction size. */
3615
3616 static unsigned int
3617 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3618 const insn_sequence **stub_template,
3619 int *stub_template_size)
3620 {
3621 const insn_sequence *template_sequence = NULL;
3622 int template_size = 0, i;
3623 unsigned int size;
3624
3625 template_sequence = stub_definitions[stub_type].template_sequence;
3626 template_size = stub_definitions[stub_type].template_size;
3627
3628 size = 0;
3629 for (i = 0; i < template_size; i++)
3630 {
3631 switch (template_sequence[i].type)
3632 {
3633 case THUMB16_TYPE:
3634 size += 2;
3635 break;
3636
3637 case ARM_TYPE:
3638 case THUMB32_TYPE:
3639 case DATA_TYPE:
3640 size += 4;
3641 break;
3642
3643 default:
3644 BFD_FAIL ();
3645 return 0;
3646 }
3647 }
3648
3649 if (stub_template)
3650 *stub_template = template_sequence;
3651
3652 if (stub_template_size)
3653 *stub_template_size = template_size;
3654
3655 return size;
3656 }
3657
3658 /* As above, but don't actually build the stub. Just bump offset so
3659 we know stub section sizes. */
3660
3661 static bfd_boolean
3662 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3663 void * in_arg)
3664 {
3665 struct elf32_arm_stub_hash_entry *stub_entry;
3666 struct elf32_arm_link_hash_table *htab;
3667 const insn_sequence *template_sequence;
3668 int template_size, size;
3669
3670 /* Massage our args to the form they really have. */
3671 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3672 htab = (struct elf32_arm_link_hash_table *) in_arg;
3673
3674 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
3675 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
3676
3677 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3678 &template_size);
3679
3680 stub_entry->stub_size = size;
3681 stub_entry->stub_template = template_sequence;
3682 stub_entry->stub_template_size = template_size;
3683
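/* Round this stub's contribution to the section size up to an 8-byte
boundary. */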
3684 size = (size + 7) & ~7;
3685 stub_entry->stub_sec->size += size;
3686
3687 return TRUE;
3688 }
3689
3690 /* External entry points for sizing and building linker stubs. */
3691
3692 /* Set up various things so that we can make a list of input sections
3693 for each output section included in the link. Returns -1 on error,
3694 0 when no stubs will be needed, and 1 on success. */
3695
3696 int
3697 elf32_arm_setup_section_lists (bfd *output_bfd,
3698 struct bfd_link_info *info)
3699 {
3700 bfd *input_bfd;
3701 unsigned int bfd_count;
3702 int top_id, top_index;
3703 asection *section;
3704 asection **input_list, **list;
3705 bfd_size_type amt;
3706 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3707
3708 if (! is_elf_hash_table (htab))
3709 return 0;
3710
3711 /* Count the number of input BFDs and find the top input section id. */
3712 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3713 input_bfd != NULL;
3714 input_bfd = input_bfd->link_next)
3715 {
3716 bfd_count += 1;
3717 for (section = input_bfd->sections;
3718 section != NULL;
3719 section = section->next)
3720 {
3721 if (top_id < section->id)
3722 top_id = section->id;
3723 }
3724 }
3725 htab->bfd_count = bfd_count;
3726
3727 amt = sizeof (struct map_stub) * (top_id + 1);
3728 htab->stub_group = bfd_zmalloc (amt);
3729 if (htab->stub_group == NULL)
3730 return -1;
3731
3732 /* We can't use output_bfd->section_count here to find the top output
3733 section index as some sections may have been removed, and
3734 _bfd_strip_section_from_output doesn't renumber the indices. */
3735 for (section = output_bfd->sections, top_index = 0;
3736 section != NULL;
3737 section = section->next)
3738 {
3739 if (top_index < section->index)
3740 top_index = section->index;
3741 }
3742
3743 htab->top_index = top_index;
3744 amt = sizeof (asection *) * (top_index + 1);
3745 input_list = bfd_malloc (amt);
3746 htab->input_list = input_list;
3747 if (input_list == NULL)
3748 return -1;
3749
3750 /* For sections we aren't interested in, mark their entries with a
3751 value we can check later. */
3752 list = input_list + top_index;
3753 do
3754 *list = bfd_abs_section_ptr;
3755 while (list-- != input_list);
3756
3757 for (section = output_bfd->sections;
3758 section != NULL;
3759 section = section->next)
3760 {
3761 if ((section->flags & SEC_CODE) != 0)
3762 input_list[section->index] = NULL;
3763 }
3764
3765 return 1;
3766 }
3767
3768 /* The linker repeatedly calls this function for each input section,
3769 in the order that input sections are linked into output sections.
3770 Build lists of input sections to determine groupings between which
3771 we may insert linker stubs. */
3772
3773 void
3774 elf32_arm_next_input_section (struct bfd_link_info *info,
3775 asection *isec)
3776 {
3777 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3778
3779 if (isec->output_section->index <= htab->top_index)
3780 {
3781 asection **list = htab->input_list + isec->output_section->index;
3782
3783 if (*list != bfd_abs_section_ptr)
3784 {
3785 /* Steal the link_sec pointer for our list. */
3786 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3787 /* This happens to make the list in reverse order,
3788 which we reverse later. */
3789 PREV_SEC (isec) = *list;
3790 *list = isec;
3791 }
3792 }
3793 }
3794
3795 /* See whether we can group stub sections together. Grouping stub
3796 sections may result in fewer stubs. More importantly, we need to
3797 put all .init* and .fini* stubs at the end of the .init or
3798 .fini output sections respectively, because glibc splits the
3799 _init and _fini functions into multiple parts. Putting a stub in
3800 the middle of a function is not a good idea. */
3801
3802 static void
3803 group_sections (struct elf32_arm_link_hash_table *htab,
3804 bfd_size_type stub_group_size,
3805 bfd_boolean stubs_always_after_branch)
3806 {
3807 asection **list = htab->input_list;
3808
3809 do
3810 {
3811 asection *tail = *list;
3812 asection *head;
3813
3814 if (tail == bfd_abs_section_ptr)
3815 continue;
3816
3817 /* Reverse the list: we must avoid placing stubs at the
3818 beginning of the section because the beginning of the text
3819 section may be required for an interrupt vector in bare metal
3820 code. */
3821 #define NEXT_SEC PREV_SEC
3822 head = NULL;
3823 while (tail != NULL)
3824 {
3825 /* Pop from tail. */
3826 asection *item = tail;
3827 tail = PREV_SEC (item);
3828
3829 /* Push on head. */
3830 NEXT_SEC (item) = head;
3831 head = item;
3832 }
3833
3834 while (head != NULL)
3835 {
3836 asection *curr;
3837 asection *next;
3838 bfd_vma stub_group_start = head->output_offset;
3839 bfd_vma end_of_next;
3840
3841 curr = head;
3842 while (NEXT_SEC (curr) != NULL)
3843 {
3844 next = NEXT_SEC (curr);
3845 end_of_next = next->output_offset + next->size;
3846 if (end_of_next - stub_group_start >= stub_group_size)
3847 /* End of NEXT is too far from start, so stop. */
3848 break;
3849 /* Add NEXT to the group. */
3850 curr = next;
3851 }
3852
3853 /* OK, the size from the start to the start of CURR is less
3854 than stub_group_size and thus can be handled by one stub
3855 section. (Or the head section is itself larger than
3856 stub_group_size, in which case we may be toast.)
3857 We should really be keeping track of the total size of
3858 stubs added here, as stubs contribute to the final output
3859 section size. */
3860 do
3861 {
3862 next = NEXT_SEC (head);
3863 /* Set up this stub group. */
3864 htab->stub_group[head->id].link_sec = curr;
3865 }
3866 while (head != curr && (head = next) != NULL);
3867
3868 /* But wait, there's more! Input sections up to stub_group_size
3869 bytes after the stub section can be handled by it too. */
3870 if (!stubs_always_after_branch)
3871 {
3872 stub_group_start = curr->output_offset + curr->size;
3873
3874 while (next != NULL)
3875 {
3876 end_of_next = next->output_offset + next->size;
3877 if (end_of_next - stub_group_start >= stub_group_size)
3878 /* End of NEXT is too far from stubs, so stop. */
3879 break;
3880 /* Add NEXT to the stub group. */
3881 head = next;
3882 next = NEXT_SEC (head);
3883 htab->stub_group[head->id].link_sec = curr;
3884 }
3885 }
3886 head = next;
3887 }
3888 }
3889 while (list++ != htab->input_list + htab->top_index);
3890
3891 free (htab->input_list);
3892 #undef PREV_SEC
3893 #undef NEXT_SEC
3894 }
3895
3896 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3897 erratum fix. */
3898
3899 static int
3900 a8_reloc_compare (const void *a, const void *b)
3901 {
3902 const struct a8_erratum_reloc *ra = a, *rb = b;
3903
3904 if (ra->from < rb->from)
3905 return -1;
3906 else if (ra->from > rb->from)
3907 return 1;
3908 else
3909 return 0;
3910 }
3911
3912 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3913 const char *, char **);
3914
3915 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3916 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3917 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3918 otherwise. */
3919
3920 static bfd_boolean
3921 cortex_a8_erratum_scan (bfd *input_bfd,
3922 struct bfd_link_info *info,
3923 struct a8_erratum_fix **a8_fixes_p,
3924 unsigned int *num_a8_fixes_p,
3925 unsigned int *a8_fix_table_size_p,
3926 struct a8_erratum_reloc *a8_relocs,
3927 unsigned int num_a8_relocs,
3928 unsigned prev_num_a8_fixes,
3929 bfd_boolean *stub_changed_p)
3930 {
3931 asection *section;
3932 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3933 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3934 unsigned int num_a8_fixes = *num_a8_fixes_p;
3935 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3936
3937 for (section = input_bfd->sections;
3938 section != NULL;
3939 section = section->next)
3940 {
3941 bfd_byte *contents = NULL;
3942 struct _arm_elf_section_data *sec_data;
3943 unsigned int span;
3944 bfd_vma base_vma;
3945
3946 if (elf_section_type (section) != SHT_PROGBITS
3947 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3948 || (section->flags & SEC_EXCLUDE) != 0
3949 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3950 || (section->output_section == bfd_abs_section_ptr))
3951 continue;
3952
3953 base_vma = section->output_section->vma + section->output_offset;
3954
3955 if (elf_section_data (section)->this_hdr.contents != NULL)
3956 contents = elf_section_data (section)->this_hdr.contents;
3957 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3958 return TRUE;
3959
3960 sec_data = elf32_arm_section_data (section);
3961
3962 for (span = 0; span < sec_data->mapcount; span++)
3963 {
3964 unsigned int span_start = sec_data->map[span].vma;
3965 unsigned int span_end = (span == sec_data->mapcount - 1)
3966 ? section->size : sec_data->map[span + 1].vma;
3967 unsigned int i;
3968 char span_type = sec_data->map[span].type;
3969 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3970
3971 if (span_type != 't')
3972 continue;
3973
3974 /* Span is entirely within a single 4KB region: skip scanning. */
3975 if (((base_vma + span_start) & ~0xfff)
3976 == ((base_vma + span_end) & ~0xfff))
3977 continue;
3978
3979 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3980
3981 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3982 * The branch target is in the same 4KB region as the
3983 first half of the branch.
3984 * The instruction before the branch is a 32-bit
3985 length non-branch instruction. */
3986 for (i = span_start; i < span_end;)
3987 {
3988 unsigned int insn = bfd_getl16 (&contents[i]);
3989 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3990 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3991
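/* A first halfword in the range 0xe800-0xffff marks a 32-bit Thumb-2
instruction: the top three bits are set and bits 12:11 are not both
clear. */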
3992 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3993 insn_32bit = TRUE;
3994
3995 if (insn_32bit)
3996 {
3997 /* Load the rest of the insn (in manual-friendly order). */
3998 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3999
4000 /* Encoding T4: B<c>.W. */
4001 is_b = (insn & 0xf800d000) == 0xf0009000;
4002 /* Encoding T1: BL<c>.W. */
4003 is_bl = (insn & 0xf800d000) == 0xf000d000;
4004 /* Encoding T2: BLX<c>.W. */
4005 is_blx = (insn & 0xf800d000) == 0xf000c000;
4006 /* Encoding T3: B<c>.W (not permitted in IT block). */
4007 is_bcc = (insn & 0xf800d000) == 0xf0008000
4008 && (insn & 0x07f00000) != 0x03800000;
4009 }
4010
4011 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4012
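/* The erratum is only a concern when the first halfword of a 32-bit
branch occupies the last two bytes of a 4KB region, i.e. the
instruction straddles the region boundary. */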
4013 if (((base_vma + i) & 0xfff) == 0xffe
4014 && insn_32bit
4015 && is_32bit_branch
4016 && last_was_32bit
4017 && ! last_was_branch)
4018 {
4019 bfd_signed_vma offset;
4020 bfd_boolean force_target_arm = FALSE;
4021 bfd_boolean force_target_thumb = FALSE;
4022 bfd_vma target;
4023 enum elf32_arm_stub_type stub_type = arm_stub_none;
4024 struct a8_erratum_reloc key, *found;
4025
4026 key.from = base_vma + i;
4027 found = bsearch (&key, a8_relocs, num_a8_relocs,
4028 sizeof (struct a8_erratum_reloc),
4029 &a8_reloc_compare);
4030
4031 if (found)
4032 {
4033 char *error_message = NULL;
4034 struct elf_link_hash_entry *entry;
4035
4036 /* We don't care about the error returned from this
4037 function, only whether there is glue or not. */
4038 entry = find_thumb_glue (info, found->sym_name,
4039 &error_message);
4040
4041 if (entry)
4042 found->non_a8_stub = TRUE;
4043
4044 if (found->r_type == R_ARM_THM_CALL
4045 && found->st_type != STT_ARM_TFUNC)
4046 force_target_arm = TRUE;
4047 else if (found->r_type == R_ARM_THM_CALL
4048 && found->st_type == STT_ARM_TFUNC)
4049 force_target_thumb = TRUE;
4050 }
4051
4052 /* Check if we have an offending branch instruction. */
4053
4054 if (found && found->non_a8_stub)
4055 /* We've already made a stub for this instruction, e.g.
4056 it's a long branch or a Thumb->ARM stub. Assume that
4057 stub will suffice to work around the A8 erratum (see
4058 setting of always_after_branch above). */
4059 ;
4060 else if (is_bcc)
4061 {
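/* Reassemble the encoding T3 immediate, S:J2:J1:imm6:imm11:'0', then
sign-extend from bit 20. */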
4062 offset = (insn & 0x7ff) << 1;
4063 offset |= (insn & 0x3f0000) >> 4;
4064 offset |= (insn & 0x2000) ? 0x40000 : 0;
4065 offset |= (insn & 0x800) ? 0x80000 : 0;
4066 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4067 if (offset & 0x100000)
4068 offset |= ~ ((bfd_signed_vma) 0xfffff);
4069 stub_type = arm_stub_a8_veneer_b_cond;
4070 }
4071 else if (is_b || is_bl || is_blx)
4072 {
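/* Reassemble the encoding T4/T1/T2 immediate, S:I1:I2:imm10:imm11:'0',
where I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S), then sign-extend from
bit 24. */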
4073 int s = (insn & 0x4000000) != 0;
4074 int j1 = (insn & 0x2000) != 0;
4075 int j2 = (insn & 0x800) != 0;
4076 int i1 = !(j1 ^ s);
4077 int i2 = !(j2 ^ s);
4078
4079 offset = (insn & 0x7ff) << 1;
4080 offset |= (insn & 0x3ff0000) >> 4;
4081 offset |= i2 << 22;
4082 offset |= i1 << 23;
4083 offset |= s << 24;
4084 if (offset & 0x1000000)
4085 offset |= ~ ((bfd_signed_vma) 0xffffff);
4086
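/* A BLX switches to ARM state, so its target is always 4-byte
aligned. */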
4087 if (is_blx)
4088 offset &= ~ ((bfd_signed_vma) 3);
4089
4090 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4091 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4092 }
4093
4094 if (stub_type != arm_stub_none)
4095 {
4096 bfd_vma pc_for_insn = base_vma + i + 4;
4097
4098 /* The original instruction is a BL, but the target is
4099 an ARM instruction. If we were not making a stub,
4100 the BL would have been converted to a BLX. Use the
4101 BLX stub instead in that case. */
4102 if (htab->use_blx && force_target_arm
4103 && stub_type == arm_stub_a8_veneer_bl)
4104 {
4105 stub_type = arm_stub_a8_veneer_blx;
4106 is_blx = TRUE;
4107 is_bl = FALSE;
4108 }
4109 /* Conversely, if the original instruction was
4110 BLX but the target is Thumb mode, use the BL
4111 stub. */
4112 else if (force_target_thumb
4113 && stub_type == arm_stub_a8_veneer_blx)
4114 {
4115 stub_type = arm_stub_a8_veneer_bl;
4116 is_blx = FALSE;
4117 is_bl = TRUE;
4118 }
4119
4120 if (is_blx)
4121 pc_for_insn &= ~ ((bfd_vma) 3);
4122
4123 /* If we found a relocation, use the proper destination,
4124 not the offset in the (unrelocated) instruction.
4125 Note this is always done if we switched the stub type
4126 above. */
4127 if (found)
4128 offset =
4129 (bfd_signed_vma) (found->destination - pc_for_insn);
4130
4131 target = pc_for_insn + offset;
4132
4133 /* The BLX stub is ARM-mode code. Adjust the offset to
4134 take the different PC value (+8 instead of +4) into
4135 account. */
4136 if (stub_type == arm_stub_a8_veneer_blx)
4137 offset += 4;
4138
4139 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4140 {
4141 char *stub_name = NULL;
4142
4143 if (num_a8_fixes == a8_fix_table_size)
4144 {
4145 a8_fix_table_size *= 2;
4146 a8_fixes = bfd_realloc (a8_fixes,
4147 sizeof (struct a8_erratum_fix)
4148 * a8_fix_table_size);
4149 }
4150
4151 if (num_a8_fixes < prev_num_a8_fixes)
4152 {
4153 /* If we're doing a subsequent scan,
4154 check if we've found the same fix as
4155 before, and try and reuse the stub
4156 name. */
4157 stub_name = a8_fixes[num_a8_fixes].stub_name;
4158 if ((a8_fixes[num_a8_fixes].section != section)
4159 || (a8_fixes[num_a8_fixes].offset != i))
4160 {
4161 free (stub_name);
4162 stub_name = NULL;
4163 *stub_changed_p = TRUE;
4164 }
4165 }
4166
4167 if (!stub_name)
4168 {
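/* Room for two 32-bit values printed in hex (8 digits each), the ':'
separator and a trailing NUL. */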
4169 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4170 if (stub_name != NULL)
4171 sprintf (stub_name, "%x:%x", section->id, i);
4172 }
4173
4174 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4175 a8_fixes[num_a8_fixes].section = section;
4176 a8_fixes[num_a8_fixes].offset = i;
4177 a8_fixes[num_a8_fixes].addend = offset;
4178 a8_fixes[num_a8_fixes].orig_insn = insn;
4179 a8_fixes[num_a8_fixes].stub_name = stub_name;
4180 a8_fixes[num_a8_fixes].stub_type = stub_type;
4181
4182 num_a8_fixes++;
4183 }
4184 }
4185 }
4186
4187 i += insn_32bit ? 4 : 2;
4188 last_was_32bit = insn_32bit;
4189 last_was_branch = is_32bit_branch;
4190 }
4191 }
4192
4193 if (elf_section_data (section)->this_hdr.contents == NULL)
4194 free (contents);
4195 }
4196
4197 *a8_fixes_p = a8_fixes;
4198 *num_a8_fixes_p = num_a8_fixes;
4199 *a8_fix_table_size_p = a8_fix_table_size;
4200
4201 return FALSE;
4202 }
4203
4204 /* Determine and set the size of the stub section for a final link.
4205
4206 The basic idea here is to examine all the relocations looking for
4207 PC-relative calls to a target that is unreachable with a "bl"
4208 instruction. */
4209
4210 bfd_boolean
4211 elf32_arm_size_stubs (bfd *output_bfd,
4212 bfd *stub_bfd,
4213 struct bfd_link_info *info,
4214 bfd_signed_vma group_size,
4215 asection * (*add_stub_section) (const char *, asection *),
4216 void (*layout_sections_again) (void))
4217 {
4218 bfd_size_type stub_group_size;
4219 bfd_boolean stubs_always_after_branch;
4220 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4221 struct a8_erratum_fix *a8_fixes = NULL;
4222 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4223 struct a8_erratum_reloc *a8_relocs = NULL;
4224 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4225
4226 if (htab->fix_cortex_a8)
4227 {
4228 a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
4229 * a8_fix_table_size);
4230 a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
4231 * a8_reloc_table_size);
4232 }
4233
4234 /* Propagate mach to stub bfd, because it may not have been
4235 finalized when we created stub_bfd. */
4236 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4237 bfd_get_mach (output_bfd));
4238
4239 /* Stash our params away. */
4240 htab->stub_bfd = stub_bfd;
4241 htab->add_stub_section = add_stub_section;
4242 htab->layout_sections_again = layout_sections_again;
4243 stubs_always_after_branch = group_size < 0;
4244
4245 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4246 as the first half of a 32-bit branch straddling two 4K pages. This is a
4247 crude way of enforcing that. */
4248 if (htab->fix_cortex_a8)
4249 stubs_always_after_branch = 1;
4250
4251 if (group_size < 0)
4252 stub_group_size = -group_size;
4253 else
4254 stub_group_size = group_size;
4255
4256 if (stub_group_size == 1)
4257 {
4258 /* Default values. */
4259 /* Thumb branch range is +-4MB has to be used as the default
4260 maximum size (a given section can contain both ARM and Thumb
4261 code, so the worst case has to be taken into account).
4262
4263 This value is 24K less than that, which allows for 2025
4264 12-byte stubs. If we exceed that, then we will fail to link.
4265 The user will have to relink with an explicit group size
4266 option. */
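/* 4 MB is 4194304 bytes; 4194304 - 4170000 = 24304 bytes, roughly 24K,
which leaves room for 2025 12-byte stubs. */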
4267 stub_group_size = 4170000;
4268 }
4269
4270 group_sections (htab, stub_group_size, stubs_always_after_branch);
4271
4272 /* If we're applying the cortex A8 fix, we need to determine the
4273 program header size now, because we cannot change it later --
4274 that could alter section placements. Notice the A8 erratum fix
4275 ends up requiring the section addresses to remain unchanged
4276 modulo the page size. That's something we cannot represent
4277 inside BFD, and we don't want to force the section alignment to
4278 be the page size. */
4279 if (htab->fix_cortex_a8)
4280 (*htab->layout_sections_again) ();
4281
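/* Iterate until no new stubs are added. Adding stubs changes the layout
of the stub sections, which may in turn push more branches out of range
and so require further stubs. */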
4282 while (1)
4283 {
4284 bfd *input_bfd;
4285 unsigned int bfd_indx;
4286 asection *stub_sec;
4287 bfd_boolean stub_changed = FALSE;
4288 unsigned prev_num_a8_fixes = num_a8_fixes;
4289
4290 num_a8_fixes = 0;
4291 for (input_bfd = info->input_bfds, bfd_indx = 0;
4292 input_bfd != NULL;
4293 input_bfd = input_bfd->link_next, bfd_indx++)
4294 {
4295 Elf_Internal_Shdr *symtab_hdr;
4296 asection *section;
4297 Elf_Internal_Sym *local_syms = NULL;
4298
4299 num_a8_relocs = 0;
4300
4301 /* We'll need the symbol table in a second. */
4302 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4303 if (symtab_hdr->sh_info == 0)
4304 continue;
4305
4306 /* Walk over each section attached to the input bfd. */
4307 for (section = input_bfd->sections;
4308 section != NULL;
4309 section = section->next)
4310 {
4311 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4312
4313 /* If there aren't any relocs, then there's nothing more
4314 to do. */
4315 if ((section->flags & SEC_RELOC) == 0
4316 || section->reloc_count == 0
4317 || (section->flags & SEC_CODE) == 0)
4318 continue;
4319
4320 /* If this section is a link-once section that will be
4321 discarded, then don't create any stubs. */
4322 if (section->output_section == NULL
4323 || section->output_section->owner != output_bfd)
4324 continue;
4325
4326 /* Get the relocs. */
4327 internal_relocs
4328 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4329 NULL, info->keep_memory);
4330 if (internal_relocs == NULL)
4331 goto error_ret_free_local;
4332
4333 /* Now examine each relocation. */
4334 irela = internal_relocs;
4335 irelaend = irela + section->reloc_count;
4336 for (; irela < irelaend; irela++)
4337 {
4338 unsigned int r_type, r_indx;
4339 enum elf32_arm_stub_type stub_type;
4340 struct elf32_arm_stub_hash_entry *stub_entry;
4341 asection *sym_sec;
4342 bfd_vma sym_value;
4343 bfd_vma destination;
4344 struct elf32_arm_link_hash_entry *hash;
4345 const char *sym_name;
4346 char *stub_name;
4347 const asection *id_sec;
4348 unsigned char st_type;
4349 bfd_boolean created_stub = FALSE;
4350
4351 r_type = ELF32_R_TYPE (irela->r_info);
4352 r_indx = ELF32_R_SYM (irela->r_info);
4353
4354 if (r_type >= (unsigned int) R_ARM_max)
4355 {
4356 bfd_set_error (bfd_error_bad_value);
4357 error_ret_free_internal:
4358 if (elf_section_data (section)->relocs == NULL)
4359 free (internal_relocs);
4360 goto error_ret_free_local;
4361 }
4362
4363 /* Only look for stubs on branch instructions. */
4364 if ((r_type != (unsigned int) R_ARM_CALL)
4365 && (r_type != (unsigned int) R_ARM_THM_CALL)
4366 && (r_type != (unsigned int) R_ARM_JUMP24)
4367 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4368 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4369 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4370 && (r_type != (unsigned int) R_ARM_PLT32))
4371 continue;
4372
4373 /* Now determine the call target, its name, value,
4374 section. */
4375 sym_sec = NULL;
4376 sym_value = 0;
4377 destination = 0;
4378 hash = NULL;
4379 sym_name = NULL;
4380 if (r_indx < symtab_hdr->sh_info)
4381 {
4382 /* It's a local symbol. */
4383 Elf_Internal_Sym *sym;
4384 Elf_Internal_Shdr *hdr;
4385
4386 if (local_syms == NULL)
4387 {
4388 local_syms
4389 = (Elf_Internal_Sym *) symtab_hdr->contents;
4390 if (local_syms == NULL)
4391 local_syms
4392 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4393 symtab_hdr->sh_info, 0,
4394 NULL, NULL, NULL);
4395 if (local_syms == NULL)
4396 goto error_ret_free_internal;
4397 }
4398
4399 sym = local_syms + r_indx;
4400 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4401 sym_sec = hdr->bfd_section;
4402 if (!sym_sec)
4403 /* This is an undefined symbol. It can never
4404 be resolved. */
4405 continue;
4406
4407 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4408 sym_value = sym->st_value;
4409 destination = (sym_value + irela->r_addend
4410 + sym_sec->output_offset
4411 + sym_sec->output_section->vma);
4412 st_type = ELF_ST_TYPE (sym->st_info);
4413 sym_name
4414 = bfd_elf_string_from_elf_section (input_bfd,
4415 symtab_hdr->sh_link,
4416 sym->st_name);
4417 }
4418 else
4419 {
4420 /* It's an external symbol. */
4421 int e_indx;
4422
4423 e_indx = r_indx - symtab_hdr->sh_info;
4424 hash = ((struct elf32_arm_link_hash_entry *)
4425 elf_sym_hashes (input_bfd)[e_indx]);
4426
4427 while (hash->root.root.type == bfd_link_hash_indirect
4428 || hash->root.root.type == bfd_link_hash_warning)
4429 hash = ((struct elf32_arm_link_hash_entry *)
4430 hash->root.root.u.i.link);
4431
4432 if (hash->root.root.type == bfd_link_hash_defined
4433 || hash->root.root.type == bfd_link_hash_defweak)
4434 {
4435 sym_sec = hash->root.root.u.def.section;
4436 sym_value = hash->root.root.u.def.value;
4437
4438 struct elf32_arm_link_hash_table *globals =
4439 elf32_arm_hash_table (info);
4440
4441 /* For a destination in a shared library,
4442 use the PLT stub as target address to
4443 decide whether a branch stub is
4444 needed. */
4445 if (globals->splt != NULL && hash != NULL
4446 && hash->root.plt.offset != (bfd_vma) -1)
4447 {
4448 sym_sec = globals->splt;
4449 sym_value = hash->root.plt.offset;
4450 if (sym_sec->output_section != NULL)
4451 destination = (sym_value
4452 + sym_sec->output_offset
4453 + sym_sec->output_section->vma);
4454 }
4455 else if (sym_sec->output_section != NULL)
4456 destination = (sym_value + irela->r_addend
4457 + sym_sec->output_offset
4458 + sym_sec->output_section->vma);
4459 }
4460 else if ((hash->root.root.type == bfd_link_hash_undefined)
4461 || (hash->root.root.type == bfd_link_hash_undefweak))
4462 {
4463 /* For a shared library, use the PLT stub as
4464 target address to decide whether a long
4465 branch stub is needed.
4466 For absolute code, they cannot be handled. */
4467 struct elf32_arm_link_hash_table *globals =
4468 elf32_arm_hash_table (info);
4469
4470 if (globals->splt != NULL && hash != NULL
4471 && hash->root.plt.offset != (bfd_vma) -1)
4472 {
4473 sym_sec = globals->splt;
4474 sym_value = hash->root.plt.offset;
4475 if (sym_sec->output_section != NULL)
4476 destination = (sym_value
4477 + sym_sec->output_offset
4478 + sym_sec->output_section->vma);
4479 }
4480 else
4481 continue;
4482 }
4483 else
4484 {
4485 bfd_set_error (bfd_error_bad_value);
4486 goto error_ret_free_internal;
4487 }
4488 st_type = ELF_ST_TYPE (hash->root.type);
4489 sym_name = hash->root.root.root.string;
4490 }
4491
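/* The do ... while (0) block lets us break out of stub creation as soon
as we know no new stub is needed for this reloc. */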
4492 do
4493 {
4494 /* Determine what (if any) linker stub is needed. */
4495 stub_type = arm_type_of_stub (info, section, irela,
4496 st_type, hash,
4497 destination, sym_sec,
4498 input_bfd, sym_name);
4499 if (stub_type == arm_stub_none)
4500 break;
4501
4502 /* Support for grouping stub sections. */
4503 id_sec = htab->stub_group[section->id].link_sec;
4504
4505 /* Get the name of this stub. */
4506 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4507 irela);
4508 if (!stub_name)
4509 goto error_ret_free_internal;
4510
4511 /* We've either created a stub for this reloc already,
4512 or we are about to. */
4513 created_stub = TRUE;
4514
4515 stub_entry = arm_stub_hash_lookup
4516 (&htab->stub_hash_table, stub_name,
4517 FALSE, FALSE);
4518 if (stub_entry != NULL)
4519 {
4520 /* The proper stub has already been created. */
4521 free (stub_name);
4522 stub_entry->target_value = sym_value;
4523 break;
4524 }
4525
4526 stub_entry = elf32_arm_add_stub (stub_name, section,
4527 htab);
4528 if (stub_entry == NULL)
4529 {
4530 free (stub_name);
4531 goto error_ret_free_internal;
4532 }
4533
4534 stub_entry->target_value = sym_value;
4535 stub_entry->target_section = sym_sec;
4536 stub_entry->stub_type = stub_type;
4537 stub_entry->h = hash;
4538 stub_entry->st_type = st_type;
4539
4540 if (sym_name == NULL)
4541 sym_name = "unnamed";
4542 stub_entry->output_name
4543 = bfd_alloc (htab->stub_bfd,
4544 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4545 + strlen (sym_name));
4546 if (stub_entry->output_name == NULL)
4547 {
4548 free (stub_name);
4549 goto error_ret_free_internal;
4550 }
4551
4552 /* For historical reasons, use the existing names for
4553 ARM-to-Thumb and Thumb-to-ARM stubs. */
4554 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4555 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4556 && st_type != STT_ARM_TFUNC)
4557 sprintf (stub_entry->output_name,
4558 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4559 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4560 || (r_type == (unsigned int) R_ARM_JUMP24))
4561 && st_type == STT_ARM_TFUNC)
4562 sprintf (stub_entry->output_name,
4563 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4564 else
4565 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4566 sym_name);
4567
4568 stub_changed = TRUE;
4569 }
4570 while (0);
4571
4572 /* Look for relocations which might trigger Cortex-A8
4573 erratum. */
4574 if (htab->fix_cortex_a8
4575 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4576 || r_type == (unsigned int) R_ARM_THM_JUMP19
4577 || r_type == (unsigned int) R_ARM_THM_CALL
4578 || r_type == (unsigned int) R_ARM_THM_XPC22))
4579 {
4580 bfd_vma from = section->output_section->vma
4581 + section->output_offset
4582 + irela->r_offset;
4583
4584 if ((from & 0xfff) == 0xffe)
4585 {
4586 /* Found a candidate. Note we haven't checked the
4587 destination is within 4K here: if we do so (and
4588 don't create an entry in a8_relocs) we can't tell
4589 that a branch should have been relocated when
4590 scanning later. */
4591 if (num_a8_relocs == a8_reloc_table_size)
4592 {
4593 a8_reloc_table_size *= 2;
4594 a8_relocs = bfd_realloc (a8_relocs,
4595 sizeof (struct a8_erratum_reloc)
4596 * a8_reloc_table_size);
4597 }
4598
4599 a8_relocs[num_a8_relocs].from = from;
4600 a8_relocs[num_a8_relocs].destination = destination;
4601 a8_relocs[num_a8_relocs].r_type = r_type;
4602 a8_relocs[num_a8_relocs].st_type = st_type;
4603 a8_relocs[num_a8_relocs].sym_name = sym_name;
4604 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4605
4606 num_a8_relocs++;
4607 }
4608 }
4609 }
4610
4611 /* We're done with the internal relocs, free them. */
4612 if (elf_section_data (section)->relocs == NULL)
4613 free (internal_relocs);
4614 }
4615
4616 if (htab->fix_cortex_a8)
4617 {
4618 /* Sort relocs which might apply to Cortex-A8 erratum. */
4619 qsort (a8_relocs, num_a8_relocs,
4620 sizeof (struct a8_erratum_reloc),
4621 &a8_reloc_compare);
4622
4623 /* Scan for branches which might trigger Cortex-A8 erratum. */
4624 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4625 &num_a8_fixes, &a8_fix_table_size,
4626 a8_relocs, num_a8_relocs,
4627 prev_num_a8_fixes, &stub_changed)
4628 != 0)
4629 goto error_ret_free_local;
4630 }
4631 }
4632
4633 if (prev_num_a8_fixes != num_a8_fixes)
4634 stub_changed = TRUE;
4635
4636 if (!stub_changed)
4637 break;
4638
4639 /* OK, we've added some stubs. Find out the new size of the
4640 stub sections. */
4641 for (stub_sec = htab->stub_bfd->sections;
4642 stub_sec != NULL;
4643 stub_sec = stub_sec->next)
4644 {
4645 /* Ignore non-stub sections. */
4646 if (!strstr (stub_sec->name, STUB_SUFFIX))
4647 continue;
4648
4649 stub_sec->size = 0;
4650 }
4651
4652 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4653
4654 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4655 if (htab->fix_cortex_a8)
4656 for (i = 0; i < num_a8_fixes; i++)
4657 {
4658 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4659 a8_fixes[i].section, htab);
4660
4661 if (stub_sec == NULL)
4662 goto error_ret_free_local;
4663
4664 stub_sec->size
4665 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4666 NULL);
4667 }
4668
4669
4670 /* Ask the linker to do its stuff. */
4671 (*htab->layout_sections_again) ();
4672 }
4673
4674 /* Add stubs for Cortex-A8 erratum fixes now. */
4675 if (htab->fix_cortex_a8)
4676 {
4677 for (i = 0; i < num_a8_fixes; i++)
4678 {
4679 struct elf32_arm_stub_hash_entry *stub_entry;
4680 char *stub_name = a8_fixes[i].stub_name;
4681 asection *section = a8_fixes[i].section;
4682 unsigned int section_id = a8_fixes[i].section->id;
4683 asection *link_sec = htab->stub_group[section_id].link_sec;
4684 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4685 const insn_sequence *template_sequence;
4686 int template_size, size = 0;
4687
4688 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4689 TRUE, FALSE);
4690 if (stub_entry == NULL)
4691 {
4692 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4693 section->owner,
4694 stub_name);
4695 return FALSE;
4696 }
4697
4698 stub_entry->stub_sec = stub_sec;
4699 stub_entry->stub_offset = 0;
4700 stub_entry->id_sec = link_sec;
4701 stub_entry->stub_type = a8_fixes[i].stub_type;
4702 stub_entry->target_section = a8_fixes[i].section;
4703 stub_entry->target_value = a8_fixes[i].offset;
4704 stub_entry->target_addend = a8_fixes[i].addend;
4705 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4706 stub_entry->st_type = STT_ARM_TFUNC;
4707
4708 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4709 &template_sequence,
4710 &template_size);
4711
4712 stub_entry->stub_size = size;
4713 stub_entry->stub_template = template_sequence;
4714 stub_entry->stub_template_size = template_size;
4715 }
4716
4717 /* Stash the Cortex-A8 erratum fix array for use later in
4718 elf32_arm_write_section(). */
4719 htab->a8_erratum_fixes = a8_fixes;
4720 htab->num_a8_erratum_fixes = num_a8_fixes;
4721 }
4722 else
4723 {
4724 htab->a8_erratum_fixes = NULL;
4725 htab->num_a8_erratum_fixes = 0;
4726 }
4727 return TRUE;
4728
4729 error_ret_free_local:
4730 return FALSE;
4731 }
4732
4733 /* Build all the stubs associated with the current output file. The
4734 stubs are kept in a hash table attached to the main linker hash
4735 table. We also set up the .plt entries for statically linked PIC
4736 functions here. This function is called via arm_elf_finish in the
4737 linker. */
4738
4739 bfd_boolean
4740 elf32_arm_build_stubs (struct bfd_link_info *info)
4741 {
4742 asection *stub_sec;
4743 struct bfd_hash_table *table;
4744 struct elf32_arm_link_hash_table *htab;
4745
4746 htab = elf32_arm_hash_table (info);
4747
4748 for (stub_sec = htab->stub_bfd->sections;
4749 stub_sec != NULL;
4750 stub_sec = stub_sec->next)
4751 {
4752 bfd_size_type size;
4753
4754 /* Ignore non-stub sections. */
4755 if (!strstr (stub_sec->name, STUB_SUFFIX))
4756 continue;
4757
4758 /* Allocate memory to hold the linker stubs. */
4759 size = stub_sec->size;
4760 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4761 if (stub_sec->contents == NULL && size != 0)
4762 return FALSE;
4763 stub_sec->size = 0;
4764 }
4765
4766 /* Build the stubs as directed by the stub hash table. */
4767 table = &htab->stub_hash_table;
4768 bfd_hash_traverse (table, arm_build_one_stub, info);
4769 if (htab->fix_cortex_a8)
4770 {
4771 /* Place the cortex a8 stubs last. */
4772 htab->fix_cortex_a8 = -1;
4773 bfd_hash_traverse (table, arm_build_one_stub, info);
4774 }
4775
4776 return TRUE;
4777 }
4778
4779 /* Locate the Thumb encoded calling stub for NAME. */
4780
4781 static struct elf_link_hash_entry *
4782 find_thumb_glue (struct bfd_link_info *link_info,
4783 const char *name,
4784 char **error_message)
4785 {
4786 char *tmp_name;
4787 struct elf_link_hash_entry *hash;
4788 struct elf32_arm_link_hash_table *hash_table;
4789
4790 /* We need a pointer to the armelf specific hash table. */
4791 hash_table = elf32_arm_hash_table (link_info);
4792
4793 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4794 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4795
4796 BFD_ASSERT (tmp_name);
4797
4798 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4799
4800 hash = elf_link_hash_lookup
4801 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4802
4803 if (hash == NULL
4804 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4805 tmp_name, name) == -1)
4806 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4807
4808 free (tmp_name);
4809
4810 return hash;
4811 }
4812
4813 /* Locate the ARM encoded calling stub for NAME. */
4814
4815 static struct elf_link_hash_entry *
4816 find_arm_glue (struct bfd_link_info *link_info,
4817 const char *name,
4818 char **error_message)
4819 {
4820 char *tmp_name;
4821 struct elf_link_hash_entry *myh;
4822 struct elf32_arm_link_hash_table *hash_table;
4823
4824 /* We need a pointer to the elfarm specific hash table. */
4825 hash_table = elf32_arm_hash_table (link_info);
4826
4827 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4828 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4829
4830 BFD_ASSERT (tmp_name);
4831
4832 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4833
4834 myh = elf_link_hash_lookup
4835 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4836
4837 if (myh == NULL
4838 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4839 tmp_name, name) == -1)
4840 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4841
4842 free (tmp_name);
4843
4844 return myh;
4845 }
4846
4847 /* ARM->Thumb glue (static images):
4848
4849 .arm
4850 __func_from_arm:
4851 ldr r12, __func_addr
4852 bx r12
4853 __func_addr:
4854 .word func @ behave as if you saw an ARM_32 reloc.
4855
4856 (v5t static images)
4857 .arm
4858 __func_from_arm:
4859 ldr pc, __func_addr
4860 __func_addr:
4861 .word func @ behave as if you saw an ARM_32 reloc.
4862
4863 (relocatable images)
4864 .arm
4865 __func_from_arm:
4866 ldr r12, __func_offset
4867 add r12, r12, pc
4868 bx r12
4869 __func_offset:
4870 .word func - . */
4871
4872 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4873 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4874 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4875 static const insn32 a2t3_func_addr_insn = 0x00000001;
4876
4877 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4878 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4879 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4880
4881 #define ARM2THUMB_PIC_GLUE_SIZE 16
4882 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4883 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4884 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4885
4886 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4887
4888 .thumb .thumb
4889 .align 2 .align 2
4890 __func_from_thumb: __func_from_thumb:
4891 bx pc push {r6, lr}
4892 nop ldr r6, __func_addr
4893 .arm mov lr, pc
4894 b func bx r6
4895 .arm
4896 ;; back_to_thumb
4897 ldmia r13!, {r6, lr}
4898 bx lr
4899 __func_addr:
4900 .word func */
4901
4902 #define THUMB2ARM_GLUE_SIZE 8
4903 static const insn16 t2a1_bx_pc_insn = 0x4778;
4904 static const insn16 t2a2_noop_insn = 0x46c0;
4905 static const insn32 t2a3_b_insn = 0xea000000;
4906
4907 #define VFP11_ERRATUM_VENEER_SIZE 8
4908
4909 #define ARM_BX_VENEER_SIZE 12
4910 static const insn32 armbx1_tst_insn = 0xe3100001;
4911 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4912 static const insn32 armbx3_bx_insn = 0xe12fff10;
4913
4914 #ifndef ELFARM_NABI_C_INCLUDED
4915 static void
4916 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4917 {
4918 asection * s;
4919 bfd_byte * contents;
4920
4921 if (size == 0)
4922 {
4923 /* Do not include empty glue sections in the output. */
4924 if (abfd != NULL)
4925 {
4926 s = bfd_get_section_by_name (abfd, name);
4927 if (s != NULL)
4928 s->flags |= SEC_EXCLUDE;
4929 }
4930 return;
4931 }
4932
4933 BFD_ASSERT (abfd != NULL);
4934
4935 s = bfd_get_section_by_name (abfd, name);
4936 BFD_ASSERT (s != NULL);
4937
4938 contents = bfd_alloc (abfd, size);
4939
4940 BFD_ASSERT (s->size == size);
4941 s->contents = contents;
4942 }
4943
4944 bfd_boolean
4945 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4946 {
4947 struct elf32_arm_link_hash_table * globals;
4948
4949 globals = elf32_arm_hash_table (info);
4950 BFD_ASSERT (globals != NULL);
4951
4952 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4953 globals->arm_glue_size,
4954 ARM2THUMB_GLUE_SECTION_NAME);
4955
4956 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4957 globals->thumb_glue_size,
4958 THUMB2ARM_GLUE_SECTION_NAME);
4959
4960 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4961 globals->vfp11_erratum_glue_size,
4962 VFP11_ERRATUM_VENEER_SECTION_NAME);
4963
4964 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4965 globals->bx_glue_size,
4966 ARM_BX_GLUE_SECTION_NAME);
4967
4968 return TRUE;
4969 }
4970
4971 /* Allocate space and symbols for calling a Thumb function from ARM mode.
4972 Returns the symbol identifying the stub. */
4973
4974 static struct elf_link_hash_entry *
4975 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4976 struct elf_link_hash_entry * h)
4977 {
4978 const char * name = h->root.root.string;
4979 asection * s;
4980 char * tmp_name;
4981 struct elf_link_hash_entry * myh;
4982 struct bfd_link_hash_entry * bh;
4983 struct elf32_arm_link_hash_table * globals;
4984 bfd_vma val;
4985 bfd_size_type size;
4986
4987 globals = elf32_arm_hash_table (link_info);
4988
4989 BFD_ASSERT (globals != NULL);
4990 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4991
4992 s = bfd_get_section_by_name
4993 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4994
4995 BFD_ASSERT (s != NULL);
4996
4997 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4998
4999 BFD_ASSERT (tmp_name);
5000
5001 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5002
5003 myh = elf_link_hash_lookup
5004 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5005
5006 if (myh != NULL)
5007 {
5008 /* We've already seen this guy. */
5009 free (tmp_name);
5010 return myh;
5011 }
5012
5013 /* The only trick here is using hash_table->arm_glue_size as the value.
5014 Even though the section isn't allocated yet, this is where we will be
5015 putting it. The +1 on the value marks that the stub has not been
5016 output yet - not that it is a Thumb function. */
5017 bh = NULL;
5018 val = globals->arm_glue_size + 1;
5019 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5020 tmp_name, BSF_GLOBAL, s, val,
5021 NULL, TRUE, FALSE, &bh);
5022
5023 myh = (struct elf_link_hash_entry *) bh;
5024 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5025 myh->forced_local = 1;
5026
5027 free (tmp_name);
5028
5029 if (link_info->shared || globals->root.is_relocatable_executable
5030 || globals->pic_veneer)
5031 size = ARM2THUMB_PIC_GLUE_SIZE;
5032 else if (globals->use_blx)
5033 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5034 else
5035 size = ARM2THUMB_STATIC_GLUE_SIZE;
5036
5037 s->size += size;
5038 globals->arm_glue_size += size;
5039
5040 return myh;
5041 }
5042
5043 /* Allocate space for ARMv4 BX veneers. */
5044
5045 static void
5046 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5047 {
5048 asection * s;
5049 struct elf32_arm_link_hash_table *globals;
5050 char *tmp_name;
5051 struct elf_link_hash_entry *myh;
5052 struct bfd_link_hash_entry *bh;
5053 bfd_vma val;
5054
5055 /* BX PC does not need a veneer. */
5056 if (reg == 15)
5057 return;
5058
5059 globals = elf32_arm_hash_table (link_info);
5060
5061 BFD_ASSERT (globals != NULL);
5062 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5063
5064 /* Check if this veneer has already been allocated. */
5065 if (globals->bx_glue_offset[reg])
5066 return;
5067
5068 s = bfd_get_section_by_name
5069 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5070
5071 BFD_ASSERT (s != NULL);
5072
5073 /* Add symbol for veneer. */
5074 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5075
5076 BFD_ASSERT (tmp_name);
5077
5078 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5079
5080 myh = elf_link_hash_lookup
5081 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5082
5083 BFD_ASSERT (myh == NULL);
5084
5085 bh = NULL;
5086 val = globals->bx_glue_size;
5087 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5088 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5089 NULL, TRUE, FALSE, &bh);
5090
5091 myh = (struct elf_link_hash_entry *) bh;
5092 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5093 myh->forced_local = 1;
5094
5095 s->size += ARM_BX_VENEER_SIZE;
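/* Or-ing in 2 keeps bx_glue_offset[reg] non-zero even for the veneer at
offset 0, so the "already allocated" check above keeps working; the low
bits are masked off when the offset is actually used. */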
5096 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5097 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5098 }
5099
5100
5101 /* Add an entry to the code/data map for section SEC. */
5102
5103 static void
5104 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5105 {
5106 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5107 unsigned int newidx;
5108
5109 if (sec_data->map == NULL)
5110 {
5111 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5112 sec_data->mapcount = 0;
5113 sec_data->mapsize = 1;
5114 }
5115
5116 newidx = sec_data->mapcount++;
5117
5118 if (sec_data->mapcount > sec_data->mapsize)
5119 {
5120 sec_data->mapsize *= 2;
5121 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5122 * sizeof (elf32_arm_section_map));
5123 }
5124
5125 if (sec_data->map)
5126 {
5127 sec_data->map[newidx].vma = vma;
5128 sec_data->map[newidx].type = type;
5129 }
5130 }
5131
5132
5133 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5134 veneers are handled for now. */
5135
5136 static bfd_vma
5137 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5138 elf32_vfp11_erratum_list *branch,
5139 bfd *branch_bfd,
5140 asection *branch_sec,
5141 unsigned int offset)
5142 {
5143 asection *s;
5144 struct elf32_arm_link_hash_table *hash_table;
5145 char *tmp_name;
5146 struct elf_link_hash_entry *myh;
5147 struct bfd_link_hash_entry *bh;
5148 bfd_vma val;
5149 struct _arm_elf_section_data *sec_data;
5150 int errcount;
5151 elf32_vfp11_erratum_list *newerr;
5152
5153 hash_table = elf32_arm_hash_table (link_info);
5154
5155 BFD_ASSERT (hash_table != NULL);
5156 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5157
5158 s = bfd_get_section_by_name
5159 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5160
5161 BFD_ASSERT (s != NULL);
5162
5163 sec_data = elf32_arm_section_data (s);
5164
5165 tmp_name = bfd_malloc ((bfd_size_type) strlen
5166 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5167
5168 BFD_ASSERT (tmp_name);
5169
5170 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5171 hash_table->num_vfp11_fixes);
5172
5173 myh = elf_link_hash_lookup
5174 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5175
5176 BFD_ASSERT (myh == NULL);
5177
5178 bh = NULL;
5179 val = hash_table->vfp11_erratum_glue_size;
5180 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5181 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5182 NULL, TRUE, FALSE, &bh);
5183
5184 myh = (struct elf_link_hash_entry *) bh;
5185 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5186 myh->forced_local = 1;
5187
5188 /* Link veneer back to calling location. */
5189 errcount = ++(sec_data->erratumcount);
5190 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5191
5192 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5193 newerr->vma = -1;
5194 newerr->u.v.branch = branch;
5195 newerr->u.v.id = hash_table->num_vfp11_fixes;
5196 branch->u.b.veneer = newerr;
5197
5198 newerr->next = sec_data->erratumlist;
5199 sec_data->erratumlist = newerr;
5200
5201 /* A symbol for the return from the veneer. */
5202 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5203 hash_table->num_vfp11_fixes);
5204
5205 myh = elf_link_hash_lookup
5206 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5207
5208 if (myh != NULL)
5209 abort ();
5210
5211 bh = NULL;
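/* The veneer branches back to the instruction just after the offending
(4-byte) VFP instruction, hence offset + 4. */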
5212 val = offset + 4;
5213 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5214 branch_sec, val, NULL, TRUE, FALSE, &bh);
5215
5216 myh = (struct elf_link_hash_entry *) bh;
5217 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5218 myh->forced_local = 1;
5219
5220 free (tmp_name);
5221
5222 /* Generate a mapping symbol for the veneer section, and explicitly add an
5223 entry for that symbol to the code/data map for the section. */
5224 if (hash_table->vfp11_erratum_glue_size == 0)
5225 {
5226 bh = NULL;
5227 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5228 ever requires this erratum fix. */
5229 _bfd_generic_link_add_one_symbol (link_info,
5230 hash_table->bfd_of_glue_owner, "$a",
5231 BSF_LOCAL, s, 0, NULL,
5232 TRUE, FALSE, &bh);
5233
5234 myh = (struct elf_link_hash_entry *) bh;
5235 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5236 myh->forced_local = 1;
5237
5238 /* The elf32_arm_init_maps function only cares about symbols from input
5239 BFDs. We must make a note of this generated mapping symbol
5240 ourselves so that code byteswapping works properly in
5241 elf32_arm_write_section. */
5242 elf32_arm_section_map_add (s, 'a', 0);
5243 }
5244
5245 s->size += VFP11_ERRATUM_VENEER_SIZE;
5246 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5247 hash_table->num_vfp11_fixes++;
5248
5249 /* The offset of the veneer. */
5250 return val;
5251 }
5252
5253 #define ARM_GLUE_SECTION_FLAGS \
5254 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5255 | SEC_READONLY | SEC_LINKER_CREATED)
5256
5257 /* Create a fake section for use by the ARM backend of the linker. */
5258
5259 static bfd_boolean
5260 arm_make_glue_section (bfd * abfd, const char * name)
5261 {
5262 asection * sec;
5263
5264 sec = bfd_get_section_by_name (abfd, name);
5265 if (sec != NULL)
5266 /* Already made. */
5267 return TRUE;
5268
5269 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5270
5271 if (sec == NULL
5272 || !bfd_set_section_alignment (abfd, sec, 2))
5273 return FALSE;
5274
5275 /* Set the gc mark to prevent the section from being removed by garbage
5276 collection, despite the fact that no relocs refer to this section. */
5277 sec->gc_mark = 1;
5278
5279 return TRUE;
5280 }
5281
5282 /* Add the glue sections to ABFD. This function is called from the
5283 linker scripts in ld/emultempl/{armelf}.em. */
5284
5285 bfd_boolean
5286 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5287 struct bfd_link_info *info)
5288 {
5289 /* If we are only performing a partial
5290 link do not bother adding the glue. */
5291 if (info->relocatable)
5292 return TRUE;
5293
5294 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5295 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5296 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5297 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5298 }
5299
5300 /* Select a BFD to be used to hold the sections used by the glue code.
5301 This function is called from the linker scripts in ld/emultempl/
5302 {armelf/pe}.em. */
5303
5304 bfd_boolean
5305 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5306 {
5307 struct elf32_arm_link_hash_table *globals;
5308
5309 /* If we are only performing a partial link
5310 do not bother getting a bfd to hold the glue. */
5311 if (info->relocatable)
5312 return TRUE;
5313
5314 /* Make sure we don't attach the glue sections to a dynamic object. */
5315 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5316
5317 globals = elf32_arm_hash_table (info);
5318
5319 BFD_ASSERT (globals != NULL);
5320
5321 if (globals->bfd_of_glue_owner != NULL)
5322 return TRUE;
5323
5324 /* Save the bfd for later use. */
5325 globals->bfd_of_glue_owner = abfd;
5326
5327 return TRUE;
5328 }
5329
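/* Set use_blx if the v5T BLX instruction is available, i.e. the
Tag_CPU_arch build attribute of the output is later than v4T. */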
5330 static void
5331 check_use_blx (struct elf32_arm_link_hash_table *globals)
5332 {
5333 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5334 Tag_CPU_arch) > 2)
5335 globals->use_blx = 1;
5336 }
5337
5338 bfd_boolean
5339 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5340 struct bfd_link_info *link_info)
5341 {
5342 Elf_Internal_Shdr *symtab_hdr;
5343 Elf_Internal_Rela *internal_relocs = NULL;
5344 Elf_Internal_Rela *irel, *irelend;
5345 bfd_byte *contents = NULL;
5346
5347 asection *sec;
5348 struct elf32_arm_link_hash_table *globals;
5349
5350 /* If we are only performing a partial link do not bother
5351 to construct any glue. */
5352 if (link_info->relocatable)
5353 return TRUE;
5354
5355 /* Here we have a bfd that is to be included on the link. We have a
5356 hook to do reloc rummaging, before section sizes are nailed down. */
5357 globals = elf32_arm_hash_table (link_info);
5358
5359 BFD_ASSERT (globals != NULL);
5360
5361 check_use_blx (globals);
5362
5363 if (globals->byteswap_code && !bfd_big_endian (abfd))
5364 {
5365 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5366 abfd);
5367 return FALSE;
5368 }
5369
5370 /* PR 5398: If we have not decided to include any loadable sections in
5371 the output then we will not have a glue owner bfd. This is OK, it
5372 just means that there is nothing else for us to do here. */
5373 if (globals->bfd_of_glue_owner == NULL)
5374 return TRUE;
5375
5376 /* Rummage around all the relocs and map the glue vectors. */
5377 sec = abfd->sections;
5378
5379 if (sec == NULL)
5380 return TRUE;
5381
5382 for (; sec != NULL; sec = sec->next)
5383 {
5384 if (sec->reloc_count == 0)
5385 continue;
5386
5387 if ((sec->flags & SEC_EXCLUDE) != 0)
5388 continue;
5389
5390 symtab_hdr = & elf_symtab_hdr (abfd);
5391
5392 /* Load the relocs. */
5393 internal_relocs
5394 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5395
5396 if (internal_relocs == NULL)
5397 goto error_return;
5398
5399 irelend = internal_relocs + sec->reloc_count;
5400 for (irel = internal_relocs; irel < irelend; irel++)
5401 {
5402 long r_type;
5403 unsigned long r_index;
5404
5405 struct elf_link_hash_entry *h;
5406
5407 r_type = ELF32_R_TYPE (irel->r_info);
5408 r_index = ELF32_R_SYM (irel->r_info);
5409
5410 /* These are the only relocation types we care about. */
5411 if ( r_type != R_ARM_PC24
5412 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5413 continue;
5414
5415 /* Get the section contents if we haven't done so already. */
5416 if (contents == NULL)
5417 {
5418 /* Get cached copy if it exists. */
5419 if (elf_section_data (sec)->this_hdr.contents != NULL)
5420 contents = elf_section_data (sec)->this_hdr.contents;
5421 else
5422 {
5423 /* Go get them off disk. */
5424 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5425 goto error_return;
5426 }
5427 }
5428
5429 if (r_type == R_ARM_V4BX)
5430 {
5431 int reg;
5432
5433 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5434 record_arm_bx_glue (link_info, reg);
5435 continue;
5436 }
5437
5438 /* If the relocation is not against a symbol it cannot concern us. */
5439 h = NULL;
5440
5441 /* We don't care about local symbols. */
5442 if (r_index < symtab_hdr->sh_info)
5443 continue;
5444
5445 /* This is an external symbol. */
5446 r_index -= symtab_hdr->sh_info;
5447 h = (struct elf_link_hash_entry *)
5448 elf_sym_hashes (abfd)[r_index];
5449
5450 /* If the relocation is against a static symbol it must be within
5451 the current section and so cannot be a cross ARM/Thumb relocation. */
5452 if (h == NULL)
5453 continue;
5454
5455 /* If the call will go through a PLT entry then we do not need
5456 glue. */
5457 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5458 continue;
5459
5460 switch (r_type)
5461 {
5462 case R_ARM_PC24:
5463 /* This one is a call from arm code. We need to look up
5464 the target of the call. If it is a thumb target, we
5465 insert glue. */
5466 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5467 record_arm_to_thumb_glue (link_info, h);
5468 break;
5469
5470 default:
5471 abort ();
5472 }
5473 }
5474
5475 if (contents != NULL
5476 && elf_section_data (sec)->this_hdr.contents != contents)
5477 free (contents);
5478 contents = NULL;
5479
5480 if (internal_relocs != NULL
5481 && elf_section_data (sec)->relocs != internal_relocs)
5482 free (internal_relocs);
5483 internal_relocs = NULL;
5484 }
5485
5486 return TRUE;
5487
5488 error_return:
5489 if (contents != NULL
5490 && elf_section_data (sec)->this_hdr.contents != contents)
5491 free (contents);
5492 if (internal_relocs != NULL
5493 && elf_section_data (sec)->relocs != internal_relocs)
5494 free (internal_relocs);
5495
5496 return FALSE;
5497 }
5498 #endif
5499
5500
5501 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5502
5503 void
5504 bfd_elf32_arm_init_maps (bfd *abfd)
5505 {
5506 Elf_Internal_Sym *isymbuf;
5507 Elf_Internal_Shdr *hdr;
5508 unsigned int i, localsyms;
5509
5510 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5511 if (! is_arm_elf (abfd))
5512 return;
5513
5514 if ((abfd->flags & DYNAMIC) != 0)
5515 return;
5516
5517 hdr = & elf_symtab_hdr (abfd);
5518 localsyms = hdr->sh_info;
5519
5520 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5521 should contain the number of local symbols, which should come before any
5522 global symbols. Mapping symbols are always local. */
5523 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5524 NULL);
5525
5526 /* No internal symbols read? Skip this BFD. */
5527 if (isymbuf == NULL)
5528 return;
5529
5530 for (i = 0; i < localsyms; i++)
5531 {
5532 Elf_Internal_Sym *isym = &isymbuf[i];
5533 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5534 const char *name;
5535
5536 if (sec != NULL
5537 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5538 {
5539 name = bfd_elf_string_from_elf_section (abfd,
5540 hdr->sh_link, isym->st_name);
5541
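/* ARM mapping symbols are named $a, $t or $d; the character after the
   '$' identifies what starts at this address and is what gets recorded
   in the section map below.  */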
5542 if (bfd_is_arm_special_symbol_name (name,
5543 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5544 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5545 }
5546 }
5547 }
5548
5549
5550 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5551 say what they wanted. */
5552
5553 void
5554 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5555 {
5556 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5557 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5558
5559 if (globals->fix_cortex_a8 == -1)
5560 {
5561 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5562 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5563 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5564 || out_attr[Tag_CPU_arch_profile].i == 0))
5565 globals->fix_cortex_a8 = 1;
5566 else
5567 globals->fix_cortex_a8 = 0;
5568 }
5569 }
5570
5571
5572 void
5573 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5574 {
5575 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5576 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5577
5578 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5579 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5580 {
5581 switch (globals->vfp11_fix)
5582 {
5583 case BFD_ARM_VFP11_FIX_DEFAULT:
5584 case BFD_ARM_VFP11_FIX_NONE:
5585 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5586 break;
5587
5588 default:
5589 /* Give a warning, but do as the user requests anyway. */
5590 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5591 "workaround is not necessary for target architecture"), obfd);
5592 }
5593 }
5594 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5595 /* For earlier architectures, we might need the workaround, but do not
5596 enable it by default. If the user is running with broken hardware, they
5597 must enable the erratum fix explicitly. */

5598 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5599 }
5600
5601
5602 enum bfd_arm_vfp11_pipe
5603 {
5604 VFP11_FMAC,
5605 VFP11_LS,
5606 VFP11_DS,
5607 VFP11_BAD
5608 };
5609
5610 /* Return a VFP register number. This is encoded as RX:X for single-precision
5611 registers, or X:RX for double-precision registers, where RX is the group of
5612 four bits in the instruction encoding and X is the single extension bit.
5613 RX and X fields are specified using their lowest (starting) bit. The return
5614 value is:
5615
5616 0...31: single-precision registers s0...s31
5617 32...63: double-precision registers d0...d31.
5618
5619 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5620 encounter VFP3 instructions, so we allow the full range for DP registers. */
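/* Illustrative example (values chosen by the editor): with RX at bit 12
   and X at bit 22, an instruction whose RX field holds 0b0010 and whose X
   bit is 1 yields 5 (s5) in single precision, or 50 (d18) when IS_DOUBLE.  */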
5621
5622 static unsigned int
5623 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5624 unsigned int x)
5625 {
5626 if (is_double)
5627 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5628 else
5629 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5630 }
5631
5632 /* Set bits in *WMASK according to a register number REG as encoded by
5633 bfd_arm_vfp11_regno(). Ignore d16-d31. */
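/* For instance (illustrative): s5 (REG 5) sets bit 5 of *WMASK, while d3
   (REG 35 as returned by bfd_arm_vfp11_regno) sets bits 6 and 7, i.e. its
   two single-precision halves s6 and s7.  */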
5634
5635 static void
5636 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5637 {
5638 if (reg < 32)
5639 *wmask |= 1 << reg;
5640 else if (reg < 48)
5641 *wmask |= 3 << ((reg - 32) * 2);
5642 }
5643
5644 /* Return TRUE if WMASK overwrites anything in REGS. */
5645
5646 static bfd_boolean
5647 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5648 {
5649 int i;
5650
5651 for (i = 0; i < numregs; i++)
5652 {
5653 unsigned int reg = regs[i];
5654
5655 if (reg < 32 && (wmask & (1 << reg)) != 0)
5656 return TRUE;
5657
5658 reg -= 32;
5659
5660 if (reg >= 16)
5661 continue;
5662
5663 if ((wmask & (3 << (reg * 2))) != 0)
5664 return TRUE;
5665 }
5666
5667 return FALSE;
5668 }
5669
5670 /* In this function, we're interested in two things: finding input registers
5671 for VFP data-processing instructions, and finding the set of registers which
5672 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5673 hold the written set, so FLDM etc. are easy to deal with (we're only
5674 interested in 32 SP registers or 16 DP registers, due to the VFP version
5675 implemented by the chip in question). DP registers are marked by setting
5676 both SP registers in the write mask. */
5677
5678 static enum bfd_arm_vfp11_pipe
5679 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5680 int *numregs)
5681 {
5682 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5683 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5684
5685 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5686 {
5687 unsigned int pqrs;
5688 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5689 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5690
5691 pqrs = ((insn & 0x00800000) >> 20)
5692 | ((insn & 0x00300000) >> 19)
5693 | ((insn & 0x00000040) >> 6);
5694
5695 switch (pqrs)
5696 {
5697 case 0: /* fmac[sd]. */
5698 case 1: /* fnmac[sd]. */
5699 case 2: /* fmsc[sd]. */
5700 case 3: /* fnmsc[sd]. */
5701 pipe = VFP11_FMAC;
5702 bfd_arm_vfp11_write_mask (destmask, fd);
5703 regs[0] = fd;
5704 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5705 regs[2] = fm;
5706 *numregs = 3;
5707 break;
5708
5709 case 4: /* fmul[sd]. */
5710 case 5: /* fnmul[sd]. */
5711 case 6: /* fadd[sd]. */
5712 case 7: /* fsub[sd]. */
5713 pipe = VFP11_FMAC;
5714 goto vfp_binop;
5715
5716 case 8: /* fdiv[sd]. */
5717 pipe = VFP11_DS;
5718 vfp_binop:
5719 bfd_arm_vfp11_write_mask (destmask, fd);
5720 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5721 regs[1] = fm;
5722 *numregs = 2;
5723 break;
5724
5725 case 15: /* extended opcode. */
5726 {
5727 unsigned int extn = ((insn >> 15) & 0x1e)
5728 | ((insn >> 7) & 1);
5729
5730 switch (extn)
5731 {
5732 case 0: /* fcpy[sd]. */
5733 case 1: /* fabs[sd]. */
5734 case 2: /* fneg[sd]. */
5735 case 8: /* fcmp[sd]. */
5736 case 9: /* fcmpe[sd]. */
5737 case 10: /* fcmpz[sd]. */
5738 case 11: /* fcmpez[sd]. */
5739 case 16: /* fuito[sd]. */
5740 case 17: /* fsito[sd]. */
5741 case 24: /* ftoui[sd]. */
5742 case 25: /* ftouiz[sd]. */
5743 case 26: /* ftosi[sd]. */
5744 case 27: /* ftosiz[sd]. */
5745 /* These instructions will not bounce due to underflow. */
5746 *numregs = 0;
5747 pipe = VFP11_FMAC;
5748 break;
5749
5750 case 3: /* fsqrt[sd]. */
5751 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5752 registers to cause the erratum in previous instructions. */
5753 bfd_arm_vfp11_write_mask (destmask, fd);
5754 pipe = VFP11_DS;
5755 break;
5756
5757 case 15: /* fcvt{ds,sd}. */
5758 {
5759 int rnum = 0;
5760
5761 bfd_arm_vfp11_write_mask (destmask, fd);
5762
5763 /* Only FCVTSD can underflow. */
5764 if ((insn & 0x100) != 0)
5765 regs[rnum++] = fm;
5766
5767 *numregs = rnum;
5768
5769 pipe = VFP11_FMAC;
5770 }
5771 break;
5772
5773 default:
5774 return VFP11_BAD;
5775 }
5776 }
5777 break;
5778
5779 default:
5780 return VFP11_BAD;
5781 }
5782 }
5783 /* Two-register transfer. */
5784 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5785 {
5786 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5787
5788 if ((insn & 0x100000) == 0)
5789 {
5790 if (is_double)
5791 bfd_arm_vfp11_write_mask (destmask, fm);
5792 else
5793 {
5794 bfd_arm_vfp11_write_mask (destmask, fm);
5795 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5796 }
5797 }
5798
5799 pipe = VFP11_LS;
5800 }
5801 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5802 {
5803 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5804 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5805
5806 switch (puw)
5807 {
5808 case 0: /* Two-reg transfer. We should catch these above. */
5809 abort ();
5810
5811 case 2: /* fldm[sdx]. */
5812 case 3:
5813 case 5:
5814 {
5815 unsigned int i, offset = insn & 0xff;
5816
5817 if (is_double)
5818 offset >>= 1;
5819
5820 for (i = fd; i < fd + offset; i++)
5821 bfd_arm_vfp11_write_mask (destmask, i);
5822 }
5823 break;
5824
5825 case 4: /* fld[sd]. */
5826 case 6:
5827 bfd_arm_vfp11_write_mask (destmask, fd);
5828 break;
5829
5830 default:
5831 return VFP11_BAD;
5832 }
5833
5834 pipe = VFP11_LS;
5835 }
5836 /* Single-register transfer. Note L==0. */
5837 else if ((insn & 0x0f100e10) == 0x0e000a10)
5838 {
5839 unsigned int opcode = (insn >> 21) & 7;
5840 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5841
5842 switch (opcode)
5843 {
5844 case 0: /* fmsr/fmdlr. */
5845 case 1: /* fmdhr. */
5846 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5847 destination register. I don't know if this is exactly right,
5848 but it is the conservative choice. */
5849 bfd_arm_vfp11_write_mask (destmask, fn);
5850 break;
5851
5852 case 7: /* fmxr. */
5853 break;
5854 }
5855
5856 pipe = VFP11_LS;
5857 }
5858
5859 return pipe;
5860 }
5861
5862
5863 static int elf32_arm_compare_mapping (const void * a, const void * b);
5864
5865
5866 /* Look for potentially-troublesome code sequences which might trigger the
5867 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5868 (available from ARM) for details of the erratum. A short version is
5869 described in ld.texinfo. */
5870
5871 bfd_boolean
5872 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5873 {
5874 asection *sec;
5875 bfd_byte *contents = NULL;
5876 int state = 0;
5877 int regs[3], numregs = 0;
5878 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5879 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5880
5881 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5882 The states transition as follows:
5883
5884 0 -> 1 (vector) or 0 -> 2 (scalar)
5885 A VFP FMAC-pipeline instruction has been seen. Fill
5886 regs[0]..regs[numregs-1] with its input operands. Remember this
5887 instruction in 'first_fmac'.
5888
5889 1 -> 2
5890 Any instruction, except for a VFP instruction which overwrites
5891 regs[*].
5892
5893 1 -> 3 [ -> 0 ] or
5894 2 -> 3 [ -> 0 ]
5895 A VFP instruction has been seen which overwrites any of regs[*].
5896 We must make a veneer! Reset state to 0 before examining next
5897 instruction.
5898
5899 2 -> 0
5900 If we fail to match anything in state 2, reset to state 0 and reset
5901 the instruction pointer to the instruction after 'first_fmac'.
5902
5903 If the VFP11 vector mode is in use, there must be at least two unrelated
5904 instructions between anti-dependent VFP11 instructions to properly avoid
5905 triggering the erratum, hence the use of the extra state 1. */
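/* Illustrative scalar-mode walk-through (editor's example): an FADDS
   writing s4 takes the FSM from state 0 to state 2 and records its two
   input registers; if the very next instruction is a VFP instruction that
   overwrites one of those inputs (an FLDS loading into the same register,
   say), the anti-dependency check fires, state 3 is reached and a veneer
   is recorded for the FADDS.  */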
5906
5907 /* If we are only performing a partial link do not bother
5908 to construct any glue. */
5909 if (link_info->relocatable)
5910 return TRUE;
5911
5912 /* Skip if this bfd does not correspond to an ELF image. */
5913 if (! is_arm_elf (abfd))
5914 return TRUE;
5915
5916 /* We should have chosen a fix type by the time we get here. */
5917 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5918
5919 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5920 return TRUE;
5921
5922 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5923 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5924 return TRUE;
5925
5926 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5927 {
5928 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5929 struct _arm_elf_section_data *sec_data;
5930
5931 /* If we don't have executable progbits, we're not interested in this
5932 section. Also skip if section is to be excluded. */
5933 if (elf_section_type (sec) != SHT_PROGBITS
5934 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5935 || (sec->flags & SEC_EXCLUDE) != 0
5936 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5937 || sec->output_section == bfd_abs_section_ptr
5938 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5939 continue;
5940
5941 sec_data = elf32_arm_section_data (sec);
5942
5943 if (sec_data->mapcount == 0)
5944 continue;
5945
5946 if (elf_section_data (sec)->this_hdr.contents != NULL)
5947 contents = elf_section_data (sec)->this_hdr.contents;
5948 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5949 goto error_return;
5950
5951 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5952 elf32_arm_compare_mapping);
5953
5954 for (span = 0; span < sec_data->mapcount; span++)
5955 {
5956 unsigned int span_start = sec_data->map[span].vma;
5957 unsigned int span_end = (span == sec_data->mapcount - 1)
5958 ? sec->size : sec_data->map[span + 1].vma;
5959 char span_type = sec_data->map[span].type;
5960
5961 /* FIXME: Only ARM mode is supported at present. We may need to
5962 support Thumb-2 mode also at some point. */
5963 if (span_type != 'a')
5964 continue;
5965
5966 for (i = span_start; i < span_end;)
5967 {
5968 unsigned int next_i = i + 4;
5969 unsigned int insn = bfd_big_endian (abfd)
5970 ? (contents[i] << 24)
5971 | (contents[i + 1] << 16)
5972 | (contents[i + 2] << 8)
5973 | contents[i + 3]
5974 : (contents[i + 3] << 24)
5975 | (contents[i + 2] << 16)
5976 | (contents[i + 1] << 8)
5977 | contents[i];
5978 unsigned int writemask = 0;
5979 enum bfd_arm_vfp11_pipe pipe;
5980
5981 switch (state)
5982 {
5983 case 0:
5984 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5985 &numregs);
5986 /* I'm assuming the VFP11 erratum can trigger with denorm
5987 operands on either the FMAC or the DS pipeline. This might
5988 lead to slightly overenthusiastic veneer insertion. */
5989 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
5990 {
5991 state = use_vector ? 1 : 2;
5992 first_fmac = i;
5993 veneer_of_insn = insn;
5994 }
5995 break;
5996
5997 case 1:
5998 {
5999 int other_regs[3], other_numregs;
6000 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6001 other_regs,
6002 &other_numregs);
6003 if (pipe != VFP11_BAD
6004 && bfd_arm_vfp11_antidependency (writemask, regs,
6005 numregs))
6006 state = 3;
6007 else
6008 state = 2;
6009 }
6010 break;
6011
6012 case 2:
6013 {
6014 int other_regs[3], other_numregs;
6015 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6016 other_regs,
6017 &other_numregs);
6018 if (pipe != VFP11_BAD
6019 && bfd_arm_vfp11_antidependency (writemask, regs,
6020 numregs))
6021 state = 3;
6022 else
6023 {
6024 state = 0;
6025 next_i = first_fmac + 4;
6026 }
6027 }
6028 break;
6029
6030 case 3:
6031 abort (); /* Should be unreachable. */
6032 }
6033
6034 if (state == 3)
6035 {
6036 elf32_vfp11_erratum_list *newerr
6037 = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6038 int errcount;
6039
6040 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
6041
6042 newerr->u.b.vfp_insn = veneer_of_insn;
6043
6044 switch (span_type)
6045 {
6046 case 'a':
6047 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6048 break;
6049
6050 default:
6051 abort ();
6052 }
6053
6054 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6055 first_fmac);
6056
6057 newerr->vma = -1;
6058
6059 newerr->next = sec_data->erratumlist;
6060 sec_data->erratumlist = newerr;
6061
6062 state = 0;
6063 }
6064
6065 i = next_i;
6066 }
6067 }
6068
6069 if (contents != NULL
6070 && elf_section_data (sec)->this_hdr.contents != contents)
6071 free (contents);
6072 contents = NULL;
6073 }
6074
6075 return TRUE;
6076
6077 error_return:
6078 if (contents != NULL
6079 && elf_section_data (sec)->this_hdr.contents != contents)
6080 free (contents);
6081
6082 return FALSE;
6083 }
6084
6085 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6086 after sections have been laid out, using specially-named symbols. */
6087
6088 void
6089 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6090 struct bfd_link_info *link_info)
6091 {
6092 asection *sec;
6093 struct elf32_arm_link_hash_table *globals;
6094 char *tmp_name;
6095
6096 if (link_info->relocatable)
6097 return;
6098
6099 /* Skip if this bfd does not correspond to an ELF image. */
6100 if (! is_arm_elf (abfd))
6101 return;
6102
6103 globals = elf32_arm_hash_table (link_info);
6104
6105 tmp_name = bfd_malloc ((bfd_size_type) strlen
6106 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6107
6108 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6109 {
6110 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6111 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6112
6113 for (; errnode != NULL; errnode = errnode->next)
6114 {
6115 struct elf_link_hash_entry *myh;
6116 bfd_vma vma;
6117
6118 switch (errnode->type)
6119 {
6120 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6121 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6122 /* Find veneer symbol. */
6123 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6124 errnode->u.b.veneer->u.v.id);
6125
6126 myh = elf_link_hash_lookup
6127 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6128
6129 if (myh == NULL)
6130 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6131 "`%s'"), abfd, tmp_name);
6132
6133 vma = myh->root.u.def.section->output_section->vma
6134 + myh->root.u.def.section->output_offset
6135 + myh->root.u.def.value;
6136
6137 errnode->u.b.veneer->vma = vma;
6138 break;
6139
6140 case VFP11_ERRATUM_ARM_VENEER:
6141 case VFP11_ERRATUM_THUMB_VENEER:
6142 /* Find return location. */
6143 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6144 errnode->u.v.id);
6145
6146 myh = elf_link_hash_lookup
6147 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6148
6149 if (myh == NULL)
6150 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6151 "`%s'"), abfd, tmp_name);
6152
6153 vma = myh->root.u.def.section->output_section->vma
6154 + myh->root.u.def.section->output_offset
6155 + myh->root.u.def.value;
6156
6157 errnode->u.v.branch->vma = vma;
6158 break;
6159
6160 default:
6161 abort ();
6162 }
6163 }
6164 }
6165
6166 free (tmp_name);
6167 }
6168
6169
6170 /* Set target relocation values needed during linking. */
6171
6172 void
6173 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6174 struct bfd_link_info *link_info,
6175 int target1_is_rel,
6176 char * target2_type,
6177 int fix_v4bx,
6178 int use_blx,
6179 bfd_arm_vfp11_fix vfp11_fix,
6180 int no_enum_warn, int no_wchar_warn,
6181 int pic_veneer, int fix_cortex_a8)
6182 {
6183 struct elf32_arm_link_hash_table *globals;
6184
6185 globals = elf32_arm_hash_table (link_info);
6186
6187 globals->target1_is_rel = target1_is_rel;
6188 if (strcmp (target2_type, "rel") == 0)
6189 globals->target2_reloc = R_ARM_REL32;
6190 else if (strcmp (target2_type, "abs") == 0)
6191 globals->target2_reloc = R_ARM_ABS32;
6192 else if (strcmp (target2_type, "got-rel") == 0)
6193 globals->target2_reloc = R_ARM_GOT_PREL;
6194 else
6195 {
6196 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6197 target2_type);
6198 }
6199 globals->fix_v4bx = fix_v4bx;
6200 globals->use_blx |= use_blx;
6201 globals->vfp11_fix = vfp11_fix;
6202 globals->pic_veneer = pic_veneer;
6203 globals->fix_cortex_a8 = fix_cortex_a8;
6204
6205 BFD_ASSERT (is_arm_elf (output_bfd));
6206 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6207 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6208 }
6209
6210 /* Replace the target offset of a Thumb bl or b.w instruction. */
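/* Editor's note: the offset is distributed over the two halfwords of the
   instruction; the first halfword takes the sign bit and offset bits
   [21:12], the second takes offset bits [11:1] plus the J1/J2 bits, which
   encode offset bits [23:22] relative to the sign bit as in the Thumb-2
   BL/B.W encoding.  */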
6211
6212 static void
6213 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6214 {
6215 bfd_vma upper;
6216 bfd_vma lower;
6217 int reloc_sign;
6218
6219 BFD_ASSERT ((offset & 1) == 0);
6220
6221 upper = bfd_get_16 (abfd, insn);
6222 lower = bfd_get_16 (abfd, insn + 2);
6223 reloc_sign = (offset < 0) ? 1 : 0;
6224 upper = (upper & ~(bfd_vma) 0x7ff)
6225 | ((offset >> 12) & 0x3ff)
6226 | (reloc_sign << 10);
6227 lower = (lower & ~(bfd_vma) 0x2fff)
6228 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6229 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6230 | ((offset >> 1) & 0x7ff);
6231 bfd_put_16 (abfd, upper, insn);
6232 bfd_put_16 (abfd, lower, insn + 2);
6233 }
6234
6235 /* Thumb code calling an ARM function. */
6236
6237 static int
6238 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6239 const char * name,
6240 bfd * input_bfd,
6241 bfd * output_bfd,
6242 asection * input_section,
6243 bfd_byte * hit_data,
6244 asection * sym_sec,
6245 bfd_vma offset,
6246 bfd_signed_vma addend,
6247 bfd_vma val,
6248 char **error_message)
6249 {
6250 asection * s = 0;
6251 bfd_vma my_offset;
6252 long int ret_offset;
6253 struct elf_link_hash_entry * myh;
6254 struct elf32_arm_link_hash_table * globals;
6255
6256 myh = find_thumb_glue (info, name, error_message);
6257 if (myh == NULL)
6258 return FALSE;
6259
6260 globals = elf32_arm_hash_table (info);
6261
6262 BFD_ASSERT (globals != NULL);
6263 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6264
6265 my_offset = myh->root.u.def.value;
6266
6267 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6268 THUMB2ARM_GLUE_SECTION_NAME);
6269
6270 BFD_ASSERT (s != NULL);
6271 BFD_ASSERT (s->contents != NULL);
6272 BFD_ASSERT (s->output_section != NULL);
6273
6274 if ((my_offset & 0x01) == 0x01)
6275 {
6276 if (sym_sec != NULL
6277 && sym_sec->owner != NULL
6278 && !INTERWORK_FLAG (sym_sec->owner))
6279 {
6280 (*_bfd_error_handler)
6281 (_("%B(%s): warning: interworking not enabled.\n"
6282 " first occurrence: %B: thumb call to arm"),
6283 sym_sec->owner, input_bfd, name);
6284
6285 return FALSE;
6286 }
6287
6288 --my_offset;
6289 myh->root.u.def.value = my_offset;
6290
6291 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6292 s->contents + my_offset);
6293
6294 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6295 s->contents + my_offset + 2);
6296
6297 ret_offset =
6298 /* Address of destination of the stub. */
6299 ((bfd_signed_vma) val)
6300 - ((bfd_signed_vma)
6301 /* Offset from the start of the current section
6302 to the start of the stubs. */
6303 (s->output_offset
6304 /* Offset of the start of this stub from the start of the stubs. */
6305 + my_offset
6306 /* Address of the start of the current section. */
6307 + s->output_section->vma)
6308 /* The branch instruction is 4 bytes into the stub. */
6309 + 4
6310 /* ARM branches work from the pc of the instruction + 8. */
6311 + 8);
6312
6313 put_arm_insn (globals, output_bfd,
6314 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6315 s->contents + my_offset + 4);
6316 }
6317
6318 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6319
6320 /* Now go back and fix up the original BL insn to point to here. */
6321 ret_offset =
6322 /* Address of where the stub is located. */
6323 (s->output_section->vma + s->output_offset + my_offset)
6324 /* Address of where the BL is located. */
6325 - (input_section->output_section->vma + input_section->output_offset
6326 + offset)
6327 /* Addend in the relocation. */
6328 - addend
6329 /* Biasing for PC-relative addressing. */
6330 - 8;
6331
6332 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6333
6334 return TRUE;
6335 }
6336
6337 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6338
6339 static struct elf_link_hash_entry *
6340 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6341 const char * name,
6342 bfd * input_bfd,
6343 bfd * output_bfd,
6344 asection * sym_sec,
6345 bfd_vma val,
6346 asection * s,
6347 char ** error_message)
6348 {
6349 bfd_vma my_offset;
6350 long int ret_offset;
6351 struct elf_link_hash_entry * myh;
6352 struct elf32_arm_link_hash_table * globals;
6353
6354 myh = find_arm_glue (info, name, error_message);
6355 if (myh == NULL)
6356 return NULL;
6357
6358 globals = elf32_arm_hash_table (info);
6359
6360 BFD_ASSERT (globals != NULL);
6361 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6362
6363 my_offset = myh->root.u.def.value;
6364
6365 if ((my_offset & 0x01) == 0x01)
6366 {
6367 if (sym_sec != NULL
6368 && sym_sec->owner != NULL
6369 && !INTERWORK_FLAG (sym_sec->owner))
6370 {
6371 (*_bfd_error_handler)
6372 (_("%B(%s): warning: interworking not enabled.\n"
6373 " first occurrence: %B: arm call to thumb"),
6374 sym_sec->owner, input_bfd, name);
6375 }
6376
6377 --my_offset;
6378 myh->root.u.def.value = my_offset;
6379
6380 if (info->shared || globals->root.is_relocatable_executable
6381 || globals->pic_veneer)
6382 {
6383 /* For relocatable objects we can't use absolute addresses,
6384 so construct the address from a relative offset. */
6385 /* TODO: If the offset is small it's probably worth
6386 constructing the address with adds. */
6387 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6388 s->contents + my_offset);
6389 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6390 s->contents + my_offset + 4);
6391 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6392 s->contents + my_offset + 8);
6393 /* Adjust the offset by 4 for the position of the add,
6394 and 8 for the pipeline offset. */
6395 ret_offset = (val - (s->output_offset
6396 + s->output_section->vma
6397 + my_offset + 12))
6398 | 1;
6399 bfd_put_32 (output_bfd, ret_offset,
6400 s->contents + my_offset + 12);
6401 }
6402 else if (globals->use_blx)
6403 {
6404 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6405 s->contents + my_offset);
6406
6407 /* It's a thumb address. Add the low order bit. */
6408 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6409 s->contents + my_offset + 4);
6410 }
6411 else
6412 {
6413 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6414 s->contents + my_offset);
6415
6416 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6417 s->contents + my_offset + 4);
6418
6419 /* It's a thumb address. Add the low order bit. */
6420 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6421 s->contents + my_offset + 8);
6422
6423 my_offset += 12;
6424 }
6425 }
6426
6427 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6428
6429 return myh;
6430 }
6431
6432 /* Arm code calling a Thumb function. */
6433
6434 static int
6435 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6436 const char * name,
6437 bfd * input_bfd,
6438 bfd * output_bfd,
6439 asection * input_section,
6440 bfd_byte * hit_data,
6441 asection * sym_sec,
6442 bfd_vma offset,
6443 bfd_signed_vma addend,
6444 bfd_vma val,
6445 char **error_message)
6446 {
6447 unsigned long int tmp;
6448 bfd_vma my_offset;
6449 asection * s;
6450 long int ret_offset;
6451 struct elf_link_hash_entry * myh;
6452 struct elf32_arm_link_hash_table * globals;
6453
6454 globals = elf32_arm_hash_table (info);
6455
6456 BFD_ASSERT (globals != NULL);
6457 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6458
6459 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6460 ARM2THUMB_GLUE_SECTION_NAME);
6461 BFD_ASSERT (s != NULL);
6462 BFD_ASSERT (s->contents != NULL);
6463 BFD_ASSERT (s->output_section != NULL);
6464
6465 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6466 sym_sec, val, s, error_message);
6467 if (!myh)
6468 return FALSE;
6469
6470 my_offset = myh->root.u.def.value;
6471 tmp = bfd_get_32 (input_bfd, hit_data);
6472 tmp = tmp & 0xFF000000;
6473
6474 /* Allow for the ARM PC reading 8 bytes ahead of the branch instruction, so subtract 8. */
6475 ret_offset = (s->output_offset
6476 + my_offset
6477 + s->output_section->vma
6478 - (input_section->output_offset
6479 + input_section->output_section->vma
6480 + offset + addend)
6481 - 8);
6482
6483 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6484
6485 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6486
6487 return TRUE;
6488 }
6489
6490 /* Populate Arm stub for an exported Thumb function. */
6491
6492 static bfd_boolean
6493 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6494 {
6495 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6496 asection * s;
6497 struct elf_link_hash_entry * myh;
6498 struct elf32_arm_link_hash_entry *eh;
6499 struct elf32_arm_link_hash_table * globals;
6500 asection *sec;
6501 bfd_vma val;
6502 char *error_message;
6503
6504 eh = elf32_arm_hash_entry (h);
6505 /* Allocate stubs for exported Thumb functions on v4t. */
6506 if (eh->export_glue == NULL)
6507 return TRUE;
6508
6509 globals = elf32_arm_hash_table (info);
6510
6511 BFD_ASSERT (globals != NULL);
6512 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6513
6514 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6515 ARM2THUMB_GLUE_SECTION_NAME);
6516 BFD_ASSERT (s != NULL);
6517 BFD_ASSERT (s->contents != NULL);
6518 BFD_ASSERT (s->output_section != NULL);
6519
6520 sec = eh->export_glue->root.u.def.section;
6521
6522 BFD_ASSERT (sec->output_section != NULL);
6523
6524 val = eh->export_glue->root.u.def.value + sec->output_offset
6525 + sec->output_section->vma;
6526
6527 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6528 h->root.u.def.section->owner,
6529 globals->obfd, sec, val, s,
6530 &error_message);
6531 BFD_ASSERT (myh);
6532 return TRUE;
6533 }
6534
6535 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6536
6537 static bfd_vma
6538 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6539 {
6540 bfd_byte *p;
6541 bfd_vma glue_addr;
6542 asection *s;
6543 struct elf32_arm_link_hash_table *globals;
6544
6545 globals = elf32_arm_hash_table (info);
6546
6547 BFD_ASSERT (globals != NULL);
6548 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6549
6550 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6551 ARM_BX_GLUE_SECTION_NAME);
6552 BFD_ASSERT (s != NULL);
6553 BFD_ASSERT (s->contents != NULL);
6554 BFD_ASSERT (s->output_section != NULL);
6555
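/* Editor's note: the low two bits of bx_glue_offset are used as flags.
   Bit 1 marks that a veneer slot was reserved for this register (asserted
   below) and bit 0 that its contents have been written; the remaining
   bits give the offset of the veneer within the glue section.  */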
6556 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6557
6558 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6559
6560 if ((globals->bx_glue_offset[reg] & 1) == 0)
6561 {
6562 p = s->contents + glue_addr;
6563 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6564 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6565 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6566 globals->bx_glue_offset[reg] |= 1;
6567 }
6568
6569 return glue_addr + s->output_section->vma + s->output_offset;
6570 }
6571
6572 /* Generate Arm stubs for exported Thumb symbols. */
6573 static void
6574 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6575 struct bfd_link_info *link_info)
6576 {
6577 struct elf32_arm_link_hash_table * globals;
6578
6579 if (link_info == NULL)
6580 /* Ignore this if we are not called by the ELF backend linker. */
6581 return;
6582
6583 globals = elf32_arm_hash_table (link_info);
6584 /* If blx is available then exported Thumb symbols are OK and there is
6585 nothing to do. */
6586 if (globals->use_blx)
6587 return;
6588
6589 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6590 link_info);
6591 }
6592
6593 /* Some relocations map to different relocations depending on the
6594 target. Return the real relocation. */
6595
6596 static int
6597 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6598 int r_type)
6599 {
6600 switch (r_type)
6601 {
6602 case R_ARM_TARGET1:
6603 if (globals->target1_is_rel)
6604 return R_ARM_REL32;
6605 else
6606 return R_ARM_ABS32;
6607
6608 case R_ARM_TARGET2:
6609 return globals->target2_reloc;
6610
6611 default:
6612 return r_type;
6613 }
6614 }
6615
6616 /* Return the base VMA address which should be subtracted from real addresses
6617 when resolving @dtpoff relocation.
6618 This is PT_TLS segment p_vaddr. */
6619
6620 static bfd_vma
6621 dtpoff_base (struct bfd_link_info *info)
6622 {
6623 /* If tls_sec is NULL, we should have signalled an error already. */
6624 if (elf_hash_table (info)->tls_sec == NULL)
6625 return 0;
6626 return elf_hash_table (info)->tls_sec->vma;
6627 }
6628
6629 /* Return the relocation value for @tpoff relocation
6630 if STT_TLS virtual address is ADDRESS. */
6631
6632 static bfd_vma
6633 tpoff (struct bfd_link_info *info, bfd_vma address)
6634 {
6635 struct elf_link_hash_table *htab = elf_hash_table (info);
6636 bfd_vma base;
6637
6638 /* If tls_sec is NULL, we should have signalled an error already. */
6639 if (htab->tls_sec == NULL)
6640 return 0;
6641 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6642 return address - htab->tls_sec->vma + base;
6643 }
6644
6645 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6646 VALUE is the relocation value. */
6647
6648 static bfd_reloc_status_type
6649 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6650 {
6651 if (value > 0xfff)
6652 return bfd_reloc_overflow;
6653
6654 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6655 bfd_put_32 (abfd, value, data);
6656 return bfd_reloc_ok;
6657 }
6658
6659 /* For a given value of n, calculate the value of G_n as required to
6660 deal with group relocations. We return it in the form of an
6661 encoded constant-and-rotation, together with the final residual. If n is
6662 specified as less than zero, then final_residual is filled with the
6663 input value and no further action is performed. */
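/* Worked example (illustrative): for VALUE 0x1234 and N 0 the most
   significant pair of bits is at bit 12, giving a shift of 6, so
   G_0 = 0x1200; the encoded form is 0xd48 (8-bit constant 0x48, rotation
   field 13) and *FINAL_RESIDUAL becomes 0x34.  */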
6664
6665 static bfd_vma
6666 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6667 {
6668 int current_n;
6669 bfd_vma g_n;
6670 bfd_vma encoded_g_n = 0;
6671 bfd_vma residual = value; /* Also known as Y_n. */
6672
6673 for (current_n = 0; current_n <= n; current_n++)
6674 {
6675 int shift;
6676
6677 /* Calculate which part of the value to mask. */
6678 if (residual == 0)
6679 shift = 0;
6680 else
6681 {
6682 int msb;
6683
6684 /* Determine the most significant bit in the residual and
6685 align the resulting value to a 2-bit boundary. */
6686 for (msb = 30; msb >= 0; msb -= 2)
6687 if (residual & (3 << msb))
6688 break;
6689
6690 /* The desired shift is now (msb - 6), or zero, whichever
6691 is the greater. */
6692 shift = msb - 6;
6693 if (shift < 0)
6694 shift = 0;
6695 }
6696
6697 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6698 g_n = residual & (0xff << shift);
6699 encoded_g_n = (g_n >> shift)
6700 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6701
6702 /* Calculate the residual for the next time around. */
6703 residual &= ~g_n;
6704 }
6705
6706 *final_residual = residual;
6707
6708 return encoded_g_n;
6709 }
6710
6711 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6712 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6713
6714 static int
6715 identify_add_or_sub (bfd_vma insn)
6716 {
6717 int opcode = insn & 0x1e00000;
6718
6719 if (opcode == 1 << 23) /* ADD */
6720 return 1;
6721
6722 if (opcode == 1 << 22) /* SUB */
6723 return -1;
6724
6725 return 0;
6726 }
6727
6728 /* Perform a relocation as part of a final link. */
6729
6730 static bfd_reloc_status_type
6731 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6732 bfd * input_bfd,
6733 bfd * output_bfd,
6734 asection * input_section,
6735 bfd_byte * contents,
6736 Elf_Internal_Rela * rel,
6737 bfd_vma value,
6738 struct bfd_link_info * info,
6739 asection * sym_sec,
6740 const char * sym_name,
6741 int sym_flags,
6742 struct elf_link_hash_entry * h,
6743 bfd_boolean * unresolved_reloc_p,
6744 char ** error_message)
6745 {
6746 unsigned long r_type = howto->type;
6747 unsigned long r_symndx;
6748 bfd_byte * hit_data = contents + rel->r_offset;
6749 bfd * dynobj = NULL;
6750 Elf_Internal_Shdr * symtab_hdr;
6751 struct elf_link_hash_entry ** sym_hashes;
6752 bfd_vma * local_got_offsets;
6753 asection * sgot = NULL;
6754 asection * splt = NULL;
6755 asection * sreloc = NULL;
6756 bfd_vma addend;
6757 bfd_signed_vma signed_addend;
6758 struct elf32_arm_link_hash_table * globals;
6759
6760 globals = elf32_arm_hash_table (info);
6761
6762 BFD_ASSERT (is_arm_elf (input_bfd));
6763
6764 /* Some relocation types map to different relocations depending on the
6765 target. We pick the right one here. */
6766 r_type = arm_real_reloc_type (globals, r_type);
6767 if (r_type != howto->type)
6768 howto = elf32_arm_howto_from_type (r_type);
6769
6770 /* If the start address has been set, then set the EF_ARM_HASENTRY
6771 flag. Setting this more than once is redundant, but the cost is
6772 not too high, and it keeps the code simple.
6773
6774 The test is done here, rather than somewhere else, because the
6775 start address is only set just before the final link commences.
6776
6777 Note - if the user deliberately sets a start address of 0, the
6778 flag will not be set. */
6779 if (bfd_get_start_address (output_bfd) != 0)
6780 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6781
6782 dynobj = elf_hash_table (info)->dynobj;
6783 if (dynobj)
6784 {
6785 sgot = bfd_get_section_by_name (dynobj, ".got");
6786 splt = bfd_get_section_by_name (dynobj, ".plt");
6787 }
6788 symtab_hdr = & elf_symtab_hdr (input_bfd);
6789 sym_hashes = elf_sym_hashes (input_bfd);
6790 local_got_offsets = elf_local_got_offsets (input_bfd);
6791 r_symndx = ELF32_R_SYM (rel->r_info);
6792
6793 if (globals->use_rel)
6794 {
6795 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6796
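/* Sign extend the extracted addend: if the top bit of the field covered
   by src_mask is set, propagate it into the high bits of signed_addend.  */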
6797 if (addend & ((howto->src_mask + 1) >> 1))
6798 {
6799 signed_addend = -1;
6800 signed_addend &= ~ howto->src_mask;
6801 signed_addend |= addend;
6802 }
6803 else
6804 signed_addend = addend;
6805 }
6806 else
6807 addend = signed_addend = rel->r_addend;
6808
6809 switch (r_type)
6810 {
6811 case R_ARM_NONE:
6812 /* We don't need to find a value for this symbol. It's just a
6813 marker. */
6814 *unresolved_reloc_p = FALSE;
6815 return bfd_reloc_ok;
6816
6817 case R_ARM_ABS12:
6818 if (!globals->vxworks_p)
6819 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
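/* Fall through: on VxWorks this relocation is processed by the code below.  */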
6820
6821 case R_ARM_PC24:
6822 case R_ARM_ABS32:
6823 case R_ARM_ABS32_NOI:
6824 case R_ARM_REL32:
6825 case R_ARM_REL32_NOI:
6826 case R_ARM_CALL:
6827 case R_ARM_JUMP24:
6828 case R_ARM_XPC25:
6829 case R_ARM_PREL31:
6830 case R_ARM_PLT32:
6831 /* Handle relocations which should use the PLT entry. ABS32/REL32
6832 will use the symbol's value, which may point to a PLT entry, but we
6833 don't need to handle that here. If we created a PLT entry, all
6834 branches in this object should go to it, except if the PLT is too
6835 far away, in which case a long branch stub should be inserted. */
6836 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6837 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6838 && r_type != R_ARM_CALL
6839 && r_type != R_ARM_JUMP24
6840 && r_type != R_ARM_PLT32)
6841 && h != NULL
6842 && splt != NULL
6843 && h->plt.offset != (bfd_vma) -1)
6844 {
6845 /* If we've created a .plt section, and assigned a PLT entry to
6846 this function, it should not be known to bind locally. If
6847 it were, we would have cleared the PLT entry. */
6848 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6849
6850 value = (splt->output_section->vma
6851 + splt->output_offset
6852 + h->plt.offset);
6853 *unresolved_reloc_p = FALSE;
6854 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6855 contents, rel->r_offset, value,
6856 rel->r_addend);
6857 }
6858
6859 /* When generating a shared object or relocatable executable, these
6860 relocations are copied into the output file to be resolved at
6861 run time. */
6862 if ((info->shared || globals->root.is_relocatable_executable)
6863 && (input_section->flags & SEC_ALLOC)
6864 && !(elf32_arm_hash_table (info)->vxworks_p
6865 && strcmp (input_section->output_section->name,
6866 ".tls_vars") == 0)
6867 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6868 || !SYMBOL_CALLS_LOCAL (info, h))
6869 && (h == NULL
6870 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6871 || h->root.type != bfd_link_hash_undefweak)
6872 && r_type != R_ARM_PC24
6873 && r_type != R_ARM_CALL
6874 && r_type != R_ARM_JUMP24
6875 && r_type != R_ARM_PREL31
6876 && r_type != R_ARM_PLT32)
6877 {
6878 Elf_Internal_Rela outrel;
6879 bfd_byte *loc;
6880 bfd_boolean skip, relocate;
6881
6882 *unresolved_reloc_p = FALSE;
6883
6884 if (sreloc == NULL)
6885 {
6886 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6887 ! globals->use_rel);
6888
6889 if (sreloc == NULL)
6890 return bfd_reloc_notsupported;
6891 }
6892
6893 skip = FALSE;
6894 relocate = FALSE;
6895
6896 outrel.r_addend = addend;
6897 outrel.r_offset =
6898 _bfd_elf_section_offset (output_bfd, info, input_section,
6899 rel->r_offset);
6900 if (outrel.r_offset == (bfd_vma) -1)
6901 skip = TRUE;
6902 else if (outrel.r_offset == (bfd_vma) -2)
6903 skip = TRUE, relocate = TRUE;
6904 outrel.r_offset += (input_section->output_section->vma
6905 + input_section->output_offset);
6906
6907 if (skip)
6908 memset (&outrel, 0, sizeof outrel);
6909 else if (h != NULL
6910 && h->dynindx != -1
6911 && (!info->shared
6912 || !info->symbolic
6913 || !h->def_regular))
6914 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6915 else
6916 {
6917 int symbol;
6918
6919 /* This symbol is local, or marked to become local. */
6920 if (sym_flags == STT_ARM_TFUNC)
6921 value |= 1;
6922 if (globals->symbian_p)
6923 {
6924 asection *osec;
6925
6926 /* On Symbian OS, the data segment and text segment
6927 can be relocated independently. Therefore, we
6928 must indicate the segment to which this
6929 relocation is relative. The BPABI allows us to
6930 use any symbol in the right segment; we just use
6931 the section symbol as it is convenient. (We
6932 cannot use the symbol given by "h" directly as it
6933 will not appear in the dynamic symbol table.)
6934
6935 Note that the dynamic linker ignores the section
6936 symbol value, so we don't subtract osec->vma
6937 from the emitted reloc addend. */
6938 if (sym_sec)
6939 osec = sym_sec->output_section;
6940 else
6941 osec = input_section->output_section;
6942 symbol = elf_section_data (osec)->dynindx;
6943 if (symbol == 0)
6944 {
6945 struct elf_link_hash_table *htab = elf_hash_table (info);
6946
6947 if ((osec->flags & SEC_READONLY) == 0
6948 && htab->data_index_section != NULL)
6949 osec = htab->data_index_section;
6950 else
6951 osec = htab->text_index_section;
6952 symbol = elf_section_data (osec)->dynindx;
6953 }
6954 BFD_ASSERT (symbol != 0);
6955 }
6956 else
6957 /* On SVR4-ish systems, the dynamic loader cannot
6958 relocate the text and data segments independently,
6959 so the symbol does not matter. */
6960 symbol = 0;
6961 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6962 if (globals->use_rel)
6963 relocate = TRUE;
6964 else
6965 outrel.r_addend += value;
6966 }
6967
6968 loc = sreloc->contents;
6969 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6970 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6971
6972 /* If this reloc is against an external symbol, we do not want to
6973 fiddle with the addend. Otherwise, we need to include the symbol
6974 value so that it becomes an addend for the dynamic reloc. */
6975 if (! relocate)
6976 return bfd_reloc_ok;
6977
6978 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6979 contents, rel->r_offset, value,
6980 (bfd_vma) 0);
6981 }
6982 else switch (r_type)
6983 {
6984 case R_ARM_ABS12:
6985 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6986
6987 case R_ARM_XPC25: /* Arm BLX instruction. */
6988 case R_ARM_CALL:
6989 case R_ARM_JUMP24:
6990 case R_ARM_PC24: /* Arm B/BL instruction. */
6991 case R_ARM_PLT32:
6992 {
6993 bfd_signed_vma branch_offset;
6994 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6995
6996 if (r_type == R_ARM_XPC25)
6997 {
6998 /* Check for Arm calling Arm function. */
6999 /* FIXME: Should we translate the instruction into a BL
7000 instruction instead ? */
7001 if (sym_flags != STT_ARM_TFUNC)
7002 (*_bfd_error_handler)
7003 (_("%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7004 input_bfd,
7005 h ? h->root.root.string : "(local)");
7006 }
7007 else if (r_type == R_ARM_PC24)
7008 {
7009 /* Check for Arm calling Thumb function. */
7010 if (sym_flags == STT_ARM_TFUNC)
7011 {
7012 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7013 output_bfd, input_section,
7014 hit_data, sym_sec, rel->r_offset,
7015 signed_addend, value,
7016 error_message))
7017 return bfd_reloc_ok;
7018 else
7019 return bfd_reloc_dangerous;
7020 }
7021 }
7022
7023 /* Check if a stub has to be inserted because the
7024 destination is too far or we are changing mode. */
7025 if ( r_type == R_ARM_CALL
7026 || r_type == R_ARM_JUMP24
7027 || r_type == R_ARM_PLT32)
7028 {
7029 bfd_vma from;
7030
7031 /* If the call goes through a PLT entry, make sure to
7032 check distance to the right destination address. */
7033 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7034 {
7035 value = (splt->output_section->vma
7036 + splt->output_offset
7037 + h->plt.offset);
7038 *unresolved_reloc_p = FALSE;
7039 /* The PLT entry is in ARM mode, regardless of the
7040 target function. */
7041 sym_flags = STT_FUNC;
7042 }
7043
7044 from = (input_section->output_section->vma
7045 + input_section->output_offset
7046 + rel->r_offset);
7047 branch_offset = (bfd_signed_vma)(value - from);
7048
7049 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
7050 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
7051 || ((sym_flags == STT_ARM_TFUNC)
7052 && (((r_type == R_ARM_CALL) && !globals->use_blx)
7053 || (r_type == R_ARM_JUMP24)
7054 || (r_type == R_ARM_PLT32) ))
7055 )
7056 {
7057 /* The target is out of reach, so redirect the
7058 branch to the local stub for this function. */
7059
7060 stub_entry = elf32_arm_get_stub_entry (input_section,
7061 sym_sec, h,
7062 rel, globals);
7063 if (stub_entry != NULL)
7064 value = (stub_entry->stub_offset
7065 + stub_entry->stub_sec->output_offset
7066 + stub_entry->stub_sec->output_section->vma);
7067 }
7068 }
7069
7070 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7071 where:
7072 S is the address of the symbol in the relocation.
7073 P is address of the instruction being relocated.
7074 A is the addend (extracted from the instruction) in bytes.
7075
7076 S is held in 'value'.
7077 P is the base address of the section containing the
7078 instruction plus the offset of the reloc into that
7079 section, ie:
7080 (input_section->output_section->vma +
7081 input_section->output_offset +
7082 rel->r_offset).
7083 A is the addend, converted into bytes, ie:
7084 (signed_addend * 4)
7085
7086 Note: None of these operations have knowledge of the pipeline
7087 size of the processor, thus it is up to the assembler to
7088 encode this information into the addend. */
7089 value -= (input_section->output_section->vma
7090 + input_section->output_offset);
7091 value -= rel->r_offset;
7092 if (globals->use_rel)
7093 value += (signed_addend << howto->size);
7094 else
7095 /* RELA addends do not have to be adjusted by howto->size. */
7096 value += signed_addend;
7097
7098 signed_addend = value;
7099 signed_addend >>= howto->rightshift;
7100
7101 /* A branch to an undefined weak symbol is turned into a jump to
7102 the next instruction unless a PLT entry will be created.
7103 Do the same for local undefined symbols.
7104 The jump to the next instruction is optimized as a NOP depending
7105 on the architecture. */
7106 if (h ? (h->root.type == bfd_link_hash_undefweak
7107 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7108 : bfd_is_und_section (sym_sec))
7109 {
7110 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7111
7112 if (arch_has_arm_nop (globals))
7113 value |= 0x0320f000;
7114 else
7115 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7116 }
7117 else
7118 {
7119 /* Perform a signed range check. */
7120 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7121 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7122 return bfd_reloc_overflow;
7123
7124 addend = (value & 2);
7125
7126 value = (signed_addend & howto->dst_mask)
7127 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7128
7129 if (r_type == R_ARM_CALL)
7130 {
7131 /* Set the H bit in the BLX instruction. */
7132 if (sym_flags == STT_ARM_TFUNC)
7133 {
7134 if (addend)
7135 value |= (1 << 24);
7136 else
7137 value &= ~(bfd_vma)(1 << 24);
7138 }
7139
7140 /* Select the correct instruction (BL or BLX). */
7141 /* Only if we are not handling a BL to a stub. In this
7142 case, mode switching is performed by the stub. */
7143 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7144 value |= (1 << 28);
7145 else
7146 {
7147 value &= ~(bfd_vma)(1 << 28);
7148 value |= (1 << 24);
7149 }
7150 }
7151 }
7152 }
7153 break;
7154
7155 case R_ARM_ABS32:
7156 value += addend;
7157 if (sym_flags == STT_ARM_TFUNC)
7158 value |= 1;
7159 break;
7160
7161 case R_ARM_ABS32_NOI:
7162 value += addend;
7163 break;
7164
7165 case R_ARM_REL32:
7166 value += addend;
7167 if (sym_flags == STT_ARM_TFUNC)
7168 value |= 1;
7169 value -= (input_section->output_section->vma
7170 + input_section->output_offset + rel->r_offset);
7171 break;
7172
7173 case R_ARM_REL32_NOI:
7174 value += addend;
7175 value -= (input_section->output_section->vma
7176 + input_section->output_offset + rel->r_offset);
7177 break;
7178
7179 case R_ARM_PREL31:
7180 value -= (input_section->output_section->vma
7181 + input_section->output_offset + rel->r_offset);
7182 value += signed_addend;
7183 if (! h || h->root.type != bfd_link_hash_undefweak)
7184 {
7185 /* Check for overflow. */
7186 if ((value ^ (value >> 1)) & (1 << 30))
7187 return bfd_reloc_overflow;
7188 }
7189 value &= 0x7fffffff;
7190 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7191 if (sym_flags == STT_ARM_TFUNC)
7192 value |= 1;
7193 break;
7194 }
7195
7196 bfd_put_32 (input_bfd, value, hit_data);
7197 return bfd_reloc_ok;
7198
7199 case R_ARM_ABS8:
7200 value += addend;
7201 if ((long) value > 0x7f || (long) value < -0x80)
7202 return bfd_reloc_overflow;
7203
7204 bfd_put_8 (input_bfd, value, hit_data);
7205 return bfd_reloc_ok;
7206
7207 case R_ARM_ABS16:
7208 value += addend;
7209
7210 if ((long) value > 0x7fff || (long) value < -0x8000)
7211 return bfd_reloc_overflow;
7212
7213 bfd_put_16 (input_bfd, value, hit_data);
7214 return bfd_reloc_ok;
7215
7216 case R_ARM_THM_ABS5:
7217 /* Support ldr and str instructions for the thumb. */
7218 if (globals->use_rel)
7219 {
7220 /* Need to refetch addend. */
7221 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7222 /* ??? Need to determine shift amount from operand size. */
7223 addend >>= howto->rightshift;
7224 }
7225 value += addend;
7226
7227 /* ??? Isn't value unsigned? */
7228 if ((long) value > 0x1f || (long) value < -0x10)
7229 return bfd_reloc_overflow;
7230
7231 /* ??? Value needs to be properly shifted into place first. */
7232 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7233 bfd_put_16 (input_bfd, value, hit_data);
7234 return bfd_reloc_ok;
7235
7236 case R_ARM_THM_ALU_PREL_11_0:
7237 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7238 {
7239 bfd_vma insn;
7240 bfd_signed_vma relocation;
7241
7242 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7243 | bfd_get_16 (input_bfd, hit_data + 2);
7244
7245 if (globals->use_rel)
7246 {
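/* Reassemble the 12-bit immediate from the i:imm3:imm8 fields of the
   T32 ADDW/SUBW encoding; the sign is taken from the opcode bits below.  */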
7247 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7248 | ((insn & (1 << 26)) >> 15);
7249 if (insn & 0xf00000)
7250 signed_addend = -signed_addend;
7251 }
7252
7253 relocation = value + signed_addend;
7254 relocation -= (input_section->output_section->vma
7255 + input_section->output_offset
7256 + rel->r_offset);
7257
7258 value = abs (relocation);
7259
7260 if (value >= 0x1000)
7261 return bfd_reloc_overflow;
7262
7263 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7264 | ((value & 0x700) << 4)
7265 | ((value & 0x800) << 15);
7266 if (relocation < 0)
7267 insn |= 0xa00000;
7268
7269 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7270 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7271
7272 return bfd_reloc_ok;
7273 }
7274
7275 case R_ARM_THM_PC8:
7276 /* PR 10073: This reloc is not generated by the GNU toolchain,
7277 but it is supported for compatibility with third party libraries
7278 generated by other compilers, specifically the ARM and IAR compilers. */
7279 {
7280 bfd_vma insn;
7281 bfd_signed_vma relocation;
7282
7283 insn = bfd_get_16 (input_bfd, hit_data);
7284
7285 if (globals->use_rel)
7286 addend = (insn & 0x00ff) << 2;
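/* The low eight bits of the instruction hold the offset as a word count,
   hence the shift by two.  */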
7287
7288 relocation = value + addend;
7289 relocation -= (input_section->output_section->vma
7290 + input_section->output_offset
7291 + rel->r_offset);
7292
7293 value = abs (relocation);
7294
7295 /* We do not check for overflow of this reloc. Although strictly
7296 speaking this is incorrect, it appears to be necessary in order
7297 to work with IAR generated relocs. Since GCC and GAS do not
7298 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7299 a problem for them. */
7300 value &= 0x3fc;
7301
7302 insn = (insn & 0xff00) | (value >> 2);
7303
7304 bfd_put_16 (input_bfd, insn, hit_data);
7305
7306 return bfd_reloc_ok;
7307 }
7308
7309 case R_ARM_THM_PC12:
7310 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7311 {
7312 bfd_vma insn;
7313 bfd_signed_vma relocation;
7314
7315 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7316 | bfd_get_16 (input_bfd, hit_data + 2);
7317
7318 if (globals->use_rel)
7319 {
7320 signed_addend = insn & 0xfff;
7321 if (!(insn & (1 << 23)))
7322 signed_addend = -signed_addend;
7323 }
7324
7325 relocation = value + signed_addend;
7326 relocation -= (input_section->output_section->vma
7327 + input_section->output_offset
7328 + rel->r_offset);
7329
7330 value = abs (relocation);
7331
7332 if (value >= 0x1000)
7333 return bfd_reloc_overflow;
7334
7335 insn = (insn & 0xff7ff000) | value;
7336 if (relocation >= 0)
7337 insn |= (1 << 23);
7338
7339 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7340 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7341
7342 return bfd_reloc_ok;
7343 }
7344
7345 case R_ARM_THM_XPC22:
7346 case R_ARM_THM_CALL:
7347 case R_ARM_THM_JUMP24:
7348 /* Thumb BL (branch long instruction). */
7349 {
7350 bfd_vma relocation;
7351 bfd_vma reloc_sign;
7352 bfd_boolean overflow = FALSE;
7353 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7354 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7355 bfd_signed_vma reloc_signed_max;
7356 bfd_signed_vma reloc_signed_min;
7357 bfd_vma check;
7358 bfd_signed_vma signed_check;
7359 int bitsize;
7360 const int thumb2 = using_thumb2 (globals);
7361
7362 /* A branch to an undefined weak symbol is turned into a jump to
7363 the next instruction unless a PLT entry will be created.
7364 The jump to the next instruction is optimized as a NOP.W for
7365 Thumb-2 enabled architectures. */
7366 if (h && h->root.type == bfd_link_hash_undefweak
7367 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7368 {
7369 if (arch_has_thumb2_nop (globals))
7370 {
7371 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7372 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7373 }
7374 else
7375 {
7376 bfd_put_16 (input_bfd, 0xe000, hit_data);
7377 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7378 }
7379 return bfd_reloc_ok;
7380 }
7381
7382 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7383 with Thumb-1) involving the J1 and J2 bits. */
7384 if (globals->use_rel)
7385 {
7386 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7387 bfd_vma upper = upper_insn & 0x3ff;
7388 bfd_vma lower = lower_insn & 0x7ff;
7389 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7390 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7391 bfd_vma i1 = j1 ^ s ? 0 : 1;
7392 bfd_vma i2 = j2 ^ s ? 0 : 1;
7393
7394 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7395 /* Sign extend. */
7396 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7397
7398 signed_addend = addend;
7399 }
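/* The decoding above follows the Thumb-2 BL/BLX immediate: with
   I1 = NOT (J1 XOR S) and I2 = NOT (J2 XOR S), the branch offset is the
   sign-extended 25-bit value S:I1:I2:imm10:imm11:'0'.  For a Thumb-1 BL
   pair J1 and J2 are both 1, so I1 == I2 == S and this reduces to the
   older 23-bit sign-extended offset.  */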
7400
7401 if (r_type == R_ARM_THM_XPC22)
7402 {
7403 /* Check for Thumb to Thumb call. */
7404 /* FIXME: Should we translate the instruction into a BL
7405 instruction instead ? */
7406 if (sym_flags == STT_ARM_TFUNC)
7407 (*_bfd_error_handler)
7408 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7409 input_bfd,
7410 h ? h->root.root.string : "(local)");
7411 }
7412 else
7413 {
7414 /* If it is not a call to Thumb, assume call to Arm.
7415 If it is a call relative to a section name, then it is not a
7416 function call at all, but rather a long jump. Calls through
7417 the PLT do not require stubs. */
7418 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7419 && (h == NULL || splt == NULL
7420 || h->plt.offset == (bfd_vma) -1))
7421 {
7422 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7423 {
7424 /* Convert BL to BLX. */
7425 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7426 }
7427 else if (( r_type != R_ARM_THM_CALL)
7428 && (r_type != R_ARM_THM_JUMP24))
7429 {
7430 if (elf32_thumb_to_arm_stub
7431 (info, sym_name, input_bfd, output_bfd, input_section,
7432 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7433 error_message))
7434 return bfd_reloc_ok;
7435 else
7436 return bfd_reloc_dangerous;
7437 }
7438 }
7439 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7440 && r_type == R_ARM_THM_CALL)
7441 {
7442 /* Make sure this is a BL. */
7443 lower_insn |= 0x1800;
7444 }
7445 }
7446
7447 /* Handle calls via the PLT. */
7448 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7449 {
7450 value = (splt->output_section->vma
7451 + splt->output_offset
7452 + h->plt.offset);
7453 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7454 {
7455 /* If the Thumb BLX instruction is available, convert the
7456 BL to a BLX instruction to call the ARM-mode PLT entry. */
7457 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7458 sym_flags = STT_FUNC;
7459 }
7460 else
7461 {
7462 /* Target the Thumb stub before the ARM PLT entry. */
7463 value -= PLT_THUMB_STUB_SIZE;
7464 sym_flags = STT_ARM_TFUNC;
7465 }
7466 *unresolved_reloc_p = FALSE;
7467 }
7468
7469 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7470 {
7471 /* Check if a stub has to be inserted because the destination
7472 is too far. */
7473 bfd_vma from;
7474 bfd_signed_vma branch_offset;
7475 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7476
7477 from = (input_section->output_section->vma
7478 + input_section->output_offset
7479 + rel->r_offset);
7480 branch_offset = (bfd_signed_vma)(value - from);
7481
7482 if ((!thumb2
7483 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7484 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7485 ||
7486 (thumb2
7487 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7488 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7489 || ((sym_flags != STT_ARM_TFUNC)
7490 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7491 || r_type == R_ARM_THM_JUMP24)))
7492 {
7493 /* The target is out of reach or we are changing modes, so
7494 redirect the branch to the local stub for this
7495 function. */
7496 stub_entry = elf32_arm_get_stub_entry (input_section,
7497 sym_sec, h,
7498 rel, globals);
7499 if (stub_entry != NULL)
7500 value = (stub_entry->stub_offset
7501 + stub_entry->stub_sec->output_offset
7502 + stub_entry->stub_sec->output_section->vma);
7503
7504 /* If this call becomes a call to Arm, force BLX. */
7505 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7506 {
7507 if ((stub_entry
7508 && !arm_stub_is_thumb (stub_entry->stub_type))
7509 || (sym_flags != STT_ARM_TFUNC))
7510 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7511 }
7512 }
7513 }
7514
7515 relocation = value + signed_addend;
7516
7517 relocation -= (input_section->output_section->vma
7518 + input_section->output_offset
7519 + rel->r_offset);
7520
7521 check = relocation >> howto->rightshift;
7522
7523 /* If this is a signed value, the rightshift just dropped
7524 leading 1 bits (assuming two's complement). */
7525 if ((bfd_signed_vma) relocation >= 0)
7526 signed_check = check;
7527 else
7528 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7529
7530 /* Calculate the permissible maximum and minimum values for
7531 this relocation according to whether we're relocating for
7532 Thumb-2 or not. */
7533 bitsize = howto->bitsize;
7534 if (!thumb2)
7535 bitsize -= 2;
7536 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7537 reloc_signed_min = ~reloc_signed_max;
7538
7539 /* Assumes two's complement. */
7540 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7541 overflow = TRUE;
7542
7543 if ((lower_insn & 0x5000) == 0x4000)
7544 /* For a BLX instruction, make sure that the relocation is rounded up
7545 to a word boundary. This follows the semantics of the instruction
7546 which specifies that bit 1 of the target address will come from bit
7547 1 of the base address. */
7548 relocation = (relocation + 2) & ~ 3;
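/* For example, an offset of 0x1002 becomes 0x1004 here: the ARM-state
   target of a BLX must be word aligned, so bit 1 of the offset is folded
   away.  */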
7549
7550 /* Put RELOCATION back into the insn. Assumes two's complement.
7551 We use the Thumb-2 encoding, which is safe even if dealing with
7552 a Thumb-1 instruction by virtue of our overflow check above. */
7553 reloc_sign = (signed_check < 0) ? 1 : 0;
7554 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7555 | ((relocation >> 12) & 0x3ff)
7556 | (reloc_sign << 10);
7557 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7558 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7559 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7560 | ((relocation >> 1) & 0x7ff);
7561
7562 /* Put the relocated value back in the object file: */
7563 bfd_put_16 (input_bfd, upper_insn, hit_data);
7564 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7565
7566 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7567 }
7568 break;
7569
7570 case R_ARM_THM_JUMP19:
7571 /* Thumb32 conditional branch instruction. */
7572 {
7573 bfd_vma relocation;
7574 bfd_boolean overflow = FALSE;
7575 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7576 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7577 bfd_signed_vma reloc_signed_max = 0xffffe;
7578 bfd_signed_vma reloc_signed_min = -0x100000;
7579 bfd_signed_vma signed_check;
7580
7581 /* Need to refetch the addend, reconstruct the top three bits,
7582 and squish the two 11 bit pieces together. */
7583 if (globals->use_rel)
7584 {
7585 bfd_vma S = (upper_insn & 0x0400) >> 10;
7586 bfd_vma upper = (upper_insn & 0x003f);
7587 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7588 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7589 bfd_vma lower = (lower_insn & 0x07ff);
7590
7591 upper |= J1 << 6;
7592 upper |= J2 << 7;
7593 upper |= (!S) << 8;
7594 upper -= 0x0100; /* Sign extend. */
7595
7596 addend = (upper << 12) | (lower << 1);
7597 signed_addend = addend;
7598 }
7599
7600 /* Handle calls via the PLT. */
7601 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7602 {
7603 value = (splt->output_section->vma
7604 + splt->output_offset
7605 + h->plt.offset);
7606 /* Target the Thumb stub before the ARM PLT entry. */
7607 value -= PLT_THUMB_STUB_SIZE;
7608 *unresolved_reloc_p = FALSE;
7609 }
7610
7611 /* ??? Should handle interworking? GCC might someday try to
7612 use this for tail calls. */
7613
7614 relocation = value + signed_addend;
7615 relocation -= (input_section->output_section->vma
7616 + input_section->output_offset
7617 + rel->r_offset);
7618 signed_check = (bfd_signed_vma) relocation;
7619
7620 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7621 overflow = TRUE;
7622
7623 /* Put RELOCATION back into the insn. */
7624 {
7625 bfd_vma S = (relocation & 0x00100000) >> 20;
7626 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7627 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7628 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7629 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7630
7631 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7632 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7633 }
7634
7635 /* Put the relocated value back in the object file: */
7636 bfd_put_16 (input_bfd, upper_insn, hit_data);
7637 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7638
7639 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7640 }
7641
7642 case R_ARM_THM_JUMP11:
7643 case R_ARM_THM_JUMP8:
7644 case R_ARM_THM_JUMP6:
7645 /* Thumb B (branch) instruction. */
7646 {
7647 bfd_signed_vma relocation;
7648 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7649 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7650 bfd_signed_vma signed_check;
7651
7652 /* CBZ cannot jump backward. */
7653 if (r_type == R_ARM_THM_JUMP6)
7654 reloc_signed_min = 0;
7655
7656 if (globals->use_rel)
7657 {
7658 /* Need to refetch addend. */
7659 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7660 if (addend & ((howto->src_mask + 1) >> 1))
7661 {
7662 signed_addend = -1;
7663 signed_addend &= ~ howto->src_mask;
7664 signed_addend |= addend;
7665 }
7666 else
7667 signed_addend = addend;
7668 /* The value in the insn has been right shifted. We need to
7669 undo this, so that we can perform the address calculation
7670 in terms of bytes. */
7671 signed_addend <<= howto->rightshift;
7672 }
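/* The test against ((src_mask + 1) >> 1) picks out the top bit of the
   field covered by src_mask (its sign bit); when it is set the fetched
   bits are merged over a value that is all ones outside src_mask, which
   sign-extends the addend.  */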
7673 relocation = value + signed_addend;
7674
7675 relocation -= (input_section->output_section->vma
7676 + input_section->output_offset
7677 + rel->r_offset);
7678
7679 relocation >>= howto->rightshift;
7680 signed_check = relocation;
7681
7682 if (r_type == R_ARM_THM_JUMP6)
7683 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7684 else
7685 relocation &= howto->dst_mask;
7686 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7687
7688 bfd_put_16 (input_bfd, relocation, hit_data);
7689
7690 /* Assumes two's complement. */
7691 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7692 return bfd_reloc_overflow;
7693
7694 return bfd_reloc_ok;
7695 }
7696
7697 case R_ARM_ALU_PCREL7_0:
7698 case R_ARM_ALU_PCREL15_8:
7699 case R_ARM_ALU_PCREL23_15:
7700 {
7701 bfd_vma insn;
7702 bfd_vma relocation;
7703
7704 insn = bfd_get_32 (input_bfd, hit_data);
7705 if (globals->use_rel)
7706 {
7707 /* Extract the addend. */
7708 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7709 signed_addend = addend;
7710 }
7711 relocation = value + signed_addend;
7712
7713 relocation -= (input_section->output_section->vma
7714 + input_section->output_offset
7715 + rel->r_offset);
7716 insn = (insn & ~0xfff)
7717 | ((howto->bitpos << 7) & 0xf00)
7718 | ((relocation >> howto->bitpos) & 0xff);
7719 bfd_put_32 (input_bfd, insn, hit_data);
7720 }
7721 return bfd_reloc_ok;
7722
7723 case R_ARM_GNU_VTINHERIT:
7724 case R_ARM_GNU_VTENTRY:
7725 return bfd_reloc_ok;
7726
7727 case R_ARM_GOTOFF32:
7728 /* Relocation is relative to the start of the
7729 global offset table. */
7730
7731 BFD_ASSERT (sgot != NULL);
7732 if (sgot == NULL)
7733 return bfd_reloc_notsupported;
7734
7735 /* If we are addressing a Thumb function, we need to adjust the
7736 address by one, so that attempts to call the function pointer will
7737 correctly interpret it as Thumb code. */
7738 if (sym_flags == STT_ARM_TFUNC)
7739 value += 1;
7740
7741 /* Note that sgot->output_offset is not involved in this
7742 calculation. We always want the start of .got. If we
7743 define _GLOBAL_OFFSET_TABLE in a different way, as is
7744 permitted by the ABI, we might have to change this
7745 calculation. */
7746 value -= sgot->output_section->vma;
7747 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7748 contents, rel->r_offset, value,
7749 rel->r_addend);
7750
7751 case R_ARM_GOTPC:
7752 /* Use global offset table as symbol value. */
7753 BFD_ASSERT (sgot != NULL);
7754
7755 if (sgot == NULL)
7756 return bfd_reloc_notsupported;
7757
7758 *unresolved_reloc_p = FALSE;
7759 value = sgot->output_section->vma;
7760 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7761 contents, rel->r_offset, value,
7762 rel->r_addend);
7763
7764 case R_ARM_GOT32:
7765 case R_ARM_GOT_PREL:
7766 /* Relocation is to the entry for this symbol in the
7767 global offset table. */
7768 if (sgot == NULL)
7769 return bfd_reloc_notsupported;
7770
7771 if (h != NULL)
7772 {
7773 bfd_vma off;
7774 bfd_boolean dyn;
7775
7776 off = h->got.offset;
7777 BFD_ASSERT (off != (bfd_vma) -1);
7778 dyn = globals->root.dynamic_sections_created;
7779
7780 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7781 || (info->shared
7782 && SYMBOL_REFERENCES_LOCAL (info, h))
7783 || (ELF_ST_VISIBILITY (h->other)
7784 && h->root.type == bfd_link_hash_undefweak))
7785 {
7786 /* This is actually a static link, or it is a -Bsymbolic link
7787 and the symbol is defined locally. We must initialize this
7788 entry in the global offset table. Since the offset must
7789 always be a multiple of 4, we use the least significant bit
7790 to record whether we have initialized it already.
7791
7792 When doing a dynamic link, we create a .rel(a).got relocation
7793 entry to initialize the value. This is done in the
7794 finish_dynamic_symbol routine. */
7795 if ((off & 1) != 0)
7796 off &= ~1;
7797 else
7798 {
7799 /* If we are addressing a Thumb function, we need to
7800 adjust the address by one, so that attempts to
7801 call the function pointer will correctly
7802 interpret it as Thumb code. */
7803 if (sym_flags == STT_ARM_TFUNC)
7804 value |= 1;
7805
7806 bfd_put_32 (output_bfd, value, sgot->contents + off);
7807 h->got.offset |= 1;
7808 }
7809 }
7810 else
7811 *unresolved_reloc_p = FALSE;
7812
7813 value = sgot->output_offset + off;
7814 }
7815 else
7816 {
7817 bfd_vma off;
7818
7819 BFD_ASSERT (local_got_offsets != NULL &&
7820 local_got_offsets[r_symndx] != (bfd_vma) -1);
7821
7822 off = local_got_offsets[r_symndx];
7823
7824 /* The offset must always be a multiple of 4. We use the
7825 least significant bit to record whether we have already
7826 generated the necessary reloc. */
7827 if ((off & 1) != 0)
7828 off &= ~1;
7829 else
7830 {
7831 /* If we are addressing a Thumb function, we need to
7832 adjust the address by one, so that attempts to
7833 call the function pointer will correctly
7834 interpret it as Thumb code. */
7835 if (sym_flags == STT_ARM_TFUNC)
7836 value |= 1;
7837
7838 if (globals->use_rel)
7839 bfd_put_32 (output_bfd, value, sgot->contents + off);
7840
7841 if (info->shared)
7842 {
7843 asection * srelgot;
7844 Elf_Internal_Rela outrel;
7845 bfd_byte *loc;
7846
7847 srelgot = (bfd_get_section_by_name
7848 (dynobj, RELOC_SECTION (globals, ".got")));
7849 BFD_ASSERT (srelgot != NULL);
7850
7851 outrel.r_addend = addend + value;
7852 outrel.r_offset = (sgot->output_section->vma
7853 + sgot->output_offset
7854 + off);
7855 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7856 loc = srelgot->contents;
7857 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7858 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7859 }
7860
7861 local_got_offsets[r_symndx] |= 1;
7862 }
7863
7864 value = sgot->output_offset + off;
7865 }
7866 if (r_type != R_ARM_GOT32)
7867 value += sgot->output_section->vma;
7868
7869 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7870 contents, rel->r_offset, value,
7871 rel->r_addend);
7872
7873 case R_ARM_TLS_LDO32:
7874 value = value - dtpoff_base (info);
7875
7876 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7877 contents, rel->r_offset, value,
7878 rel->r_addend);
7879
7880 case R_ARM_TLS_LDM32:
7881 {
7882 bfd_vma off;
7883
7884 if (globals->sgot == NULL)
7885 abort ();
7886
7887 off = globals->tls_ldm_got.offset;
7888
7889 if ((off & 1) != 0)
7890 off &= ~1;
7891 else
7892 {
7893 /* If we don't know the module number, create a relocation
7894 for it. */
7895 if (info->shared)
7896 {
7897 Elf_Internal_Rela outrel;
7898 bfd_byte *loc;
7899
7900 if (globals->srelgot == NULL)
7901 abort ();
7902
7903 outrel.r_addend = 0;
7904 outrel.r_offset = (globals->sgot->output_section->vma
7905 + globals->sgot->output_offset + off);
7906 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7907
7908 if (globals->use_rel)
7909 bfd_put_32 (output_bfd, outrel.r_addend,
7910 globals->sgot->contents + off);
7911
7912 loc = globals->srelgot->contents;
7913 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7914 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7915 }
7916 else
7917 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7918
7919 globals->tls_ldm_got.offset |= 1;
7920 }
7921
7922 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7923 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7924
7925 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7926 contents, rel->r_offset, value,
7927 rel->r_addend);
7928 }
7929
7930 case R_ARM_TLS_GD32:
7931 case R_ARM_TLS_IE32:
7932 {
7933 bfd_vma off;
7934 int indx;
7935 char tls_type;
7936
7937 if (globals->sgot == NULL)
7938 abort ();
7939
7940 indx = 0;
7941 if (h != NULL)
7942 {
7943 bfd_boolean dyn;
7944 dyn = globals->root.dynamic_sections_created;
7945 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7946 && (!info->shared
7947 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7948 {
7949 *unresolved_reloc_p = FALSE;
7950 indx = h->dynindx;
7951 }
7952 off = h->got.offset;
7953 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7954 }
7955 else
7956 {
7957 if (local_got_offsets == NULL)
7958 abort ();
7959 off = local_got_offsets[r_symndx];
7960 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7961 }
7962
7963 if (tls_type == GOT_UNKNOWN)
7964 abort ();
7965
7966 if ((off & 1) != 0)
7967 off &= ~1;
7968 else
7969 {
7970 bfd_boolean need_relocs = FALSE;
7971 Elf_Internal_Rela outrel;
7972 bfd_byte *loc = NULL;
7973 int cur_off = off;
7974
7975 /* The GOT entries have not been initialized yet. Do it
7976 now, and emit any relocations. If both an IE GOT and a
7977 GD GOT are necessary, we emit the GD first. */
7978
7979 if ((info->shared || indx != 0)
7980 && (h == NULL
7981 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7982 || h->root.type != bfd_link_hash_undefweak))
7983 {
7984 need_relocs = TRUE;
7985 if (globals->srelgot == NULL)
7986 abort ();
7987 loc = globals->srelgot->contents;
7988 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7989 }
7990
7991 if (tls_type & GOT_TLS_GD)
7992 {
7993 if (need_relocs)
7994 {
7995 outrel.r_addend = 0;
7996 outrel.r_offset = (globals->sgot->output_section->vma
7997 + globals->sgot->output_offset
7998 + cur_off);
7999 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8000
8001 if (globals->use_rel)
8002 bfd_put_32 (output_bfd, outrel.r_addend,
8003 globals->sgot->contents + cur_off);
8004
8005 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8006 globals->srelgot->reloc_count++;
8007 loc += RELOC_SIZE (globals);
8008
8009 if (indx == 0)
8010 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8011 globals->sgot->contents + cur_off + 4);
8012 else
8013 {
8014 outrel.r_addend = 0;
8015 outrel.r_info = ELF32_R_INFO (indx,
8016 R_ARM_TLS_DTPOFF32);
8017 outrel.r_offset += 4;
8018
8019 if (globals->use_rel)
8020 bfd_put_32 (output_bfd, outrel.r_addend,
8021 globals->sgot->contents + cur_off + 4);
8022
8023
8024 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8025 globals->srelgot->reloc_count++;
8026 loc += RELOC_SIZE (globals);
8027 }
8028 }
8029 else
8030 {
8031 /* If we are not emitting relocations for a
8032 general dynamic reference, then we must be in a
8033 static link or an executable link with the
8034 symbol binding locally. Mark it as belonging
8035 to module 1, the executable. */
8036 bfd_put_32 (output_bfd, 1,
8037 globals->sgot->contents + cur_off);
8038 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8039 globals->sgot->contents + cur_off + 4);
8040 }
8041
8042 cur_off += 8;
8043 }
8044
8045 if (tls_type & GOT_TLS_IE)
8046 {
8047 if (need_relocs)
8048 {
8049 if (indx == 0)
8050 outrel.r_addend = value - dtpoff_base (info);
8051 else
8052 outrel.r_addend = 0;
8053 outrel.r_offset = (globals->sgot->output_section->vma
8054 + globals->sgot->output_offset
8055 + cur_off);
8056 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8057
8058 if (globals->use_rel)
8059 bfd_put_32 (output_bfd, outrel.r_addend,
8060 globals->sgot->contents + cur_off);
8061
8062 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8063 globals->srelgot->reloc_count++;
8064 loc += RELOC_SIZE (globals);
8065 }
8066 else
8067 bfd_put_32 (output_bfd, tpoff (info, value),
8068 globals->sgot->contents + cur_off);
8069 cur_off += 4;
8070 }
8071
8072 if (h != NULL)
8073 h->got.offset |= 1;
8074 else
8075 local_got_offsets[r_symndx] |= 1;
8076 }
8077
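/* A GD entry occupies two GOT words (DTPMOD32 followed by DTPOFF32) and,
   as noted above, is emitted before any IE entry for the same symbol, so
   an IE reference must skip past it.  */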
8078 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8079 off += 8;
8080 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8081 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8082
8083 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8084 contents, rel->r_offset, value,
8085 rel->r_addend);
8086 }
8087
8088 case R_ARM_TLS_LE32:
8089 if (info->shared)
8090 {
8091 (*_bfd_error_handler)
8092 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8093 input_bfd, input_section,
8094 (long) rel->r_offset, howto->name);
8095 return FALSE;
8096 }
8097 else
8098 value = tpoff (info, value);
8099
8100 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8101 contents, rel->r_offset, value,
8102 rel->r_addend);
8103
8104 case R_ARM_V4BX:
8105 if (globals->fix_v4bx)
8106 {
8107 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8108
8109 /* Ensure that we have a BX instruction. */
8110 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8111
8112 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8113 {
8114 /* Branch to veneer. */
8115 bfd_vma glue_addr;
8116 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8117 glue_addr -= input_section->output_section->vma
8118 + input_section->output_offset
8119 + rel->r_offset + 8;
8120 insn = (insn & 0xf0000000) | 0x0a000000
8121 | ((glue_addr >> 2) & 0x00ffffff);
8122 }
8123 else
8124 {
8125 /* Preserve Rm (lowest four bits) and the condition code
8126 (highest four bits). Other bits encode MOV PC,Rm. */
8127 insn = (insn & 0xf000000f) | 0x01a0f000;
8128 }
8129
8130 bfd_put_32 (input_bfd, insn, hit_data);
8131 }
8132 return bfd_reloc_ok;
8133
8134 case R_ARM_MOVW_ABS_NC:
8135 case R_ARM_MOVT_ABS:
8136 case R_ARM_MOVW_PREL_NC:
8137 case R_ARM_MOVT_PREL:
8138 /* Until we properly support segment-base-relative addressing, we
8139 assume the segment base to be zero, as for the group relocations.
8140 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8141 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8142 case R_ARM_MOVW_BREL_NC:
8143 case R_ARM_MOVW_BREL:
8144 case R_ARM_MOVT_BREL:
8145 {
8146 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8147
8148 if (globals->use_rel)
8149 {
8150 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8151 signed_addend = (addend ^ 0x8000) - 0x8000;
8152 }
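/* MOVW/MOVT keep imm4 in bits 19:16 and imm12 in bits 11:0 of the
   instruction; the extraction above reassembles those into a 16-bit
   value, and (addend ^ 0x8000) - 0x8000 sign-extends it, e.g. 0xfffe
   becomes -2 while 0x7ffe is unchanged.  */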
8153
8154 value += signed_addend;
8155
8156 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8157 value -= (input_section->output_section->vma
8158 + input_section->output_offset + rel->r_offset);
8159
8160 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8161 return bfd_reloc_overflow;
8162
8163 if (sym_flags == STT_ARM_TFUNC)
8164 value |= 1;
8165
8166 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8167 || r_type == R_ARM_MOVT_BREL)
8168 value >>= 16;
8169
8170 insn &= 0xfff0f000;
8171 insn |= value & 0xfff;
8172 insn |= (value & 0xf000) << 4;
8173 bfd_put_32 (input_bfd, insn, hit_data);
8174 }
8175 return bfd_reloc_ok;
8176
8177 case R_ARM_THM_MOVW_ABS_NC:
8178 case R_ARM_THM_MOVT_ABS:
8179 case R_ARM_THM_MOVW_PREL_NC:
8180 case R_ARM_THM_MOVT_PREL:
8181 /* Until we properly support segment-base-relative addressing, we
8182 assume the segment base to be zero, as for the above relocations.
8183 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8184 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8185 as R_ARM_THM_MOVT_ABS. */
8186 case R_ARM_THM_MOVW_BREL_NC:
8187 case R_ARM_THM_MOVW_BREL:
8188 case R_ARM_THM_MOVT_BREL:
8189 {
8190 bfd_vma insn;
8191
8192 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8193 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8194
8195 if (globals->use_rel)
8196 {
8197 addend = ((insn >> 4) & 0xf000)
8198 | ((insn >> 15) & 0x0800)
8199 | ((insn >> 4) & 0x0700)
8200 | (insn & 0x00ff);
8201 signed_addend = (addend ^ 0x8000) - 0x8000;
8202 }
8203
8204 value += signed_addend;
8205
8206 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8207 value -= (input_section->output_section->vma
8208 + input_section->output_offset + rel->r_offset);
8209
8210 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8211 return bfd_reloc_overflow;
8212
8213 if (sym_flags == STT_ARM_TFUNC)
8214 value |= 1;
8215
8216 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8217 || r_type == R_ARM_THM_MOVT_BREL)
8218 value >>= 16;
8219
8220 insn &= 0xfbf08f00;
8221 insn |= (value & 0xf000) << 4;
8222 insn |= (value & 0x0800) << 15;
8223 insn |= (value & 0x0700) << 4;
8224 insn |= (value & 0x00ff);
8225
8226 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8227 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8228 }
8229 return bfd_reloc_ok;
8230
8231 case R_ARM_ALU_PC_G0_NC:
8232 case R_ARM_ALU_PC_G1_NC:
8233 case R_ARM_ALU_PC_G0:
8234 case R_ARM_ALU_PC_G1:
8235 case R_ARM_ALU_PC_G2:
8236 case R_ARM_ALU_SB_G0_NC:
8237 case R_ARM_ALU_SB_G1_NC:
8238 case R_ARM_ALU_SB_G0:
8239 case R_ARM_ALU_SB_G1:
8240 case R_ARM_ALU_SB_G2:
8241 {
8242 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8243 bfd_vma pc = input_section->output_section->vma
8244 + input_section->output_offset + rel->r_offset;
8245 /* sb should be the origin of the *segment* containing the symbol.
8246 It is not clear how to obtain this OS-dependent value, so we
8247 make an arbitrary choice of zero. */
8248 bfd_vma sb = 0;
8249 bfd_vma residual;
8250 bfd_vma g_n;
8251 bfd_signed_vma signed_value;
8252 int group = 0;
8253
8254 /* Determine which group of bits to select. */
8255 switch (r_type)
8256 {
8257 case R_ARM_ALU_PC_G0_NC:
8258 case R_ARM_ALU_PC_G0:
8259 case R_ARM_ALU_SB_G0_NC:
8260 case R_ARM_ALU_SB_G0:
8261 group = 0;
8262 break;
8263
8264 case R_ARM_ALU_PC_G1_NC:
8265 case R_ARM_ALU_PC_G1:
8266 case R_ARM_ALU_SB_G1_NC:
8267 case R_ARM_ALU_SB_G1:
8268 group = 1;
8269 break;
8270
8271 case R_ARM_ALU_PC_G2:
8272 case R_ARM_ALU_SB_G2:
8273 group = 2;
8274 break;
8275
8276 default:
8277 abort ();
8278 }
8279
8280 /* If REL, extract the addend from the insn. If RELA, it will
8281 have already been fetched for us. */
8282 if (globals->use_rel)
8283 {
8284 int negative;
8285 bfd_vma constant = insn & 0xff;
8286 bfd_vma rotation = (insn & 0xf00) >> 8;
8287
8288 if (rotation == 0)
8289 signed_addend = constant;
8290 else
8291 {
8292 /* Compensate for the fact that in the instruction, the
8293 rotation is stored in multiples of 2 bits. */
8294 rotation *= 2;
8295
8296 /* Rotate "constant" right by "rotation" bits. */
8297 signed_addend = (constant >> rotation) |
8298 (constant << (8 * sizeof (bfd_vma) - rotation));
8299 }
8300
8301 /* Determine if the instruction is an ADD or a SUB.
8302 (For REL, this determines the sign of the addend.) */
8303 negative = identify_add_or_sub (insn);
8304 if (negative == 0)
8305 {
8306 (*_bfd_error_handler)
8307 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8308 input_bfd, input_section,
8309 (long) rel->r_offset, howto->name);
8310 return bfd_reloc_overflow;
8311 }
8312
8313 signed_addend *= negative;
8314 }
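/* The extraction above mirrors the ARM data-processing "modified
   immediate" encoding: an 8-bit constant rotated right within the 32-bit
   word by twice the 4-bit rotate field.  For example an immediate field
   of 0x4ff denotes 0xff rotated right by 8, i.e. 0xff000000.  */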
8315
8316 /* Compute the value (X) to go in the place. */
8317 if (r_type == R_ARM_ALU_PC_G0_NC
8318 || r_type == R_ARM_ALU_PC_G1_NC
8319 || r_type == R_ARM_ALU_PC_G0
8320 || r_type == R_ARM_ALU_PC_G1
8321 || r_type == R_ARM_ALU_PC_G2)
8322 /* PC relative. */
8323 signed_value = value - pc + signed_addend;
8324 else
8325 /* Section base relative. */
8326 signed_value = value - sb + signed_addend;
8327
8328 /* If the target symbol is a Thumb function, then set the
8329 Thumb bit in the address. */
8330 if (sym_flags == STT_ARM_TFUNC)
8331 signed_value |= 1;
8332
8333 /* Calculate the value of the relevant G_n, in encoded
8334 constant-with-rotation format. */
8335 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8336 &residual);
8337
8338 /* Check for overflow if required. */
8339 if ((r_type == R_ARM_ALU_PC_G0
8340 || r_type == R_ARM_ALU_PC_G1
8341 || r_type == R_ARM_ALU_PC_G2
8342 || r_type == R_ARM_ALU_SB_G0
8343 || r_type == R_ARM_ALU_SB_G1
8344 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8345 {
8346 (*_bfd_error_handler)
8347 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8348 input_bfd, input_section,
8349 (long) rel->r_offset, abs (signed_value), howto->name);
8350 return bfd_reloc_overflow;
8351 }
8352
8353 /* Mask out the value and the ADD/SUB part of the opcode; take care
8354 not to destroy the S bit. */
8355 insn &= 0xff1ff000;
8356
8357 /* Set the opcode according to whether the value to go in the
8358 place is negative. */
8359 if (signed_value < 0)
8360 insn |= 1 << 22;
8361 else
8362 insn |= 1 << 23;
8363
8364 /* Encode the offset. */
8365 insn |= g_n;
8366
8367 bfd_put_32 (input_bfd, insn, hit_data);
8368 }
8369 return bfd_reloc_ok;
8370
8371 case R_ARM_LDR_PC_G0:
8372 case R_ARM_LDR_PC_G1:
8373 case R_ARM_LDR_PC_G2:
8374 case R_ARM_LDR_SB_G0:
8375 case R_ARM_LDR_SB_G1:
8376 case R_ARM_LDR_SB_G2:
8377 {
8378 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8379 bfd_vma pc = input_section->output_section->vma
8380 + input_section->output_offset + rel->r_offset;
8381 bfd_vma sb = 0; /* See note above. */
8382 bfd_vma residual;
8383 bfd_signed_vma signed_value;
8384 int group = 0;
8385
8386 /* Determine which groups of bits to calculate. */
8387 switch (r_type)
8388 {
8389 case R_ARM_LDR_PC_G0:
8390 case R_ARM_LDR_SB_G0:
8391 group = 0;
8392 break;
8393
8394 case R_ARM_LDR_PC_G1:
8395 case R_ARM_LDR_SB_G1:
8396 group = 1;
8397 break;
8398
8399 case R_ARM_LDR_PC_G2:
8400 case R_ARM_LDR_SB_G2:
8401 group = 2;
8402 break;
8403
8404 default:
8405 abort ();
8406 }
8407
8408 /* If REL, extract the addend from the insn. If RELA, it will
8409 have already been fetched for us. */
8410 if (globals->use_rel)
8411 {
8412 int negative = (insn & (1 << 23)) ? 1 : -1;
8413 signed_addend = negative * (insn & 0xfff);
8414 }
8415
8416 /* Compute the value (X) to go in the place. */
8417 if (r_type == R_ARM_LDR_PC_G0
8418 || r_type == R_ARM_LDR_PC_G1
8419 || r_type == R_ARM_LDR_PC_G2)
8420 /* PC relative. */
8421 signed_value = value - pc + signed_addend;
8422 else
8423 /* Section base relative. */
8424 signed_value = value - sb + signed_addend;
8425
8426 /* Calculate the value of the relevant G_{n-1} to obtain
8427 the residual at that stage. */
8428 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8429
8430 /* Check for overflow. */
8431 if (residual >= 0x1000)
8432 {
8433 (*_bfd_error_handler)
8434 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8435 input_bfd, input_section,
8436 (long) rel->r_offset, abs (signed_value), howto->name);
8437 return bfd_reloc_overflow;
8438 }
8439
8440 /* Mask out the value and U bit. */
8441 insn &= 0xff7ff000;
8442
8443 /* Set the U bit if the value to go in the place is non-negative. */
8444 if (signed_value >= 0)
8445 insn |= 1 << 23;
8446
8447 /* Encode the offset. */
8448 insn |= residual;
8449
8450 bfd_put_32 (input_bfd, insn, hit_data);
8451 }
8452 return bfd_reloc_ok;
8453
8454 case R_ARM_LDRS_PC_G0:
8455 case R_ARM_LDRS_PC_G1:
8456 case R_ARM_LDRS_PC_G2:
8457 case R_ARM_LDRS_SB_G0:
8458 case R_ARM_LDRS_SB_G1:
8459 case R_ARM_LDRS_SB_G2:
8460 {
8461 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8462 bfd_vma pc = input_section->output_section->vma
8463 + input_section->output_offset + rel->r_offset;
8464 bfd_vma sb = 0; /* See note above. */
8465 bfd_vma residual;
8466 bfd_signed_vma signed_value;
8467 int group = 0;
8468
8469 /* Determine which groups of bits to calculate. */
8470 switch (r_type)
8471 {
8472 case R_ARM_LDRS_PC_G0:
8473 case R_ARM_LDRS_SB_G0:
8474 group = 0;
8475 break;
8476
8477 case R_ARM_LDRS_PC_G1:
8478 case R_ARM_LDRS_SB_G1:
8479 group = 1;
8480 break;
8481
8482 case R_ARM_LDRS_PC_G2:
8483 case R_ARM_LDRS_SB_G2:
8484 group = 2;
8485 break;
8486
8487 default:
8488 abort ();
8489 }
8490
8491 /* If REL, extract the addend from the insn. If RELA, it will
8492 have already been fetched for us. */
8493 if (globals->use_rel)
8494 {
8495 int negative = (insn & (1 << 23)) ? 1 : -1;
8496 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8497 }
8498
8499 /* Compute the value (X) to go in the place. */
8500 if (r_type == R_ARM_LDRS_PC_G0
8501 || r_type == R_ARM_LDRS_PC_G1
8502 || r_type == R_ARM_LDRS_PC_G2)
8503 /* PC relative. */
8504 signed_value = value - pc + signed_addend;
8505 else
8506 /* Section base relative. */
8507 signed_value = value - sb + signed_addend;
8508
8509 /* Calculate the value of the relevant G_{n-1} to obtain
8510 the residual at that stage. */
8511 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8512
8513 /* Check for overflow. */
8514 if (residual >= 0x100)
8515 {
8516 (*_bfd_error_handler)
8517 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8518 input_bfd, input_section,
8519 (long) rel->r_offset, abs (signed_value), howto->name);
8520 return bfd_reloc_overflow;
8521 }
8522
8523 /* Mask out the value and U bit. */
8524 insn &= 0xff7ff0f0;
8525
8526 /* Set the U bit if the value to go in the place is non-negative. */
8527 if (signed_value >= 0)
8528 insn |= 1 << 23;
8529
8530 /* Encode the offset. */
8531 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8532
8533 bfd_put_32 (input_bfd, insn, hit_data);
8534 }
8535 return bfd_reloc_ok;
8536
8537 case R_ARM_LDC_PC_G0:
8538 case R_ARM_LDC_PC_G1:
8539 case R_ARM_LDC_PC_G2:
8540 case R_ARM_LDC_SB_G0:
8541 case R_ARM_LDC_SB_G1:
8542 case R_ARM_LDC_SB_G2:
8543 {
8544 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8545 bfd_vma pc = input_section->output_section->vma
8546 + input_section->output_offset + rel->r_offset;
8547 bfd_vma sb = 0; /* See note above. */
8548 bfd_vma residual;
8549 bfd_signed_vma signed_value;
8550 int group = 0;
8551
8552 /* Determine which groups of bits to calculate. */
8553 switch (r_type)
8554 {
8555 case R_ARM_LDC_PC_G0:
8556 case R_ARM_LDC_SB_G0:
8557 group = 0;
8558 break;
8559
8560 case R_ARM_LDC_PC_G1:
8561 case R_ARM_LDC_SB_G1:
8562 group = 1;
8563 break;
8564
8565 case R_ARM_LDC_PC_G2:
8566 case R_ARM_LDC_SB_G2:
8567 group = 2;
8568 break;
8569
8570 default:
8571 abort ();
8572 }
8573
8574 /* If REL, extract the addend from the insn. If RELA, it will
8575 have already been fetched for us. */
8576 if (globals->use_rel)
8577 {
8578 int negative = (insn & (1 << 23)) ? 1 : -1;
8579 signed_addend = negative * ((insn & 0xff) << 2);
8580 }
8581
8582 /* Compute the value (X) to go in the place. */
8583 if (r_type == R_ARM_LDC_PC_G0
8584 || r_type == R_ARM_LDC_PC_G1
8585 || r_type == R_ARM_LDC_PC_G2)
8586 /* PC relative. */
8587 signed_value = value - pc + signed_addend;
8588 else
8589 /* Section base relative. */
8590 signed_value = value - sb + signed_addend;
8591
8592 /* Calculate the value of the relevant G_{n-1} to obtain
8593 the residual at that stage. */
8594 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8595
8596 /* Check for overflow. (The absolute value to go in the place must be
8597 divisible by four and, after having been divided by four, must
8598 fit in eight bits.) */
8599 if ((residual & 0x3) != 0 || residual >= 0x400)
8600 {
8601 (*_bfd_error_handler)
8602 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8603 input_bfd, input_section,
8604 (long) rel->r_offset, abs (signed_value), howto->name);
8605 return bfd_reloc_overflow;
8606 }
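/* A byte offset of 0x3fc, for instance, passes the check and is encoded
   below as 0x3fc >> 2 == 0xff; 0x400, or any offset that is not a
   multiple of four, is rejected.  */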
8607
8608 /* Mask out the value and U bit. */
8609 insn &= 0xff7fff00;
8610
8611 /* Set the U bit if the value to go in the place is non-negative. */
8612 if (signed_value >= 0)
8613 insn |= 1 << 23;
8614
8615 /* Encode the offset. */
8616 insn |= residual >> 2;
8617
8618 bfd_put_32 (input_bfd, insn, hit_data);
8619 }
8620 return bfd_reloc_ok;
8621
8622 default:
8623 return bfd_reloc_notsupported;
8624 }
8625 }
8626
8627 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8628 static void
8629 arm_add_to_rel (bfd * abfd,
8630 bfd_byte * address,
8631 reloc_howto_type * howto,
8632 bfd_signed_vma increment)
8633 {
8634 bfd_signed_vma addend;
8635
8636 if (howto->type == R_ARM_THM_CALL
8637 || howto->type == R_ARM_THM_JUMP24)
8638 {
8639 int upper_insn, lower_insn;
8640 int upper, lower;
8641
8642 upper_insn = bfd_get_16 (abfd, address);
8643 lower_insn = bfd_get_16 (abfd, address + 2);
8644 upper = upper_insn & 0x7ff;
8645 lower = lower_insn & 0x7ff;
8646
8647 addend = (upper << 12) | (lower << 1);
8648 addend += increment;
8649 addend >>= 1;
8650
8651 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8652 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8653
8654 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8655 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8656 }
8657 else
8658 {
8659 bfd_vma contents;
8660
8661 contents = bfd_get_32 (abfd, address);
8662
8663 /* Get the (signed) value from the instruction. */
8664 addend = contents & howto->src_mask;
8665 if (addend & ((howto->src_mask + 1) >> 1))
8666 {
8667 bfd_signed_vma mask;
8668
8669 mask = -1;
8670 mask &= ~ howto->src_mask;
8671 addend |= mask;
8672 }
8673
8674 /* Add in the increment (which is a byte value). */
8675 switch (howto->type)
8676 {
8677 default:
8678 addend += increment;
8679 break;
8680
8681 case R_ARM_PC24:
8682 case R_ARM_PLT32:
8683 case R_ARM_CALL:
8684 case R_ARM_JUMP24:
8685 addend <<= howto->size;
8686 addend += increment;
8687
8688 /* Should we check for overflow here ? */
8689
8690 /* Drop any undesired bits. */
8691 addend >>= howto->rightshift;
8692 break;
8693 }
8694
8695 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8696
8697 bfd_put_32 (abfd, contents, address);
8698 }
8699 }
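/* arm_add_to_rel is used on REL targets during relocatable links, where
   the addend lives in the section contents and so has to be updated in
   place, as in the call further below:

     arm_add_to_rel (input_bfd, contents + rel->r_offset,
                     howto, (bfd_signed_vma) sec->output_offset);  */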
8700
8701 #define IS_ARM_TLS_RELOC(R_TYPE) \
8702 ((R_TYPE) == R_ARM_TLS_GD32 \
8703 || (R_TYPE) == R_ARM_TLS_LDO32 \
8704 || (R_TYPE) == R_ARM_TLS_LDM32 \
8705 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8706 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8707 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8708 || (R_TYPE) == R_ARM_TLS_LE32 \
8709 || (R_TYPE) == R_ARM_TLS_IE32)
8710
8711 /* Relocate an ARM ELF section. */
8712
8713 static bfd_boolean
8714 elf32_arm_relocate_section (bfd * output_bfd,
8715 struct bfd_link_info * info,
8716 bfd * input_bfd,
8717 asection * input_section,
8718 bfd_byte * contents,
8719 Elf_Internal_Rela * relocs,
8720 Elf_Internal_Sym * local_syms,
8721 asection ** local_sections)
8722 {
8723 Elf_Internal_Shdr *symtab_hdr;
8724 struct elf_link_hash_entry **sym_hashes;
8725 Elf_Internal_Rela *rel;
8726 Elf_Internal_Rela *relend;
8727 const char *name;
8728 struct elf32_arm_link_hash_table * globals;
8729
8730 globals = elf32_arm_hash_table (info);
8731
8732 symtab_hdr = & elf_symtab_hdr (input_bfd);
8733 sym_hashes = elf_sym_hashes (input_bfd);
8734
8735 rel = relocs;
8736 relend = relocs + input_section->reloc_count;
8737 for (; rel < relend; rel++)
8738 {
8739 int r_type;
8740 reloc_howto_type * howto;
8741 unsigned long r_symndx;
8742 Elf_Internal_Sym * sym;
8743 asection * sec;
8744 struct elf_link_hash_entry * h;
8745 bfd_vma relocation;
8746 bfd_reloc_status_type r;
8747 arelent bfd_reloc;
8748 char sym_type;
8749 bfd_boolean unresolved_reloc = FALSE;
8750 char *error_message = NULL;
8751
8752 r_symndx = ELF32_R_SYM (rel->r_info);
8753 r_type = ELF32_R_TYPE (rel->r_info);
8754 r_type = arm_real_reloc_type (globals, r_type);
8755
8756 if ( r_type == R_ARM_GNU_VTENTRY
8757 || r_type == R_ARM_GNU_VTINHERIT)
8758 continue;
8759
8760 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8761 howto = bfd_reloc.howto;
8762
8763 h = NULL;
8764 sym = NULL;
8765 sec = NULL;
8766
8767 if (r_symndx < symtab_hdr->sh_info)
8768 {
8769 sym = local_syms + r_symndx;
8770 sym_type = ELF32_ST_TYPE (sym->st_info);
8771 sec = local_sections[r_symndx];
8772
8773 /* An object file might have a reference to a local
8774 undefined symbol. This is a daft object file, but we
8775 should at least do something about it. V4BX & NONE
8776 relocations do not use the symbol and are explicitly
8777 allowed to use the undefined symbol, so allow those. */
8778 if (r_type != R_ARM_V4BX
8779 && r_type != R_ARM_NONE
8780 && bfd_is_und_section (sec)
8781 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8782 {
8783 if (!info->callbacks->undefined_symbol
8784 (info, bfd_elf_string_from_elf_section
8785 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8786 input_bfd, input_section,
8787 rel->r_offset, TRUE))
8788 return FALSE;
8789 }
8790
8791 if (globals->use_rel)
8792 {
8793 relocation = (sec->output_section->vma
8794 + sec->output_offset
8795 + sym->st_value);
8796 if (!info->relocatable
8797 && (sec->flags & SEC_MERGE)
8798 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8799 {
8800 asection *msec;
8801 bfd_vma addend, value;
8802
8803 switch (r_type)
8804 {
8805 case R_ARM_MOVW_ABS_NC:
8806 case R_ARM_MOVT_ABS:
8807 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8808 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8809 addend = (addend ^ 0x8000) - 0x8000;
8810 break;
8811
8812 case R_ARM_THM_MOVW_ABS_NC:
8813 case R_ARM_THM_MOVT_ABS:
8814 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8815 << 16;
8816 value |= bfd_get_16 (input_bfd,
8817 contents + rel->r_offset + 2);
8818 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8819 | ((value & 0x04000000) >> 15);
8820 addend = (addend ^ 0x8000) - 0x8000;
8821 break;
8822
8823 default:
8824 if (howto->rightshift
8825 || (howto->src_mask & (howto->src_mask + 1)))
8826 {
8827 (*_bfd_error_handler)
8828 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8829 input_bfd, input_section,
8830 (long) rel->r_offset, howto->name);
8831 return FALSE;
8832 }
8833
8834 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8835
8836 /* Get the (signed) value from the instruction. */
8837 addend = value & howto->src_mask;
8838 if (addend & ((howto->src_mask + 1) >> 1))
8839 {
8840 bfd_signed_vma mask;
8841
8842 mask = -1;
8843 mask &= ~ howto->src_mask;
8844 addend |= mask;
8845 }
8846 break;
8847 }
8848
8849 msec = sec;
8850 addend =
8851 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8852 - relocation;
8853 addend += msec->output_section->vma + msec->output_offset;
8854
8855 /* Cases here must match those in the preceding
8856 switch statement. */
8857 switch (r_type)
8858 {
8859 case R_ARM_MOVW_ABS_NC:
8860 case R_ARM_MOVT_ABS:
8861 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8862 | (addend & 0xfff);
8863 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8864 break;
8865
8866 case R_ARM_THM_MOVW_ABS_NC:
8867 case R_ARM_THM_MOVT_ABS:
8868 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8869 | (addend & 0xff) | ((addend & 0x0800) << 15);
8870 bfd_put_16 (input_bfd, value >> 16,
8871 contents + rel->r_offset);
8872 bfd_put_16 (input_bfd, value,
8873 contents + rel->r_offset + 2);
8874 break;
8875
8876 default:
8877 value = (value & ~ howto->dst_mask)
8878 | (addend & howto->dst_mask);
8879 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8880 break;
8881 }
8882 }
8883 }
8884 else
8885 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8886 }
8887 else
8888 {
8889 bfd_boolean warned;
8890
8891 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8892 r_symndx, symtab_hdr, sym_hashes,
8893 h, sec, relocation,
8894 unresolved_reloc, warned);
8895
8896 sym_type = h->type;
8897 }
8898
8899 if (sec != NULL && elf_discarded_section (sec))
8900 {
8901 /* For relocs against symbols from removed linkonce sections,
8902 or sections discarded by a linker script, we just want the
8903 section contents zeroed. Avoid any special processing. */
8904 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8905 rel->r_info = 0;
8906 rel->r_addend = 0;
8907 continue;
8908 }
8909
8910 if (info->relocatable)
8911 {
8912 /* This is a relocatable link. We don't have to change
8913 anything, unless the reloc is against a section symbol,
8914 in which case we have to adjust according to where the
8915 section symbol winds up in the output section. */
8916 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8917 {
8918 if (globals->use_rel)
8919 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8920 howto, (bfd_signed_vma) sec->output_offset);
8921 else
8922 rel->r_addend += sec->output_offset;
8923 }
8924 continue;
8925 }
8926
8927 if (h != NULL)
8928 name = h->root.root.string;
8929 else
8930 {
8931 name = (bfd_elf_string_from_elf_section
8932 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8933 if (name == NULL || *name == '\0')
8934 name = bfd_section_name (input_bfd, sec);
8935 }
8936
8937 if (r_symndx != 0
8938 && r_type != R_ARM_NONE
8939 && (h == NULL
8940 || h->root.type == bfd_link_hash_defined
8941 || h->root.type == bfd_link_hash_defweak)
8942 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8943 {
8944 (*_bfd_error_handler)
8945 ((sym_type == STT_TLS
8946 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8947 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8948 input_bfd,
8949 input_section,
8950 (long) rel->r_offset,
8951 howto->name,
8952 name);
8953 }
8954
8955 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8956 input_section, contents, rel,
8957 relocation, info, sec, name,
8958 (h ? ELF_ST_TYPE (h->type) :
8959 ELF_ST_TYPE (sym->st_info)), h,
8960 &unresolved_reloc, &error_message);
8961
8962 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8963 because such sections are not SEC_ALLOC and thus ld.so will
8964 not process them. */
8965 if (unresolved_reloc
8966 && !((input_section->flags & SEC_DEBUGGING) != 0
8967 && h->def_dynamic))
8968 {
8969 (*_bfd_error_handler)
8970 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8971 input_bfd,
8972 input_section,
8973 (long) rel->r_offset,
8974 howto->name,
8975 h->root.root.string);
8976 return FALSE;
8977 }
8978
8979 if (r != bfd_reloc_ok)
8980 {
8981 switch (r)
8982 {
8983 case bfd_reloc_overflow:
8984 /* If the overflowing reloc was to an undefined symbol,
8985 we have already printed one error message and there
8986 is no point complaining again. */
8987 if ((! h ||
8988 h->root.type != bfd_link_hash_undefined)
8989 && (!((*info->callbacks->reloc_overflow)
8990 (info, (h ? &h->root : NULL), name, howto->name,
8991 (bfd_vma) 0, input_bfd, input_section,
8992 rel->r_offset))))
8993 return FALSE;
8994 break;
8995
8996 case bfd_reloc_undefined:
8997 if (!((*info->callbacks->undefined_symbol)
8998 (info, name, input_bfd, input_section,
8999 rel->r_offset, TRUE)))
9000 return FALSE;
9001 break;
9002
9003 case bfd_reloc_outofrange:
9004 error_message = _("out of range");
9005 goto common_error;
9006
9007 case bfd_reloc_notsupported:
9008 error_message = _("unsupported relocation");
9009 goto common_error;
9010
9011 case bfd_reloc_dangerous:
9012 /* error_message should already be set. */
9013 goto common_error;
9014
9015 default:
9016 error_message = _("unknown error");
9017 /* Fall through. */
9018
9019 common_error:
9020 BFD_ASSERT (error_message != NULL);
9021 if (!((*info->callbacks->reloc_dangerous)
9022 (info, error_message, input_bfd, input_section,
9023 rel->r_offset)))
9024 return FALSE;
9025 break;
9026 }
9027 }
9028 }
9029
9030 return TRUE;
9031 }
9032
9033 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
9034 adds the edit to the start of the list. (The list must be built in order of
9035 ascending INDEX: the function's callers are primarily responsible for
9036 maintaining that condition). */
9037
9038 static void
9039 add_unwind_table_edit (arm_unwind_table_edit **head,
9040 arm_unwind_table_edit **tail,
9041 arm_unwind_edit_type type,
9042 asection *linked_section,
9043 unsigned int index)
9044 {
9045 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
9046
9047 new_edit->type = type;
9048 new_edit->linked_section = linked_section;
9049 new_edit->index = index;
9050
9051 if (index > 0)
9052 {
9053 new_edit->next = NULL;
9054
9055 if (*tail)
9056 (*tail)->next = new_edit;
9057
9058 (*tail) = new_edit;
9059
9060 if (!*head)
9061 (*head) = new_edit;
9062 }
9063 else
9064 {
9065 new_edit->next = *head;
9066
9067 if (!*tail)
9068 *tail = new_edit;
9069
9070 *head = new_edit;
9071 }
9072 }
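/* Two kinds of edit pass through this function: DELETE_EXIDX_ENTRY edits
   queued while scanning an index table (INDEX is the entry number), and
   the INSERT_EXIDX_CANTUNWIND_AT_END edit added by insert_cantunwind_after
   with INDEX == UINT_MAX, which keeps it at the tail of the list.  */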
9073
9074 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9075
9076 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
9077 static void
9078 adjust_exidx_size (asection *exidx_sec, int adjust)
9079 {
9080 asection *out_sec;
9081
9082 if (!exidx_sec->rawsize)
9083 exidx_sec->rawsize = exidx_sec->size;
9084
9085 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9086 out_sec = exidx_sec->output_section;
9087 /* Adjust size of output section. */
9088 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9089 }
9090
9091 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9092 static void
9093 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
9094 {
9095 struct _arm_elf_section_data *exidx_arm_data;
9096
9097 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9098 add_unwind_table_edit (
9099 &exidx_arm_data->u.exidx.unwind_edit_list,
9100 &exidx_arm_data->u.exidx.unwind_edit_tail,
9101 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9102
9103 adjust_exidx_size (exidx_sec, 8);
9104 }
9105
9106 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9107 made to those tables, such that:
9108
9109 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9110 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9111 codes which have been inlined into the index).
9112
9113 The edits are applied when the tables are written
9114 (in elf32_arm_write_section).
9115 */
9116
9117 bfd_boolean
9118 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9119 unsigned int num_text_sections,
9120 struct bfd_link_info *info)
9121 {
9122 bfd *inp;
9123 unsigned int last_second_word = 0, i;
9124 asection *last_exidx_sec = NULL;
9125 asection *last_text_sec = NULL;
9126 int last_unwind_type = -1;
9127
9128 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9129 text sections. */
9130 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9131 {
9132 asection *sec;
9133
9134 for (sec = inp->sections; sec != NULL; sec = sec->next)
9135 {
9136 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9137 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9138
9139 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9140 continue;
9141
9142 if (elf_sec->linked_to)
9143 {
9144 Elf_Internal_Shdr *linked_hdr
9145 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9146 struct _arm_elf_section_data *linked_sec_arm_data
9147 = get_arm_elf_section_data (linked_hdr->bfd_section);
9148
9149 if (linked_sec_arm_data == NULL)
9150 continue;
9151
9152 /* Link this .ARM.exidx section back from the text section it
9153 describes. */
9154 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9155 }
9156 }
9157 }
9158
9159 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9160 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9161 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
9162 */
9163
9164 for (i = 0; i < num_text_sections; i++)
9165 {
9166 asection *sec = text_section_order[i];
9167 asection *exidx_sec;
9168 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9169 struct _arm_elf_section_data *exidx_arm_data;
9170 bfd_byte *contents = NULL;
9171 int deleted_exidx_bytes = 0;
9172 bfd_vma j;
9173 arm_unwind_table_edit *unwind_edit_head = NULL;
9174 arm_unwind_table_edit *unwind_edit_tail = NULL;
9175 Elf_Internal_Shdr *hdr;
9176 bfd *ibfd;
9177
9178 if (arm_data == NULL)
9179 continue;
9180
9181 exidx_sec = arm_data->u.text.arm_exidx_sec;
9182 if (exidx_sec == NULL)
9183 {
9184 /* Section has no unwind data. */
9185 if (last_unwind_type == 0 || !last_exidx_sec)
9186 continue;
9187
9188 /* Ignore zero sized sections. */
9189 if (sec->size == 0)
9190 continue;
9191
9192 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9193 last_unwind_type = 0;
9194 continue;
9195 }
9196
9197 /* Skip /DISCARD/ sections. */
9198 if (bfd_is_abs_section (exidx_sec->output_section))
9199 continue;
9200
9201 hdr = &elf_section_data (exidx_sec)->this_hdr;
9202 if (hdr->sh_type != SHT_ARM_EXIDX)
9203 continue;
9204
9205 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9206 if (exidx_arm_data == NULL)
9207 continue;
9208
9209 ibfd = exidx_sec->owner;
9210
9211 if (hdr->contents != NULL)
9212 contents = hdr->contents;
9213 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9214 /* An error? */
9215 continue;
9216
9217 for (j = 0; j < hdr->sh_size; j += 8)
9218 {
9219 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9220 int unwind_type;
9221 int elide = 0;
9222
9223 /* An EXIDX_CANTUNWIND entry. */
9224 if (second_word == 1)
9225 {
9226 if (last_unwind_type == 0)
9227 elide = 1;
9228 unwind_type = 0;
9229 }
9230 /* Inlined unwinding data. Merge if equal to previous. */
9231 else if ((second_word & 0x80000000) != 0)
9232 {
9233 if (last_second_word == second_word && last_unwind_type == 1)
9234 elide = 1;
9235 unwind_type = 1;
9236 last_second_word = second_word;
9237 }
9238 /* Normal table entry. In theory we could merge these too,
9239 but duplicate entries are likely to be much less common. */
9240 else
9241 unwind_type = 2;
9242
9243 if (elide)
9244 {
9245 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9246 DELETE_EXIDX_ENTRY, NULL, j / 8);
9247
9248 deleted_exidx_bytes += 8;
9249 }
9250
9251 last_unwind_type = unwind_type;
9252 }
9253
9254 /* Free contents if we allocated it ourselves. */
9255 if (contents != hdr->contents)
9256 free (contents);
9257
9258 /* Record edits to be applied later (in elf32_arm_write_section). */
9259 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9260 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9261
9262 if (deleted_exidx_bytes > 0)
9263 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9264
9265 last_exidx_sec = exidx_sec;
9266 last_text_sec = sec;
9267 }
9268
9269 /* Add terminating CANTUNWIND entry. */
9270 if (last_exidx_sec && last_unwind_type != 0)
9271 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9272
9273 return TRUE;
9274 }
9275
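/* An illustrative sketch, kept disabled: the second-word classification
   used by the scan in elf32_arm_fix_exidx_coverage above, restated as a
   stand-alone helper.  The helper name is hypothetical.  */
#if 0
static int
exidx_entry_unwind_type (unsigned int second_word)
{
  if (second_word == 1)
    return 0;	/* EXIDX_CANTUNWIND.  */
  if ((second_word & 0x80000000) != 0)
    return 1;	/* Unwind opcodes inlined into the index entry.  */
  return 2;	/* Normal (out-of-line) table entry.  */
}
#endif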
9276 static bfd_boolean
9277 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9278 bfd *ibfd, const char *name)
9279 {
9280 asection *sec, *osec;
9281
9282 sec = bfd_get_section_by_name (ibfd, name);
9283 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9284 return TRUE;
9285
9286 osec = sec->output_section;
9287 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9288 return TRUE;
9289
9290 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9291 sec->output_offset, sec->size))
9292 return FALSE;
9293
9294 return TRUE;
9295 }
9296
9297 static bfd_boolean
9298 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9299 {
9300 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9301
9302 /* Invoke the regular ELF backend linker to do all the work. */
9303 if (!bfd_elf_final_link (abfd, info))
9304 return FALSE;
9305
9306 /* Write out any glue sections now that we have created all the
9307 stubs. */
9308 if (globals->bfd_of_glue_owner != NULL)
9309 {
9310 if (! elf32_arm_output_glue_section (info, abfd,
9311 globals->bfd_of_glue_owner,
9312 ARM2THUMB_GLUE_SECTION_NAME))
9313 return FALSE;
9314
9315 if (! elf32_arm_output_glue_section (info, abfd,
9316 globals->bfd_of_glue_owner,
9317 THUMB2ARM_GLUE_SECTION_NAME))
9318 return FALSE;
9319
9320 if (! elf32_arm_output_glue_section (info, abfd,
9321 globals->bfd_of_glue_owner,
9322 VFP11_ERRATUM_VENEER_SECTION_NAME))
9323 return FALSE;
9324
9325 if (! elf32_arm_output_glue_section (info, abfd,
9326 globals->bfd_of_glue_owner,
9327 ARM_BX_GLUE_SECTION_NAME))
9328 return FALSE;
9329 }
9330
9331 return TRUE;
9332 }
9333
9334 /* Set the right machine number. */
9335
9336 static bfd_boolean
9337 elf32_arm_object_p (bfd *abfd)
9338 {
9339 unsigned int mach;
9340
9341 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9342
9343 if (mach != bfd_mach_arm_unknown)
9344 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9345
9346 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9347 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9348
9349 else
9350 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9351
9352 return TRUE;
9353 }
9354
9355 /* Function to keep ARM specific flags in the ELF header. */
9356
9357 static bfd_boolean
9358 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9359 {
9360 if (elf_flags_init (abfd)
9361 && elf_elfheader (abfd)->e_flags != flags)
9362 {
9363 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9364 {
9365 if (flags & EF_ARM_INTERWORK)
9366 (*_bfd_error_handler)
9367 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9368 abfd);
9369 else
9370 _bfd_error_handler
9371 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9372 abfd);
9373 }
9374 }
9375 else
9376 {
9377 elf_elfheader (abfd)->e_flags = flags;
9378 elf_flags_init (abfd) = TRUE;
9379 }
9380
9381 return TRUE;
9382 }
9383
9384 /* Copy backend specific data from one object module to another. */
9385
9386 static bfd_boolean
9387 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9388 {
9389 flagword in_flags;
9390 flagword out_flags;
9391
9392 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9393 return TRUE;
9394
9395 in_flags = elf_elfheader (ibfd)->e_flags;
9396 out_flags = elf_elfheader (obfd)->e_flags;
9397
9398 if (elf_flags_init (obfd)
9399 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9400 && in_flags != out_flags)
9401 {
9402 /* Cannot mix APCS26 and APCS32 code. */
9403 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9404 return FALSE;
9405
9406 /* Cannot mix float APCS and non-float APCS code. */
9407 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9408 return FALSE;
9409
9410 /* If the src and dest have different interworking flags
9411 then turn off the interworking bit. */
9412 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9413 {
9414 if (out_flags & EF_ARM_INTERWORK)
9415 _bfd_error_handler
9416 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9417 obfd, ibfd);
9418
9419 in_flags &= ~EF_ARM_INTERWORK;
9420 }
9421
9422 /* Likewise for PIC, though don't warn for this case. */
9423 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9424 in_flags &= ~EF_ARM_PIC;
9425 }
9426
9427 elf_elfheader (obfd)->e_flags = in_flags;
9428 elf_flags_init (obfd) = TRUE;
9429
9430 /* Also copy the EI_OSABI field. */
9431 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9432 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9433
9434 /* Copy object attributes. */
9435 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9436
9437 return TRUE;
9438 }
9439
9440 /* Values for Tag_ABI_PCS_R9_use. */
9441 enum
9442 {
9443 AEABI_R9_V6,
9444 AEABI_R9_SB,
9445 AEABI_R9_TLS,
9446 AEABI_R9_unused
9447 };
9448
9449 /* Values for Tag_ABI_PCS_RW_data. */
9450 enum
9451 {
9452 AEABI_PCS_RW_data_absolute,
9453 AEABI_PCS_RW_data_PCrel,
9454 AEABI_PCS_RW_data_SBrel,
9455 AEABI_PCS_RW_data_unused
9456 };
9457
9458 /* Values for Tag_ABI_enum_size. */
9459 enum
9460 {
9461 AEABI_enum_unused,
9462 AEABI_enum_short,
9463 AEABI_enum_wide,
9464 AEABI_enum_forced_wide
9465 };
9466
9467 /* Determine whether an object attribute tag takes an integer, a
9468 string or both. */
9469
9470 static int
9471 elf32_arm_obj_attrs_arg_type (int tag)
9472 {
9473 if (tag == Tag_compatibility)
9474 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9475 else if (tag == Tag_nodefaults)
9476 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9477 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9478 return ATTR_TYPE_FLAG_STR_VAL;
9479 else if (tag < 32)
9480 return ATTR_TYPE_FLAG_INT_VAL;
9481 else
9482 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9483 }
9484
9485 /* The ABI defines that Tag_conformance should be emitted first, and that
9486 Tag_nodefaults should be second (if either is defined). This sets those
9487 two positions, and bumps up the position of all the remaining tags to
9488 compensate. */
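/* Worked example, assuming the standard AEABI numbering in which
   Tag_nodefaults is 64 and Tag_conformance is 67: positions 4 and 5 yield
   Tag_conformance and Tag_nodefaults respectively, positions 6..65 yield
   tags 4..63, positions 66 and 67 yield tags 65 and 66, and higher
   positions are returned unchanged.  */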
9489 static int
9490 elf32_arm_obj_attrs_order (int num)
9491 {
9492 if (num == 4)
9493 return Tag_conformance;
9494 if (num == 5)
9495 return Tag_nodefaults;
9496 if ((num - 2) < Tag_nodefaults)
9497 return num - 2;
9498 if ((num - 1) < Tag_conformance)
9499 return num - 1;
9500 return num;
9501 }
9502
9503 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9504 Returns -1 if no architecture could be read. */
9505
9506 static int
9507 get_secondary_compatible_arch (bfd *abfd)
9508 {
9509 obj_attribute *attr =
9510 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9511
9512 /* Note: the tag and its argument below are uleb128 values, though
9513 currently-defined values fit in one byte for each. */
9514 if (attr->s
9515 && attr->s[0] == Tag_CPU_arch
9516 && (attr->s[1] & 128) != 128
9517 && attr->s[2] == 0)
9518 return attr->s[1];
9519
9520 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9521 return -1;
9522 }
9523
9524 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9525 The tag is removed if ARCH is -1. */
9526
9527 static void
9528 set_secondary_compatible_arch (bfd *abfd, int arch)
9529 {
9530 obj_attribute *attr =
9531 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9532
9533 if (arch == -1)
9534 {
9535 attr->s = NULL;
9536 return;
9537 }
9538
9539 /* Note: the tag and its argument below are uleb128 values, though
9540 currently-defined values fit in one byte for each. */
9541 if (!attr->s)
9542 attr->s = bfd_alloc (abfd, 3);
9543 attr->s[0] = Tag_CPU_arch;
9544 attr->s[1] = arch;
9545 attr->s[2] = '\0';
9546 }
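/* For example, set_secondary_compatible_arch (abfd, TAG_CPU_ARCH_V6_M)
   leaves the attribute holding the three bytes
   { Tag_CPU_arch, TAG_CPU_ARCH_V6_M, 0 }, i.e. the uleb128-encoded
   tag/value pair that get_secondary_compatible_arch above reads back.  */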
9547
9548 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9549 into account. */
9550
9551 static int
9552 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9553 int newtag, int secondary_compat)
9554 {
9555 #define T(X) TAG_CPU_ARCH_##X
9556 int tagl, tagh, result;
9557 const int v6t2[] =
9558 {
9559 T(V6T2), /* PRE_V4. */
9560 T(V6T2), /* V4. */
9561 T(V6T2), /* V4T. */
9562 T(V6T2), /* V5T. */
9563 T(V6T2), /* V5TE. */
9564 T(V6T2), /* V5TEJ. */
9565 T(V6T2), /* V6. */
9566 T(V7), /* V6KZ. */
9567 T(V6T2) /* V6T2. */
9568 };
9569 const int v6k[] =
9570 {
9571 T(V6K), /* PRE_V4. */
9572 T(V6K), /* V4. */
9573 T(V6K), /* V4T. */
9574 T(V6K), /* V5T. */
9575 T(V6K), /* V5TE. */
9576 T(V6K), /* V5TEJ. */
9577 T(V6K), /* V6. */
9578 T(V6KZ), /* V6KZ. */
9579 T(V7), /* V6T2. */
9580 T(V6K) /* V6K. */
9581 };
9582 const int v7[] =
9583 {
9584 T(V7), /* PRE_V4. */
9585 T(V7), /* V4. */
9586 T(V7), /* V4T. */
9587 T(V7), /* V5T. */
9588 T(V7), /* V5TE. */
9589 T(V7), /* V5TEJ. */
9590 T(V7), /* V6. */
9591 T(V7), /* V6KZ. */
9592 T(V7), /* V6T2. */
9593 T(V7), /* V6K. */
9594 T(V7) /* V7. */
9595 };
9596 const int v6_m[] =
9597 {
9598 -1, /* PRE_V4. */
9599 -1, /* V4. */
9600 T(V6K), /* V4T. */
9601 T(V6K), /* V5T. */
9602 T(V6K), /* V5TE. */
9603 T(V6K), /* V5TEJ. */
9604 T(V6K), /* V6. */
9605 T(V6KZ), /* V6KZ. */
9606 T(V7), /* V6T2. */
9607 T(V6K), /* V6K. */
9608 T(V7), /* V7. */
9609 T(V6_M) /* V6_M. */
9610 };
9611 const int v6s_m[] =
9612 {
9613 -1, /* PRE_V4. */
9614 -1, /* V4. */
9615 T(V6K), /* V4T. */
9616 T(V6K), /* V5T. */
9617 T(V6K), /* V5TE. */
9618 T(V6K), /* V5TEJ. */
9619 T(V6K), /* V6. */
9620 T(V6KZ), /* V6KZ. */
9621 T(V7), /* V6T2. */
9622 T(V6K), /* V6K. */
9623 T(V7), /* V7. */
9624 T(V6S_M), /* V6_M. */
9625 T(V6S_M) /* V6S_M. */
9626 };
9627 const int v4t_plus_v6_m[] =
9628 {
9629 -1, /* PRE_V4. */
9630 -1, /* V4. */
9631 T(V4T), /* V4T. */
9632 T(V5T), /* V5T. */
9633 T(V5TE), /* V5TE. */
9634 T(V5TEJ), /* V5TEJ. */
9635 T(V6), /* V6. */
9636 T(V6KZ), /* V6KZ. */
9637 T(V6T2), /* V6T2. */
9638 T(V6K), /* V6K. */
9639 T(V7), /* V7. */
9640 T(V6_M), /* V6_M. */
9641 T(V6S_M), /* V6S_M. */
9642 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9643 };
9644 const int *comb[] =
9645 {
9646 v6t2,
9647 v6k,
9648 v7,
9649 v6_m,
9650 v6s_m,
9651 /* Pseudo-architecture. */
9652 v4t_plus_v6_m
9653 };
9654
9655 /* Check we've not got a higher architecture than we know about. */
9656
9657 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9658 {
9659 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9660 return -1;
9661 }
9662
9663 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9664
9665 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9666 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9667 oldtag = T(V4T_PLUS_V6_M);
9668
9669 /* And override the new tag if we have a Tag_also_compatible_with on the
9670 input. */
9671
9672 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9673 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9674 newtag = T(V4T_PLUS_V6_M);
9675
9676 tagl = (oldtag < newtag) ? oldtag : newtag;
9677 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9678
9679 /* Architectures before V6KZ add features monotonically. */
9680 if (tagh <= TAG_CPU_ARCH_V6KZ)
9681 return result;
9682
9683 result = comb[tagh - T(V6T2)][tagl];
9684
9685 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9686 as the canonical version. */
9687 if (result == T(V4T_PLUS_V6_M))
9688 {
9689 result = T(V4T);
9690 *secondary_compat_out = T(V6_M);
9691 }
9692 else
9693 *secondary_compat_out = -1;
9694
9695 if (result == -1)
9696 {
9697 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9698 ibfd, oldtag, newtag);
9699 return -1;
9700 }
9701
9702 return result;
9703 #undef T
9704 }
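/* A worked example of the table lookup above: combining Tag_CPU_arch
   values TAG_CPU_ARCH_V6T2 and TAG_CPU_ARCH_V6K gives tagh == V6K, so the
   result is v6k[V6T2], i.e. TAG_CPU_ARCH_V7, since neither of the two
   architectures is a subset of the other.  */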
9705
9706 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9707 are conflicting attributes. */
9708
9709 static bfd_boolean
9710 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9711 {
9712 obj_attribute *in_attr;
9713 obj_attribute *out_attr;
9714 obj_attribute_list *in_list;
9715 obj_attribute_list *out_list;
9716 obj_attribute_list **out_listp;
9717 /* Some tags have 0 = don't care, 1 = strong requirement,
9718 2 = weak requirement. */
9719 static const int order_021[3] = {0, 2, 1};
9720 /* For use with Tag_VFP_arch. */
9721 static const int order_01243[5] = {0, 1, 2, 4, 3};
9722 int i;
9723 bfd_boolean result = TRUE;
9724
9725 /* Skip the linker stubs file. This preserves previous behavior
9726 of accepting unknown attributes in the first input file - but
9727 is that a bug? */
9728 if (ibfd->flags & BFD_LINKER_CREATED)
9729 return TRUE;
9730
9731 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9732 {
9733 /* This is the first object. Copy the attributes. */
9734 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9735
9736 /* Use the Tag_null value to indicate the attributes have been
9737 initialized. */
9738 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9739
9740 return TRUE;
9741 }
9742
9743 in_attr = elf_known_obj_attributes_proc (ibfd);
9744 out_attr = elf_known_obj_attributes_proc (obfd);
9745 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9746 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9747 {
9748 /* Ignore mismatches if the object doesn't use floating point. */
9749 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9750 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9751 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9752 {
9753 _bfd_error_handler
9754 (_("error: %B uses VFP register arguments, %B does not"),
9755 ibfd, obfd);
9756 result = FALSE;
9757 }
9758 }
9759
9760 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9761 {
9762 /* Merge this attribute with existing attributes. */
9763 switch (i)
9764 {
9765 case Tag_CPU_raw_name:
9766 case Tag_CPU_name:
9767 /* These are merged after Tag_CPU_arch. */
9768 break;
9769
9770 case Tag_ABI_optimization_goals:
9771 case Tag_ABI_FP_optimization_goals:
9772 /* Use the first value seen. */
9773 break;
9774
9775 case Tag_CPU_arch:
9776 {
9777 int secondary_compat = -1, secondary_compat_out = -1;
9778 unsigned int saved_out_attr = out_attr[i].i;
9779 static const char *name_table[] = {
9780 /* These aren't real CPU names, but we can't guess
9781 that from the architecture version alone. */
9782 "Pre v4",
9783 "ARM v4",
9784 "ARM v4T",
9785 "ARM v5T",
9786 "ARM v5TE",
9787 "ARM v5TEJ",
9788 "ARM v6",
9789 "ARM v6KZ",
9790 "ARM v6T2",
9791 "ARM v6K",
9792 "ARM v7",
9793 "ARM v6-M",
9794 "ARM v6S-M"
9795 };
9796
9797 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9798 secondary_compat = get_secondary_compatible_arch (ibfd);
9799 secondary_compat_out = get_secondary_compatible_arch (obfd);
9800 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9801 &secondary_compat_out,
9802 in_attr[i].i,
9803 secondary_compat);
9804 set_secondary_compatible_arch (obfd, secondary_compat_out);
9805
9806 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9807 if (out_attr[i].i == saved_out_attr)
9808 ; /* Leave the names alone. */
9809 else if (out_attr[i].i == in_attr[i].i)
9810 {
9811 /* The output architecture has been changed to match the
9812 input architecture. Use the input names. */
9813 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9814 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9815 : NULL;
9816 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9817 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9818 : NULL;
9819 }
9820 else
9821 {
9822 out_attr[Tag_CPU_name].s = NULL;
9823 out_attr[Tag_CPU_raw_name].s = NULL;
9824 }
9825
9826 /* If we still don't have a value for Tag_CPU_name,
9827 make one up now. Tag_CPU_raw_name remains blank. */
9828 if (out_attr[Tag_CPU_name].s == NULL
9829 && out_attr[i].i < ARRAY_SIZE (name_table))
9830 out_attr[Tag_CPU_name].s =
9831 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9832 }
9833 break;
9834
9835 case Tag_ARM_ISA_use:
9836 case Tag_THUMB_ISA_use:
9837 case Tag_WMMX_arch:
9838 case Tag_Advanced_SIMD_arch:
9839 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9840 case Tag_ABI_FP_rounding:
9841 case Tag_ABI_FP_exceptions:
9842 case Tag_ABI_FP_user_exceptions:
9843 case Tag_ABI_FP_number_model:
9844 case Tag_VFP_HP_extension:
9845 case Tag_CPU_unaligned_access:
9846 case Tag_T2EE_use:
9847 case Tag_Virtualization_use:
9848 case Tag_MPextension_use:
9849 /* Use the largest value specified. */
9850 if (in_attr[i].i > out_attr[i].i)
9851 out_attr[i].i = in_attr[i].i;
9852 break;
9853
9854 case Tag_ABI_align8_preserved:
9855 case Tag_ABI_PCS_RO_data:
9856 /* Use the smallest value specified. */
9857 if (in_attr[i].i < out_attr[i].i)
9858 out_attr[i].i = in_attr[i].i;
9859 break;
9860
9861 case Tag_ABI_align8_needed:
9862 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9863 && (in_attr[Tag_ABI_align8_preserved].i == 0
9864 || out_attr[Tag_ABI_align8_preserved].i == 0))
9865 {
9866 /* This error message should be enabled once all non-conformant
9867 binaries in the toolchain have had the attributes set
9868 properly.
9869 _bfd_error_handler
9870 (_("error: %B: 8-byte data alignment conflicts with %B"),
9871 obfd, ibfd);
9872 result = FALSE; */
9873 }
9874 /* Fall through. */
9875 case Tag_ABI_FP_denormal:
9876 case Tag_ABI_PCS_GOT_use:
9877 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9878 value if greater than 2 (for future-proofing). */
9879 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9880 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9881 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9882 out_attr[i].i = in_attr[i].i;
9883 break;
9884
9885
9886 case Tag_CPU_arch_profile:
9887 if (out_attr[i].i != in_attr[i].i)
9888 {
9889 /* 0 will merge with anything.
9890 'A' and 'S' merge to 'A'.
9891 'R' and 'S' merge to 'R'.
9892 'M' and 'A|R|S' is an error. */
9893 if (out_attr[i].i == 0
9894 || (out_attr[i].i == 'S'
9895 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9896 out_attr[i].i = in_attr[i].i;
9897 else if (in_attr[i].i == 0
9898 || (in_attr[i].i == 'S'
9899 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9900 ; /* Do nothing. */
9901 else
9902 {
9903 _bfd_error_handler
9904 (_("error: %B: Conflicting architecture profiles %c/%c"),
9905 ibfd,
9906 in_attr[i].i ? in_attr[i].i : '0',
9907 out_attr[i].i ? out_attr[i].i : '0');
9908 result = FALSE;
9909 }
9910 }
9911 break;
9912 case Tag_VFP_arch:
9913 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9914 largest value if greater than 4 (for future-proofing). */
9915 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9916 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9917 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9918 out_attr[i].i = in_attr[i].i;
9919 break;
9920 case Tag_PCS_config:
9921 if (out_attr[i].i == 0)
9922 out_attr[i].i = in_attr[i].i;
9923 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9924 {
9925 /* It's sometimes ok to mix different configs, so this is only
9926 a warning. */
9927 _bfd_error_handler
9928 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9929 }
9930 break;
9931 case Tag_ABI_PCS_R9_use:
9932 if (in_attr[i].i != out_attr[i].i
9933 && out_attr[i].i != AEABI_R9_unused
9934 && in_attr[i].i != AEABI_R9_unused)
9935 {
9936 _bfd_error_handler
9937 (_("error: %B: Conflicting use of R9"), ibfd);
9938 result = FALSE;
9939 }
9940 if (out_attr[i].i == AEABI_R9_unused)
9941 out_attr[i].i = in_attr[i].i;
9942 break;
9943 case Tag_ABI_PCS_RW_data:
9944 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9945 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9946 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9947 {
9948 _bfd_error_handler
9949 (_("error: %B: SB relative addressing conflicts with use of R9"),
9950 ibfd);
9951 result = FALSE;
9952 }
9953 /* Use the smallest value specified. */
9954 if (in_attr[i].i < out_attr[i].i)
9955 out_attr[i].i = in_attr[i].i;
9956 break;
9957 case Tag_ABI_PCS_wchar_t:
9958 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9959 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9960 {
9961 _bfd_error_handler
9962 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9963 ibfd, in_attr[i].i, out_attr[i].i);
9964 }
9965 else if (in_attr[i].i && !out_attr[i].i)
9966 out_attr[i].i = in_attr[i].i;
9967 break;
9968 case Tag_ABI_enum_size:
9969 if (in_attr[i].i != AEABI_enum_unused)
9970 {
9971 if (out_attr[i].i == AEABI_enum_unused
9972 || out_attr[i].i == AEABI_enum_forced_wide)
9973 {
9974 /* The existing object is compatible with anything.
9975 Use whatever requirements the new object has. */
9976 out_attr[i].i = in_attr[i].i;
9977 }
9978 else if (in_attr[i].i != AEABI_enum_forced_wide
9979 && out_attr[i].i != in_attr[i].i
9980 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9981 {
9982 static const char *aeabi_enum_names[] =
9983 { "", "variable-size", "32-bit", "" };
9984 const char *in_name =
9985 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9986 ? aeabi_enum_names[in_attr[i].i]
9987 : "<unknown>";
9988 const char *out_name =
9989 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9990 ? aeabi_enum_names[out_attr[i].i]
9991 : "<unknown>";
9992 _bfd_error_handler
9993 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9994 ibfd, in_name, out_name);
9995 }
9996 }
9997 break;
9998 case Tag_ABI_VFP_args:
9999 /* Already done. */
10000 break;
10001 case Tag_ABI_WMMX_args:
10002 if (in_attr[i].i != out_attr[i].i)
10003 {
10004 _bfd_error_handler
10005 (_("error: %B uses iWMMXt register arguments, %B does not"),
10006 ibfd, obfd);
10007 result = FALSE;
10008 }
10009 break;
10010 case Tag_compatibility:
10011 /* Merged in target-independent code. */
10012 break;
10013 case Tag_ABI_HardFP_use:
10014 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
10015 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
10016 || (in_attr[i].i == 2 && out_attr[i].i == 1))
10017 out_attr[i].i = 3;
10018 else if (in_attr[i].i > out_attr[i].i)
10019 out_attr[i].i = in_attr[i].i;
10020 break;
10021 case Tag_ABI_FP_16bit_format:
10022 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10023 {
10024 if (in_attr[i].i != out_attr[i].i)
10025 {
10026 _bfd_error_handler
10027 (_("error: fp16 format mismatch between %B and %B"),
10028 ibfd, obfd);
10029 result = FALSE;
10030 }
10031 }
10032 if (in_attr[i].i != 0)
10033 out_attr[i].i = in_attr[i].i;
10034 break;
10035
10036 case Tag_nodefaults:
10037 /* This tag is set if it exists, but the value is unused (and is
10038 typically zero). We don't actually need to do anything here -
10039 the merge happens automatically when the type flags are merged
10040 below. */
10041 break;
10042 case Tag_also_compatible_with:
10043 /* Already done in Tag_CPU_arch. */
10044 break;
10045 case Tag_conformance:
10046 /* Keep the attribute if it matches. Throw it away otherwise.
10047 No attribute means no claim to conform. */
10048 if (!in_attr[i].s || !out_attr[i].s
10049 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10050 out_attr[i].s = NULL;
10051 break;
10052
10053 default:
10054 {
10055 bfd *err_bfd = NULL;
10056
10057 /* The "known_obj_attributes" table does contain some undefined
10058 attributes. Ensure that there are unused. */
10059 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10060 err_bfd = obfd;
10061 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10062 err_bfd = ibfd;
10063
10064 if (err_bfd != NULL)
10065 {
10066 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10067 if ((i & 127) < 64)
10068 {
10069 _bfd_error_handler
10070 (_("%B: Unknown mandatory EABI object attribute %d"),
10071 err_bfd, i);
10072 bfd_set_error (bfd_error_bad_value);
10073 result = FALSE;
10074 }
10075 else
10076 {
10077 _bfd_error_handler
10078 (_("Warning: %B: Unknown EABI object attribute %d"),
10079 err_bfd, i);
10080 }
10081 }
10082
10083 /* Only pass on attributes that match in both inputs. */
10084 if (in_attr[i].i != out_attr[i].i
10085 || in_attr[i].s != out_attr[i].s
10086 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10087 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10088 {
10089 out_attr[i].i = 0;
10090 out_attr[i].s = NULL;
10091 }
10092 }
10093 }
10094
10095 /* If out_attr was copied from in_attr then it won't have a type yet. */
10096 if (in_attr[i].type && !out_attr[i].type)
10097 out_attr[i].type = in_attr[i].type;
10098 }
10099
10100 /* Merge Tag_compatibility attributes and any common GNU ones. */
10101 _bfd_elf_merge_object_attributes (ibfd, obfd);
10102
10103 /* Check for any attributes not known on ARM. */
10104 in_list = elf_other_obj_attributes_proc (ibfd);
10105 out_listp = &elf_other_obj_attributes_proc (obfd);
10106 out_list = *out_listp;
10107
10108 for (; in_list || out_list; )
10109 {
10110 bfd *err_bfd = NULL;
10111 int err_tag = 0;
10112
10113 /* The tags for each list are in numerical order. */
10114 /* If the tags are equal, then merge. */
10115 if (out_list && (!in_list || in_list->tag > out_list->tag))
10116 {
10117 /* This attribute only exists in obfd. We can't merge, and we don't
10118 know what the tag means, so delete it. */
10119 err_bfd = obfd;
10120 err_tag = out_list->tag;
10121 *out_listp = out_list->next;
10122 out_list = *out_listp;
10123 }
10124 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10125 {
10126 /* This attribute only exists in ibfd. We can't merge, and we don't
10127 know what the tag means, so ignore it. */
10128 err_bfd = ibfd;
10129 err_tag = in_list->tag;
10130 in_list = in_list->next;
10131 }
10132 else /* The tags are equal. */
10133 {
10134 /* At present, all attributes in the list are unknown, and
10135 therefore can't be merged meaningfully. */
10136 err_bfd = obfd;
10137 err_tag = out_list->tag;
10138
10139 /* Only pass on attributes that match in both inputs. */
10140 if (in_list->attr.i != out_list->attr.i
10141 || in_list->attr.s != out_list->attr.s
10142 || (in_list->attr.s && out_list->attr.s
10143 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10144 {
10145 /* No match. Delete the attribute. */
10146 *out_listp = out_list->next;
10147 out_list = *out_listp;
10148 }
10149 else
10150 {
10151 /* Matched. Keep the attribute and move to the next. */
10152 out_list = out_list->next;
10153 in_list = in_list->next;
10154 }
10155 }
10156
10157 if (err_bfd)
10158 {
10159 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10160 if ((err_tag & 127) < 64)
10161 {
10162 _bfd_error_handler
10163 (_("%B: Unknown mandatory EABI object attribute %d"),
10164 err_bfd, err_tag);
10165 bfd_set_error (bfd_error_bad_value);
10166 result = FALSE;
10167 }
10168 else
10169 {
10170 _bfd_error_handler
10171 (_("Warning: %B: Unknown EABI object attribute %d"),
10172 err_bfd, err_tag);
10173 }
10174 }
10175 }
10176 return result;
10177 }
10178
10179
10180 /* Return TRUE if the two EABI versions are compatible. */
10181
10182 static bfd_boolean
10183 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10184 {
10185 /* v4 and v5 are the same spec before and after it was released,
10186 so allow mixing them. */
10187 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10188 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10189 return TRUE;
10190
10191 return (iver == over);
10192 }
10193
10194 /* Merge backend specific data from an object file to the output
10195 object file when linking. */
10196
10197 static bfd_boolean
10198 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
10199 {
10200 flagword out_flags;
10201 flagword in_flags;
10202 bfd_boolean flags_compatible = TRUE;
10203 asection *sec;
10204
10205 /* Check if we have the same endianness. */
10206 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
10207 return FALSE;
10208
10209 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10210 return TRUE;
10211
10212 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
10213 return FALSE;
10214
10215 /* The input BFD must have had its flags initialised. */
10216 /* The following seems bogus to me -- The flags are initialized in
10217 the assembler but I don't think an elf_flags_init field is
10218 written into the object. */
10219 /* BFD_ASSERT (elf_flags_init (ibfd)); */
10220
10221 in_flags = elf_elfheader (ibfd)->e_flags;
10222 out_flags = elf_elfheader (obfd)->e_flags;
10223
10224 /* In theory there is no reason why we couldn't handle this. However
10225 in practice it isn't even close to working and there is no real
10226 reason to want it. */
10227 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
10228 && !(ibfd->flags & DYNAMIC)
10229 && (in_flags & EF_ARM_BE8))
10230 {
10231 _bfd_error_handler (_("error: %B is already in final BE8 format"),
10232 ibfd);
10233 return FALSE;
10234 }
10235
10236 if (!elf_flags_init (obfd))
10237 {
10238 /* If the input is the default architecture and had the default
10239 flags then do not bother setting the flags for the output
10240 architecture; instead allow future merges to do this. If no
10241 future merges ever set these flags then they will retain their
10242 uninitialised values, which, surprise surprise, correspond
10243 to the default values. */
10244 if (bfd_get_arch_info (ibfd)->the_default
10245 && elf_elfheader (ibfd)->e_flags == 0)
10246 return TRUE;
10247
10248 elf_flags_init (obfd) = TRUE;
10249 elf_elfheader (obfd)->e_flags = in_flags;
10250
10251 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
10252 && bfd_get_arch_info (obfd)->the_default)
10253 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
10254
10255 return TRUE;
10256 }
10257
10258 /* Determine what should happen if the input ARM architecture
10259 does not match the output ARM architecture. */
10260 if (! bfd_arm_merge_machines (ibfd, obfd))
10261 return FALSE;
10262
10263 /* Identical flags must be compatible. */
10264 if (in_flags == out_flags)
10265 return TRUE;
10266
10267 /* Check to see if the input BFD actually contains any sections. If
10268 not, its flags may not have been initialised either, but it
10269 cannot actually cause any incompatibility. Do not short-circuit
10270 dynamic objects; their section list may be emptied by
10271 elf_link_add_object_symbols.
10272
10273 Also check to see if there are no code sections in the input.
10274 In this case there is no need to check for code specific flags.
10275 XXX - do we need to worry about floating-point format compatibility
10276 in data sections? */
10277 if (!(ibfd->flags & DYNAMIC))
10278 {
10279 bfd_boolean null_input_bfd = TRUE;
10280 bfd_boolean only_data_sections = TRUE;
10281
10282 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
10283 {
10284 /* Ignore synthetic glue sections. */
10285 if (strcmp (sec->name, ".glue_7")
10286 && strcmp (sec->name, ".glue_7t"))
10287 {
10288 if ((bfd_get_section_flags (ibfd, sec)
10289 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10290 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10291 only_data_sections = FALSE;
10292
10293 null_input_bfd = FALSE;
10294 break;
10295 }
10296 }
10297
10298 if (null_input_bfd || only_data_sections)
10299 return TRUE;
10300 }
10301
10302 /* Complain about various flag mismatches. */
10303 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
10304 EF_ARM_EABI_VERSION (out_flags)))
10305 {
10306 _bfd_error_handler
10307 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
10308 ibfd, obfd,
10309 (in_flags & EF_ARM_EABIMASK) >> 24,
10310 (out_flags & EF_ARM_EABIMASK) >> 24);
10311 return FALSE;
10312 }
10313
10314 /* Not sure what needs to be checked for EABI versions >= 1. */
10315 /* VxWorks libraries do not use these flags. */
10316 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
10317 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
10318 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
10319 {
10320 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10321 {
10322 _bfd_error_handler
10323 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
10324 ibfd, obfd,
10325 in_flags & EF_ARM_APCS_26 ? 26 : 32,
10326 out_flags & EF_ARM_APCS_26 ? 26 : 32);
10327 flags_compatible = FALSE;
10328 }
10329
10330 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10331 {
10332 if (in_flags & EF_ARM_APCS_FLOAT)
10333 _bfd_error_handler
10334 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
10335 ibfd, obfd);
10336 else
10337 _bfd_error_handler
10338 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
10339 ibfd, obfd);
10340
10341 flags_compatible = FALSE;
10342 }
10343
10344 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
10345 {
10346 if (in_flags & EF_ARM_VFP_FLOAT)
10347 _bfd_error_handler
10348 (_("error: %B uses VFP instructions, whereas %B does not"),
10349 ibfd, obfd);
10350 else
10351 _bfd_error_handler
10352 (_("error: %B uses FPA instructions, whereas %B does not"),
10353 ibfd, obfd);
10354
10355 flags_compatible = FALSE;
10356 }
10357
10358 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
10359 {
10360 if (in_flags & EF_ARM_MAVERICK_FLOAT)
10361 _bfd_error_handler
10362 (_("error: %B uses Maverick instructions, whereas %B does not"),
10363 ibfd, obfd);
10364 else
10365 _bfd_error_handler
10366 (_("error: %B does not use Maverick instructions, whereas %B does"),
10367 ibfd, obfd);
10368
10369 flags_compatible = FALSE;
10370 }
10371
10372 #ifdef EF_ARM_SOFT_FLOAT
10373 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
10374 {
10375 /* We can allow interworking between code that is VFP format
10376 layout, and uses either soft float or integer regs for
10377 passing floating point arguments and results. We already
10378 know that the APCS_FLOAT flags match; similarly for VFP
10379 flags. */
10380 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
10381 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
10382 {
10383 if (in_flags & EF_ARM_SOFT_FLOAT)
10384 _bfd_error_handler
10385 (_("error: %B uses software FP, whereas %B uses hardware FP"),
10386 ibfd, obfd);
10387 else
10388 _bfd_error_handler
10389 (_("error: %B uses hardware FP, whereas %B uses software FP"),
10390 ibfd, obfd);
10391
10392 flags_compatible = FALSE;
10393 }
10394 }
10395 #endif
10396
10397 /* Interworking mismatch is only a warning. */
10398 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10399 {
10400 if (in_flags & EF_ARM_INTERWORK)
10401 {
10402 _bfd_error_handler
10403 (_("Warning: %B supports interworking, whereas %B does not"),
10404 ibfd, obfd);
10405 }
10406 else
10407 {
10408 _bfd_error_handler
10409 (_("Warning: %B does not support interworking, whereas %B does"),
10410 ibfd, obfd);
10411 }
10412 }
10413 }
10414
10415 return flags_compatible;
10416 }
10417
10418 /* Display the flags field. */
10419
10420 static bfd_boolean
10421 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10422 {
10423 FILE * file = (FILE *) ptr;
10424 unsigned long flags;
10425
10426 BFD_ASSERT (abfd != NULL && ptr != NULL);
10427
10428 /* Print normal ELF private data. */
10429 _bfd_elf_print_private_bfd_data (abfd, ptr);
10430
10431 flags = elf_elfheader (abfd)->e_flags;
10432 /* Ignore init flag - it may not be set, despite the flags field
10433 containing valid data. */
10434
10435 /* xgettext:c-format */
10436 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10437
10438 switch (EF_ARM_EABI_VERSION (flags))
10439 {
10440 case EF_ARM_EABI_UNKNOWN:
10441 /* The following flag bits are GNU extensions and not part of the
10442 official ARM ELF extended ABI. Hence they are only decoded if
10443 the EABI version is not set. */
10444 if (flags & EF_ARM_INTERWORK)
10445 fprintf (file, _(" [interworking enabled]"));
10446
10447 if (flags & EF_ARM_APCS_26)
10448 fprintf (file, " [APCS-26]");
10449 else
10450 fprintf (file, " [APCS-32]");
10451
10452 if (flags & EF_ARM_VFP_FLOAT)
10453 fprintf (file, _(" [VFP float format]"));
10454 else if (flags & EF_ARM_MAVERICK_FLOAT)
10455 fprintf (file, _(" [Maverick float format]"));
10456 else
10457 fprintf (file, _(" [FPA float format]"));
10458
10459 if (flags & EF_ARM_APCS_FLOAT)
10460 fprintf (file, _(" [floats passed in float registers]"));
10461
10462 if (flags & EF_ARM_PIC)
10463 fprintf (file, _(" [position independent]"));
10464
10465 if (flags & EF_ARM_NEW_ABI)
10466 fprintf (file, _(" [new ABI]"));
10467
10468 if (flags & EF_ARM_OLD_ABI)
10469 fprintf (file, _(" [old ABI]"));
10470
10471 if (flags & EF_ARM_SOFT_FLOAT)
10472 fprintf (file, _(" [software FP]"));
10473
10474 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10475 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10476 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10477 | EF_ARM_MAVERICK_FLOAT);
10478 break;
10479
10480 case EF_ARM_EABI_VER1:
10481 fprintf (file, _(" [Version1 EABI]"));
10482
10483 if (flags & EF_ARM_SYMSARESORTED)
10484 fprintf (file, _(" [sorted symbol table]"));
10485 else
10486 fprintf (file, _(" [unsorted symbol table]"));
10487
10488 flags &= ~ EF_ARM_SYMSARESORTED;
10489 break;
10490
10491 case EF_ARM_EABI_VER2:
10492 fprintf (file, _(" [Version2 EABI]"));
10493
10494 if (flags & EF_ARM_SYMSARESORTED)
10495 fprintf (file, _(" [sorted symbol table]"));
10496 else
10497 fprintf (file, _(" [unsorted symbol table]"));
10498
10499 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10500 fprintf (file, _(" [dynamic symbols use segment index]"));
10501
10502 if (flags & EF_ARM_MAPSYMSFIRST)
10503 fprintf (file, _(" [mapping symbols precede others]"));
10504
10505 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10506 | EF_ARM_MAPSYMSFIRST);
10507 break;
10508
10509 case EF_ARM_EABI_VER3:
10510 fprintf (file, _(" [Version3 EABI]"));
10511 break;
10512
10513 case EF_ARM_EABI_VER4:
10514 fprintf (file, _(" [Version4 EABI]"));
10515 goto eabi;
10516
10517 case EF_ARM_EABI_VER5:
10518 fprintf (file, _(" [Version5 EABI]"));
10519 eabi:
10520 if (flags & EF_ARM_BE8)
10521 fprintf (file, _(" [BE8]"));
10522
10523 if (flags & EF_ARM_LE8)
10524 fprintf (file, _(" [LE8]"));
10525
10526 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10527 break;
10528
10529 default:
10530 fprintf (file, _(" <EABI version unrecognised>"));
10531 break;
10532 }
10533
10534 flags &= ~ EF_ARM_EABIMASK;
10535
10536 if (flags & EF_ARM_RELEXEC)
10537 fprintf (file, _(" [relocatable executable]"));
10538
10539 if (flags & EF_ARM_HASENTRY)
10540 fprintf (file, _(" [has entry point]"));
10541
10542 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10543
10544 if (flags)
10545 fprintf (file, _("<Unrecognised flag bits set>"));
10546
10547 fputc ('\n', file);
10548
10549 return TRUE;
10550 }
10551
10552 static int
10553 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10554 {
10555 switch (ELF_ST_TYPE (elf_sym->st_info))
10556 {
10557 case STT_ARM_TFUNC:
10558 return ELF_ST_TYPE (elf_sym->st_info);
10559
10560 case STT_ARM_16BIT:
10561 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10562 This allows us to distinguish between data used by Thumb instructions
10563 and non-data (which is probably code) inside Thumb regions of an
10564 executable. */
10565 if (type != STT_OBJECT && type != STT_TLS)
10566 return ELF_ST_TYPE (elf_sym->st_info);
10567 break;
10568
10569 default:
10570 break;
10571 }
10572
10573 return type;
10574 }
10575
10576 static asection *
10577 elf32_arm_gc_mark_hook (asection *sec,
10578 struct bfd_link_info *info,
10579 Elf_Internal_Rela *rel,
10580 struct elf_link_hash_entry *h,
10581 Elf_Internal_Sym *sym)
10582 {
10583 if (h != NULL)
10584 switch (ELF32_R_TYPE (rel->r_info))
10585 {
10586 case R_ARM_GNU_VTINHERIT:
10587 case R_ARM_GNU_VTENTRY:
10588 return NULL;
10589 }
10590
10591 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10592 }
10593
10594 /* Update the got entry reference counts for the section being removed. */
10595
10596 static bfd_boolean
10597 elf32_arm_gc_sweep_hook (bfd * abfd,
10598 struct bfd_link_info * info,
10599 asection * sec,
10600 const Elf_Internal_Rela * relocs)
10601 {
10602 Elf_Internal_Shdr *symtab_hdr;
10603 struct elf_link_hash_entry **sym_hashes;
10604 bfd_signed_vma *local_got_refcounts;
10605 const Elf_Internal_Rela *rel, *relend;
10606 struct elf32_arm_link_hash_table * globals;
10607
10608 if (info->relocatable)
10609 return TRUE;
10610
10611 globals = elf32_arm_hash_table (info);
10612
10613 elf_section_data (sec)->local_dynrel = NULL;
10614
10615 symtab_hdr = & elf_symtab_hdr (abfd);
10616 sym_hashes = elf_sym_hashes (abfd);
10617 local_got_refcounts = elf_local_got_refcounts (abfd);
10618
10619 check_use_blx (globals);
10620
10621 relend = relocs + sec->reloc_count;
10622 for (rel = relocs; rel < relend; rel++)
10623 {
10624 unsigned long r_symndx;
10625 struct elf_link_hash_entry *h = NULL;
10626 int r_type;
10627
10628 r_symndx = ELF32_R_SYM (rel->r_info);
10629 if (r_symndx >= symtab_hdr->sh_info)
10630 {
10631 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10632 while (h->root.type == bfd_link_hash_indirect
10633 || h->root.type == bfd_link_hash_warning)
10634 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10635 }
10636
10637 r_type = ELF32_R_TYPE (rel->r_info);
10638 r_type = arm_real_reloc_type (globals, r_type);
10639 switch (r_type)
10640 {
10641 case R_ARM_GOT32:
10642 case R_ARM_GOT_PREL:
10643 case R_ARM_TLS_GD32:
10644 case R_ARM_TLS_IE32:
10645 if (h != NULL)
10646 {
10647 if (h->got.refcount > 0)
10648 h->got.refcount -= 1;
10649 }
10650 else if (local_got_refcounts != NULL)
10651 {
10652 if (local_got_refcounts[r_symndx] > 0)
10653 local_got_refcounts[r_symndx] -= 1;
10654 }
10655 break;
10656
10657 case R_ARM_TLS_LDM32:
10658 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10659 break;
10660
10661 case R_ARM_ABS32:
10662 case R_ARM_ABS32_NOI:
10663 case R_ARM_REL32:
10664 case R_ARM_REL32_NOI:
10665 case R_ARM_PC24:
10666 case R_ARM_PLT32:
10667 case R_ARM_CALL:
10668 case R_ARM_JUMP24:
10669 case R_ARM_PREL31:
10670 case R_ARM_THM_CALL:
10671 case R_ARM_THM_JUMP24:
10672 case R_ARM_THM_JUMP19:
10673 case R_ARM_MOVW_ABS_NC:
10674 case R_ARM_MOVT_ABS:
10675 case R_ARM_MOVW_PREL_NC:
10676 case R_ARM_MOVT_PREL:
10677 case R_ARM_THM_MOVW_ABS_NC:
10678 case R_ARM_THM_MOVT_ABS:
10679 case R_ARM_THM_MOVW_PREL_NC:
10680 case R_ARM_THM_MOVT_PREL:
10681 /* Should the interworking branches be here also? */
10682
10683 if (h != NULL)
10684 {
10685 struct elf32_arm_link_hash_entry *eh;
10686 struct elf32_arm_relocs_copied **pp;
10687 struct elf32_arm_relocs_copied *p;
10688
10689 eh = (struct elf32_arm_link_hash_entry *) h;
10690
10691 if (h->plt.refcount > 0)
10692 {
10693 h->plt.refcount -= 1;
10694 if (r_type == R_ARM_THM_CALL)
10695 eh->plt_maybe_thumb_refcount--;
10696
10697 if (r_type == R_ARM_THM_JUMP24
10698 || r_type == R_ARM_THM_JUMP19)
10699 eh->plt_thumb_refcount--;
10700 }
10701
10702 if (r_type == R_ARM_ABS32
10703 || r_type == R_ARM_REL32
10704 || r_type == R_ARM_ABS32_NOI
10705 || r_type == R_ARM_REL32_NOI)
10706 {
10707 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10708 pp = &p->next)
10709 if (p->section == sec)
10710 {
10711 p->count -= 1;
10712 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10713 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10714 p->pc_count -= 1;
10715 if (p->count == 0)
10716 *pp = p->next;
10717 break;
10718 }
10719 }
10720 }
10721 break;
10722
10723 default:
10724 break;
10725 }
10726 }
10727
10728 return TRUE;
10729 }
10730
10731 /* Look through the relocs for a section during the first phase. */
10732
10733 static bfd_boolean
10734 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10735 asection *sec, const Elf_Internal_Rela *relocs)
10736 {
10737 Elf_Internal_Shdr *symtab_hdr;
10738 struct elf_link_hash_entry **sym_hashes;
10739 const Elf_Internal_Rela *rel;
10740 const Elf_Internal_Rela *rel_end;
10741 bfd *dynobj;
10742 asection *sreloc;
10743 bfd_vma *local_got_offsets;
10744 struct elf32_arm_link_hash_table *htab;
10745 bfd_boolean needs_plt;
10746 unsigned long nsyms;
10747
10748 if (info->relocatable)
10749 return TRUE;
10750
10751 BFD_ASSERT (is_arm_elf (abfd));
10752
10753 htab = elf32_arm_hash_table (info);
10754 sreloc = NULL;
10755
10756 /* Create dynamic sections for relocatable executables so that we can
10757 copy relocations. */
10758 if (htab->root.is_relocatable_executable
10759 && ! htab->root.dynamic_sections_created)
10760 {
10761 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10762 return FALSE;
10763 }
10764
10765 dynobj = elf_hash_table (info)->dynobj;
10766 local_got_offsets = elf_local_got_offsets (abfd);
10767
10768 symtab_hdr = & elf_symtab_hdr (abfd);
10769 sym_hashes = elf_sym_hashes (abfd);
10770 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10771
10772 rel_end = relocs + sec->reloc_count;
10773 for (rel = relocs; rel < rel_end; rel++)
10774 {
10775 struct elf_link_hash_entry *h;
10776 struct elf32_arm_link_hash_entry *eh;
10777 unsigned long r_symndx;
10778 int r_type;
10779
10780 r_symndx = ELF32_R_SYM (rel->r_info);
10781 r_type = ELF32_R_TYPE (rel->r_info);
10782 r_type = arm_real_reloc_type (htab, r_type);
10783
10784 if (r_symndx >= nsyms
10785 /* PR 9934: It is possible to have relocations that do not
10786 refer to symbols, thus it is also possible to have an
10787 object file containing relocations but no symbol table. */
10788 && (r_symndx > 0 || nsyms > 0))
10789 {
10790 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10791 r_symndx);
10792 return FALSE;
10793 }
10794
10795 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10796 h = NULL;
10797 else
10798 {
10799 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10800 while (h->root.type == bfd_link_hash_indirect
10801 || h->root.type == bfd_link_hash_warning)
10802 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10803 }
10804
10805 eh = (struct elf32_arm_link_hash_entry *) h;
10806
10807 switch (r_type)
10808 {
10809 case R_ARM_GOT32:
10810 case R_ARM_GOT_PREL:
10811 case R_ARM_TLS_GD32:
10812 case R_ARM_TLS_IE32:
10813 /* This symbol requires a global offset table entry. */
10814 {
10815 int tls_type, old_tls_type;
10816
10817 switch (r_type)
10818 {
10819 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10820 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10821 default: tls_type = GOT_NORMAL; break;
10822 }
10823
10824 if (h != NULL)
10825 {
10826 h->got.refcount++;
10827 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10828 }
10829 else
10830 {
10831 bfd_signed_vma *local_got_refcounts;
10832
10833 /* This is a global offset table entry for a local symbol. */
10834 local_got_refcounts = elf_local_got_refcounts (abfd);
10835 if (local_got_refcounts == NULL)
10836 {
10837 bfd_size_type size;
10838
10839 size = symtab_hdr->sh_info;
10840 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10841 local_got_refcounts = bfd_zalloc (abfd, size);
10842 if (local_got_refcounts == NULL)
10843 return FALSE;
10844 elf_local_got_refcounts (abfd) = local_got_refcounts;
10845 elf32_arm_local_got_tls_type (abfd)
10846 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10847 }
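/* Note that the refcount array and the per-symbol TLS type array share
   the single block allocated above: the char-sized TLS types live
   immediately after the sh_info refcounts.  */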
10848 local_got_refcounts[r_symndx] += 1;
10849 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10850 }
10851
10852 /* We will already have issued an error message if there is a
10853 TLS / non-TLS mismatch, based on the symbol type. We don't
10854 support any linker relaxations. So just combine any TLS
10855 types needed. */
10856 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10857 && tls_type != GOT_NORMAL)
10858 tls_type |= old_tls_type;
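/* For instance, a symbol referenced through both R_ARM_TLS_GD32 and
   R_ARM_TLS_IE32 ends up with a tls_type holding both GOT_TLS_GD and
   GOT_TLS_IE.  */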
10859
10860 if (old_tls_type != tls_type)
10861 {
10862 if (h != NULL)
10863 elf32_arm_hash_entry (h)->tls_type = tls_type;
10864 else
10865 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10866 }
10867 }
10868 /* Fall through. */
10869
10870 case R_ARM_TLS_LDM32:
10871 if (r_type == R_ARM_TLS_LDM32)
10872 htab->tls_ldm_got.refcount++;
10873 /* Fall through. */
10874
10875 case R_ARM_GOTOFF32:
10876 case R_ARM_GOTPC:
10877 if (htab->sgot == NULL)
10878 {
10879 if (htab->root.dynobj == NULL)
10880 htab->root.dynobj = abfd;
10881 if (!create_got_section (htab->root.dynobj, info))
10882 return FALSE;
10883 }
10884 break;
10885
10886 case R_ARM_ABS12:
10887 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10888 ldr __GOTT_INDEX__ offsets. */
10889 if (!htab->vxworks_p)
10890 break;
10891 /* Fall through. */
10892
10893 case R_ARM_PC24:
10894 case R_ARM_PLT32:
10895 case R_ARM_CALL:
10896 case R_ARM_JUMP24:
10897 case R_ARM_PREL31:
10898 case R_ARM_THM_CALL:
10899 case R_ARM_THM_JUMP24:
10900 case R_ARM_THM_JUMP19:
10901 needs_plt = 1;
10902 goto normal_reloc;
10903
10904 case R_ARM_MOVW_ABS_NC:
10905 case R_ARM_MOVT_ABS:
10906 case R_ARM_THM_MOVW_ABS_NC:
10907 case R_ARM_THM_MOVT_ABS:
10908 if (info->shared)
10909 {
10910 (*_bfd_error_handler)
10911 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10912 abfd, elf32_arm_howto_table_1[r_type].name,
10913 (h) ? h->root.root.string : "a local symbol");
10914 bfd_set_error (bfd_error_bad_value);
10915 return FALSE;
10916 }
10917
10918 /* Fall through. */
10919 case R_ARM_ABS32:
10920 case R_ARM_ABS32_NOI:
10921 case R_ARM_REL32:
10922 case R_ARM_REL32_NOI:
10923 case R_ARM_MOVW_PREL_NC:
10924 case R_ARM_MOVT_PREL:
10925 case R_ARM_THM_MOVW_PREL_NC:
10926 case R_ARM_THM_MOVT_PREL:
10927 needs_plt = 0;
10928 normal_reloc:
10929
10930 /* Should the interworking branches be listed here? */
10931 if (h != NULL)
10932 {
10933 /* If this reloc is in a read-only section, we might
10934 need a copy reloc. We can't check reliably at this
10935 stage whether the section is read-only, as input
10936 sections have not yet been mapped to output sections.
10937 Tentatively set the flag for now, and correct in
10938 adjust_dynamic_symbol. */
10939 if (!info->shared)
10940 h->non_got_ref = 1;
10941
10942 /* We may need a .plt entry if the function this reloc
10943 refers to is in a different object. We can't tell for
10944 sure yet, because something later might force the
10945 symbol local. */
10946 if (needs_plt)
10947 h->needs_plt = 1;
10948
10949 /* If we create a PLT entry, this relocation will reference
10950 it, even if it's an ABS32 relocation. */
10951 h->plt.refcount += 1;
10952
10953 /* It's too early to use htab->use_blx here, so we have to
10954 record possible blx references separately from
10955 relocs that definitely need a thumb stub. */
10956
10957 if (r_type == R_ARM_THM_CALL)
10958 eh->plt_maybe_thumb_refcount += 1;
10959
10960 if (r_type == R_ARM_THM_JUMP24
10961 || r_type == R_ARM_THM_JUMP19)
10962 eh->plt_thumb_refcount += 1;
10963 }
10964
10965 /* If we are creating a shared library or relocatable executable,
10966 and this is a reloc against a global symbol, or a non PC
10967 relative reloc against a local symbol, then we need to copy
10968 the reloc into the shared library. However, if we are linking
10969 with -Bsymbolic, we do not need to copy a reloc against a
10970 global symbol which is defined in an object we are
10971 including in the link (i.e., DEF_REGULAR is set). At
10972 this point we have not seen all the input files, so it is
10973 possible that DEF_REGULAR is not set now but will be set
10974 later (it is never cleared). We account for that
10975 possibility below by storing information in the
10976 relocs_copied field of the hash table entry. */
10977 if ((info->shared || htab->root.is_relocatable_executable)
10978 && (sec->flags & SEC_ALLOC) != 0
10979 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10980 || (h != NULL && ! h->needs_plt
10981 && (! info->symbolic || ! h->def_regular))))
10982 {
10983 struct elf32_arm_relocs_copied *p, **head;
10984
10985 /* When creating a shared object, we must copy these
10986 reloc types into the output file. We create a reloc
10987 section in dynobj and make room for this reloc. */
10988 if (sreloc == NULL)
10989 {
10990 sreloc = _bfd_elf_make_dynamic_reloc_section
10991 (sec, dynobj, 2, abfd, ! htab->use_rel);
10992
10993 if (sreloc == NULL)
10994 return FALSE;
10995
10996 /* BPABI objects never have dynamic relocations mapped. */
10997 if (htab->symbian_p)
10998 {
10999 flagword flags;
11000
11001 flags = bfd_get_section_flags (dynobj, sreloc);
11002 flags &= ~(SEC_LOAD | SEC_ALLOC);
11003 bfd_set_section_flags (dynobj, sreloc, flags);
11004 }
11005 }
11006
11007 /* If this is a global symbol, we count the number of
11008 relocations we need for this symbol. */
11009 if (h != NULL)
11010 {
11011 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
11012 }
11013 else
11014 {
11015 /* Track dynamic relocs needed for local syms too.
11016 We really need local syms available to do this
11017 easily. Oh well. */
11018 asection *s;
11019 void *vpp;
11020 Elf_Internal_Sym *isym;
11021
11022 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11023 abfd, r_symndx);
11024 if (isym == NULL)
11025 return FALSE;
11026
11027 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11028 if (s == NULL)
11029 s = sec;
11030
11031 vpp = &elf_section_data (s)->local_dynrel;
11032 head = (struct elf32_arm_relocs_copied **) vpp;
11033 }
11034
11035 p = *head;
11036 if (p == NULL || p->section != sec)
11037 {
11038 bfd_size_type amt = sizeof *p;
11039
11040 p = bfd_alloc (htab->root.dynobj, amt);
11041 if (p == NULL)
11042 return FALSE;
11043 p->next = *head;
11044 *head = p;
11045 p->section = sec;
11046 p->count = 0;
11047 p->pc_count = 0;
11048 }
11049
11050 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11051 p->pc_count += 1;
11052 p->count += 1;
11053 }
11054 break;
11055
11056 /* This relocation describes the C++ object vtable hierarchy.
11057 Reconstruct it for later use during GC. */
11058 case R_ARM_GNU_VTINHERIT:
11059 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11060 return FALSE;
11061 break;
11062
11063 /* This relocation describes which C++ vtable entries are actually
11064 used. Record for later use during GC. */
11065 case R_ARM_GNU_VTENTRY:
11066 BFD_ASSERT (h != NULL);
11067 if (h != NULL
11068 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11069 return FALSE;
11070 break;
11071 }
11072 }
11073
11074 return TRUE;
11075 }
11076
11077 /* Unwinding tables are not referenced directly. This pass marks them as
11078 required if the corresponding code section is marked. */
11079
11080 static bfd_boolean
11081 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11082 elf_gc_mark_hook_fn gc_mark_hook)
11083 {
11084 bfd *sub;
11085 Elf_Internal_Shdr **elf_shdrp;
11086 bfd_boolean again;
11087
11088 /* Marking EH data may cause additional code sections to be marked,
11089 requiring multiple passes. */
11090 again = TRUE;
11091 while (again)
11092 {
11093 again = FALSE;
11094 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11095 {
11096 asection *o;
11097
11098 if (! is_arm_elf (sub))
11099 continue;
11100
11101 elf_shdrp = elf_elfsections (sub);
11102 for (o = sub->sections; o != NULL; o = o->next)
11103 {
11104 Elf_Internal_Shdr *hdr;
11105
11106 hdr = &elf_section_data (o)->this_hdr;
11107 if (hdr->sh_type == SHT_ARM_EXIDX
11108 && hdr->sh_link
11109 && hdr->sh_link < elf_numsections (sub)
11110 && !o->gc_mark
11111 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11112 {
11113 again = TRUE;
11114 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11115 return FALSE;
11116 }
11117 }
11118 }
11119 }
11120
11121 return TRUE;
11122 }
11123
11124 /* Treat mapping symbols as special target symbols. */
11125
11126 static bfd_boolean
11127 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11128 {
11129 return bfd_is_arm_special_symbol_name (sym->name,
11130 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11131 }
11132
11133 /* This is a copy of elf_find_function() from elf.c except that
11134 ARM mapping symbols are ignored when looking for function names
11135 and STT_ARM_TFUNC is considered to be a function type. */
11136
11137 static bfd_boolean
11138 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11139 asection * section,
11140 asymbol ** symbols,
11141 bfd_vma offset,
11142 const char ** filename_ptr,
11143 const char ** functionname_ptr)
11144 {
11145 const char * filename = NULL;
11146 asymbol * func = NULL;
11147 bfd_vma low_func = 0;
11148 asymbol ** p;
11149
11150 for (p = symbols; *p != NULL; p++)
11151 {
11152 elf_symbol_type *q;
11153
11154 q = (elf_symbol_type *) *p;
11155
11156 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11157 {
11158 default:
11159 break;
11160 case STT_FILE:
11161 filename = bfd_asymbol_name (&q->symbol);
11162 break;
11163 case STT_FUNC:
11164 case STT_ARM_TFUNC:
11165 case STT_NOTYPE:
11166 /* Skip mapping symbols. */
11167 if ((q->symbol.flags & BSF_LOCAL)
11168 && bfd_is_arm_special_symbol_name (q->symbol.name,
11169 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11170 continue;
11171 /* Fall through. */
11172 if (bfd_get_section (&q->symbol) == section
11173 && q->symbol.value >= low_func
11174 && q->symbol.value <= offset)
11175 {
11176 func = (asymbol *) q;
11177 low_func = q->symbol.value;
11178 }
11179 break;
11180 }
11181 }
11182
11183 if (func == NULL)
11184 return FALSE;
11185
11186 if (filename_ptr)
11187 *filename_ptr = filename;
11188 if (functionname_ptr)
11189 *functionname_ptr = bfd_asymbol_name (func);
11190
11191 return TRUE;
11192 }
11193
11194
11195 /* Find the nearest line to a particular section and offset, for error
11196 reporting. This code is a duplicate of the code in elf.c, except
11197 that it uses arm_elf_find_function. */
11198
11199 static bfd_boolean
11200 elf32_arm_find_nearest_line (bfd * abfd,
11201 asection * section,
11202 asymbol ** symbols,
11203 bfd_vma offset,
11204 const char ** filename_ptr,
11205 const char ** functionname_ptr,
11206 unsigned int * line_ptr)
11207 {
11208 bfd_boolean found = FALSE;
11209
11210 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11211
11212 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11213 filename_ptr, functionname_ptr,
11214 line_ptr, 0,
11215 & elf_tdata (abfd)->dwarf2_find_line_info))
11216 {
11217 if (!*functionname_ptr)
11218 arm_elf_find_function (abfd, section, symbols, offset,
11219 *filename_ptr ? NULL : filename_ptr,
11220 functionname_ptr);
11221
11222 return TRUE;
11223 }
11224
11225 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11226 & found, filename_ptr,
11227 functionname_ptr, line_ptr,
11228 & elf_tdata (abfd)->line_info))
11229 return FALSE;
11230
11231 if (found && (*functionname_ptr || *line_ptr))
11232 return TRUE;
11233
11234 if (symbols == NULL)
11235 return FALSE;
11236
11237 if (! arm_elf_find_function (abfd, section, symbols, offset,
11238 filename_ptr, functionname_ptr))
11239 return FALSE;
11240
11241 *line_ptr = 0;
11242 return TRUE;
11243 }
11244
11245 static bfd_boolean
11246 elf32_arm_find_inliner_info (bfd * abfd,
11247 const char ** filename_ptr,
11248 const char ** functionname_ptr,
11249 unsigned int * line_ptr)
11250 {
11251 bfd_boolean found;
11252 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11253 functionname_ptr, line_ptr,
11254 & elf_tdata (abfd)->dwarf2_find_line_info);
11255 return found;
11256 }
11257
11258 /* Adjust a symbol defined by a dynamic object and referenced by a
11259 regular object. The current definition is in some section of the
11260 dynamic object, but we're not including those sections. We have to
11261 change the definition to something the rest of the link can
11262 understand. */
11263
11264 static bfd_boolean
11265 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11266 struct elf_link_hash_entry * h)
11267 {
11268 bfd * dynobj;
11269 asection * s;
11270 struct elf32_arm_link_hash_entry * eh;
11271 struct elf32_arm_link_hash_table *globals;
11272
11273 globals = elf32_arm_hash_table (info);
11274 dynobj = elf_hash_table (info)->dynobj;
11275
11276 /* Make sure we know what is going on here. */
11277 BFD_ASSERT (dynobj != NULL
11278 && (h->needs_plt
11279 || h->u.weakdef != NULL
11280 || (h->def_dynamic
11281 && h->ref_regular
11282 && !h->def_regular)));
11283
11284 eh = (struct elf32_arm_link_hash_entry *) h;
11285
11286 /* If this is a function, put it in the procedure linkage table. We
11287 will fill in the contents of the procedure linkage table later,
11288 when we know the address of the .got section. */
11289 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11290 || h->needs_plt)
11291 {
11292 if (h->plt.refcount <= 0
11293 || SYMBOL_CALLS_LOCAL (info, h)
11294 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11295 && h->root.type == bfd_link_hash_undefweak))
11296 {
11297 /* This case can occur if we saw a PLT32 reloc in an input
11298 file, but the symbol was never referred to by a dynamic
11299 object, or if all references were garbage collected. In
11300 such a case, we don't actually need to build a procedure
11301 linkage table, and we can just do a PC24 reloc instead. */
11302 h->plt.offset = (bfd_vma) -1;
11303 eh->plt_thumb_refcount = 0;
11304 eh->plt_maybe_thumb_refcount = 0;
11305 h->needs_plt = 0;
11306 }
11307
11308 return TRUE;
11309 }
11310 else
11311 {
11312 /* It's possible that we incorrectly decided a .plt reloc was
11313 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11314 in check_relocs. We can't decide accurately between function
11315 and non-function syms in check_relocs; objects loaded later in
11316 the link may change h->type. So fix it now. */
11317 h->plt.offset = (bfd_vma) -1;
11318 eh->plt_thumb_refcount = 0;
11319 eh->plt_maybe_thumb_refcount = 0;
11320 }
11321
11322 /* If this is a weak symbol, and there is a real definition, the
11323 processor independent code will have arranged for us to see the
11324 real definition first, and we can just use the same value. */
11325 if (h->u.weakdef != NULL)
11326 {
11327 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11328 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11329 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11330 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11331 return TRUE;
11332 }
11333
11334 /* If there are no non-GOT references, we do not need a copy
11335 relocation. */
11336 if (!h->non_got_ref)
11337 return TRUE;
11338
11339 /* This is a reference to a symbol defined by a dynamic object which
11340 is not a function. */
11341
11342 /* If we are creating a shared library, we must presume that the
11343 only references to the symbol are via the global offset table.
11344 For such cases we need not do anything here; the relocations will
11345 be handled correctly by relocate_section. Relocatable executables
11346 can reference data in shared objects directly, so we don't need to
11347 do anything here. */
11348 if (info->shared || globals->root.is_relocatable_executable)
11349 return TRUE;
11350
11351 if (h->size == 0)
11352 {
11353 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11354 h->root.root.string);
11355 return TRUE;
11356 }
11357
11358 /* We must allocate the symbol in our .dynbss section, which will
11359 become part of the .bss section of the executable. There will be
11360 an entry for this symbol in the .dynsym section. The dynamic
11361 object will contain position independent code, so all references
11362 from the dynamic object to this symbol will go through the global
11363 offset table. The dynamic linker will use the .dynsym entry to
11364 determine the address it must put in the global offset table, so
11365 both the dynamic object and the regular object will refer to the
11366 same memory location for the variable. */
11367 s = bfd_get_section_by_name (dynobj, ".dynbss");
11368 BFD_ASSERT (s != NULL);
11369
11370 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11371 copy the initial value out of the dynamic object and into the
11372 runtime process image. We need to remember the offset into the
11373 .rel(a).bss section we are going to use. */
11374 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11375 {
11376 asection *srel;
11377
11378 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11379 BFD_ASSERT (srel != NULL);
11380 srel->size += RELOC_SIZE (globals);
11381 h->needs_copy = 1;
11382 }
11383
11384 return _bfd_elf_adjust_dynamic_copy (h, s);
11385 }
11386
11387 /* Allocate space in .plt, .got and associated reloc sections for
11388 dynamic relocs. */
11389
11390 static bfd_boolean
11391 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11392 {
11393 struct bfd_link_info *info;
11394 struct elf32_arm_link_hash_table *htab;
11395 struct elf32_arm_link_hash_entry *eh;
11396 struct elf32_arm_relocs_copied *p;
11397 bfd_signed_vma thumb_refs;
11398
11399 eh = (struct elf32_arm_link_hash_entry *) h;
11400
11401 if (h->root.type == bfd_link_hash_indirect)
11402 return TRUE;
11403
11404 if (h->root.type == bfd_link_hash_warning)
11405 /* When warning symbols are created, they **replace** the "real"
11406 entry in the hash table, thus we never get to see the real
11407 symbol in a hash traversal. So look at it now. */
11408 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11409
11410 info = (struct bfd_link_info *) inf;
11411 htab = elf32_arm_hash_table (info);
11412
11413 if (htab->root.dynamic_sections_created
11414 && h->plt.refcount > 0)
11415 {
11416 /* Make sure this symbol is output as a dynamic symbol.
11417 Undefined weak syms won't yet be marked as dynamic. */
11418 if (h->dynindx == -1
11419 && !h->forced_local)
11420 {
11421 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11422 return FALSE;
11423 }
11424
11425 if (info->shared
11426 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11427 {
11428 asection *s = htab->splt;
11429
11430 /* If this is the first .plt entry, make room for the special
11431 first entry. */
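/* The special first entry is the PLT header (PLT0): the common stub
   through which every later PLT entry reaches the dynamic linker's
   resolver.  Its contents are written in
   elf32_arm_finish_dynamic_sections; here we only reserve
   plt_header_size bytes for it.  */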
11432 if (s->size == 0)
11433 s->size += htab->plt_header_size;
11434
11435 h->plt.offset = s->size;
11436
11437 /* If we will insert a Thumb trampoline before this PLT, leave room
11438 for it. */
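/* The trampoline is a pair of 16-bit Thumb instructions (conventionally
   a "bx pc; nop" sequence that switches to Arm state) written at the
   two halfwords immediately before the Arm PLT entry by
   elf32_arm_finish_dynamic_symbol, so that Thumb callers on targets
   without BLX can still reach the Arm-state PLT code.  */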
11439 thumb_refs = eh->plt_thumb_refcount;
11440 if (!htab->use_blx)
11441 thumb_refs += eh->plt_maybe_thumb_refcount;
11442
11443 if (thumb_refs > 0)
11444 {
11445 h->plt.offset += PLT_THUMB_STUB_SIZE;
11446 s->size += PLT_THUMB_STUB_SIZE;
11447 }
11448
11449 /* If this symbol is not defined in a regular file, and we are
11450 not generating a shared library, then set the symbol to this
11451 location in the .plt. This is required to make function
11452 pointers compare as equal between the normal executable and
11453 the shared library. */
11454 if (! info->shared
11455 && !h->def_regular)
11456 {
11457 h->root.u.def.section = s;
11458 h->root.u.def.value = h->plt.offset;
11459
11460 /* Make sure the function is not marked as Thumb, in case
11461 it is the target of an ABS32 relocation, which will
11462 point to the PLT entry. */
11463 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11464 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11465 }
11466
11467 /* Make room for this entry. */
11468 s->size += htab->plt_entry_size;
11469
11470 if (!htab->symbian_p)
11471 {
11472 /* We also need to make an entry in the .got.plt section, which
11473 will be placed in the .got section by the linker script. */
11474 eh->plt_got_offset = htab->sgotplt->size;
11475 htab->sgotplt->size += 4;
11476 }
11477
11478 /* We also need to make an entry in the .rel(a).plt section. */
11479 htab->srelplt->size += RELOC_SIZE (htab);
11480
11481 /* VxWorks executables have a second set of relocations for
11482 each PLT entry. They go in a separate relocation section,
11483 which is processed by the kernel loader. */
11484 if (htab->vxworks_p && !info->shared)
11485 {
11486 /* There is a relocation for the initial PLT entry:
11487 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11488 if (h->plt.offset == htab->plt_header_size)
11489 htab->srelplt2->size += RELOC_SIZE (htab);
11490
11491 /* There are two extra relocations for each subsequent
11492 PLT entry: an R_ARM_32 relocation for the GOT entry,
11493 and an R_ARM_32 relocation for the PLT entry. */
11494 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11495 }
11496 }
11497 else
11498 {
11499 h->plt.offset = (bfd_vma) -1;
11500 h->needs_plt = 0;
11501 }
11502 }
11503 else
11504 {
11505 h->plt.offset = (bfd_vma) -1;
11506 h->needs_plt = 0;
11507 }
11508
11509 if (h->got.refcount > 0)
11510 {
11511 asection *s;
11512 bfd_boolean dyn;
11513 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11514 int indx;
11515
11516 /* Make sure this symbol is output as a dynamic symbol.
11517 Undefined weak syms won't yet be marked as dynamic. */
11518 if (h->dynindx == -1
11519 && !h->forced_local)
11520 {
11521 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11522 return FALSE;
11523 }
11524
11525 if (!htab->symbian_p)
11526 {
11527 s = htab->sgot;
11528 h->got.offset = s->size;
11529
11530 if (tls_type == GOT_UNKNOWN)
11531 abort ();
11532
11533 if (tls_type == GOT_NORMAL)
11534 /* Non-TLS symbols need one GOT slot. */
11535 s->size += 4;
11536 else
11537 {
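/* Conventional ELF TLS layout: a GD (general dynamic) reference
   needs a pair of GOT words holding the module index and the offset
   within that module's TLS block, while an IE (initial exec)
   reference needs a single word holding the tp-relative offset.  A
   symbol referenced both ways needs both allocations.  */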
11538 if (tls_type & GOT_TLS_GD)
11539 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11540 s->size += 8;
11541 if (tls_type & GOT_TLS_IE)
11542 /* R_ARM_TLS_IE32 needs one GOT slot. */
11543 s->size += 4;
11544 }
11545
11546 dyn = htab->root.dynamic_sections_created;
11547
11548 indx = 0;
11549 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11550 && (!info->shared
11551 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11552 indx = h->dynindx;
11553
11554 if (tls_type != GOT_NORMAL
11555 && (info->shared || indx != 0)
11556 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11557 || h->root.type != bfd_link_hash_undefweak))
11558 {
11559 if (tls_type & GOT_TLS_IE)
11560 htab->srelgot->size += RELOC_SIZE (htab);
11561
11562 if (tls_type & GOT_TLS_GD)
11563 htab->srelgot->size += RELOC_SIZE (htab);
11564
11565 if ((tls_type & GOT_TLS_GD) && indx != 0)
11566 htab->srelgot->size += RELOC_SIZE (htab);
11567 }
11568 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11569 || h->root.type != bfd_link_hash_undefweak)
11570 && (info->shared
11571 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11572 htab->srelgot->size += RELOC_SIZE (htab);
11573 }
11574 }
11575 else
11576 h->got.offset = (bfd_vma) -1;
11577
11578 /* Allocate stubs for exported Thumb functions on v4t. */
11579 if (!htab->use_blx && h->dynindx != -1
11580 && h->def_regular
11581 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11582 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11583 {
11584 struct elf_link_hash_entry * th;
11585 struct bfd_link_hash_entry * bh;
11586 struct elf_link_hash_entry * myh;
11587 char name[1024];
11588 asection *s;
11589 bh = NULL;
11590 /* Create a new symbol to record the real location of the function. */
11591 s = h->root.u.def.section;
11592 sprintf (name, "__real_%s", h->root.root.string);
11593 _bfd_generic_link_add_one_symbol (info, s->owner,
11594 name, BSF_GLOBAL, s,
11595 h->root.u.def.value,
11596 NULL, TRUE, FALSE, &bh);
11597
11598 myh = (struct elf_link_hash_entry *) bh;
11599 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11600 myh->forced_local = 1;
11601 eh->export_glue = myh;
11602 th = record_arm_to_thumb_glue (info, h);
11603 /* Point the symbol at the stub. */
11604 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11605 h->root.u.def.section = th->root.u.def.section;
11606 h->root.u.def.value = th->root.u.def.value & ~1;
11607 }
11608
11609 if (eh->relocs_copied == NULL)
11610 return TRUE;
11611
11612 /* In the shared -Bsymbolic case, discard space allocated for
11613 dynamic pc-relative relocs against symbols which turn out to be
11614 defined in regular objects. For the normal shared case, discard
11615 space for pc-relative relocs that have become local due to symbol
11616 visibility changes. */
11617
11618 if (info->shared || htab->root.is_relocatable_executable)
11619 {
11620 /* The only relocs that use pc_count are R_ARM_REL32 and
11621 R_ARM_REL32_NOI, which will appear on something like
11622 ".long foo - .". We want calls to protected symbols to resolve
11623 directly to the function rather than going via the plt. If people
11624 want function pointer comparisons to work as expected then they
11625 should avoid writing assembly like ".long foo - .". */
11626 if (SYMBOL_CALLS_LOCAL (info, h))
11627 {
11628 struct elf32_arm_relocs_copied **pp;
11629
11630 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11631 {
11632 p->count -= p->pc_count;
11633 p->pc_count = 0;
11634 if (p->count == 0)
11635 *pp = p->next;
11636 else
11637 pp = &p->next;
11638 }
11639 }
11640
11641 if (elf32_arm_hash_table (info)->vxworks_p)
11642 {
11643 struct elf32_arm_relocs_copied **pp;
11644
11645 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11646 {
11647 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11648 *pp = p->next;
11649 else
11650 pp = &p->next;
11651 }
11652 }
11653
11654 /* Also discard relocs on undefined weak syms with non-default
11655 visibility. */
11656 if (eh->relocs_copied != NULL
11657 && h->root.type == bfd_link_hash_undefweak)
11658 {
11659 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11660 eh->relocs_copied = NULL;
11661
11662 /* Make sure undefined weak symbols are output as a dynamic
11663 symbol in PIEs. */
11664 else if (h->dynindx == -1
11665 && !h->forced_local)
11666 {
11667 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11668 return FALSE;
11669 }
11670 }
11671
11672 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11673 && h->root.type == bfd_link_hash_new)
11674 {
11675 /* Output absolute symbols so that we can create relocations
11676 against them. For normal symbols we output a relocation
11677 against the section that contains them. */
11678 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11679 return FALSE;
11680 }
11681
11682 }
11683 else
11684 {
11685 /* For the non-shared case, discard space for relocs against
11686 symbols which turn out to need copy relocs or are not
11687 dynamic. */
11688
11689 if (!h->non_got_ref
11690 && ((h->def_dynamic
11691 && !h->def_regular)
11692 || (htab->root.dynamic_sections_created
11693 && (h->root.type == bfd_link_hash_undefweak
11694 || h->root.type == bfd_link_hash_undefined))))
11695 {
11696 /* Make sure this symbol is output as a dynamic symbol.
11697 Undefined weak syms won't yet be marked as dynamic. */
11698 if (h->dynindx == -1
11699 && !h->forced_local)
11700 {
11701 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11702 return FALSE;
11703 }
11704
11705 /* If that succeeded, we know we'll be keeping all the
11706 relocs. */
11707 if (h->dynindx != -1)
11708 goto keep;
11709 }
11710
11711 eh->relocs_copied = NULL;
11712
11713 keep: ;
11714 }
11715
11716 /* Finally, allocate space. */
11717 for (p = eh->relocs_copied; p != NULL; p = p->next)
11718 {
11719 asection *sreloc = elf_section_data (p->section)->sreloc;
11720 sreloc->size += p->count * RELOC_SIZE (htab);
11721 }
11722
11723 return TRUE;
11724 }
11725
11726 /* Find any dynamic relocs that apply to read-only sections. */
11727
11728 static bfd_boolean
11729 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11730 {
11731 struct elf32_arm_link_hash_entry * eh;
11732 struct elf32_arm_relocs_copied * p;
11733
11734 if (h->root.type == bfd_link_hash_warning)
11735 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11736
11737 eh = (struct elf32_arm_link_hash_entry *) h;
11738 for (p = eh->relocs_copied; p != NULL; p = p->next)
11739 {
11740 asection *s = p->section;
11741
11742 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11743 {
11744 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11745
11746 info->flags |= DF_TEXTREL;
11747
11748 /* Not an error, just cut short the traversal. */
11749 return FALSE;
11750 }
11751 }
11752 return TRUE;
11753 }
11754
11755 void
11756 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11757 int byteswap_code)
11758 {
11759 struct elf32_arm_link_hash_table *globals;
11760
11761 globals = elf32_arm_hash_table (info);
11762 globals->byteswap_code = byteswap_code;
11763 }
11764
11765 /* Set the sizes of the dynamic sections. */
11766
11767 static bfd_boolean
11768 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11769 struct bfd_link_info * info)
11770 {
11771 bfd * dynobj;
11772 asection * s;
11773 bfd_boolean plt;
11774 bfd_boolean relocs;
11775 bfd *ibfd;
11776 struct elf32_arm_link_hash_table *htab;
11777
11778 htab = elf32_arm_hash_table (info);
11779 dynobj = elf_hash_table (info)->dynobj;
11780 BFD_ASSERT (dynobj != NULL);
11781 check_use_blx (htab);
11782
11783 if (elf_hash_table (info)->dynamic_sections_created)
11784 {
11785 /* Set the contents of the .interp section to the interpreter. */
11786 if (info->executable)
11787 {
11788 s = bfd_get_section_by_name (dynobj, ".interp");
11789 BFD_ASSERT (s != NULL);
11790 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11791 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11792 }
11793 }
11794
11795 /* Set up .got offsets for local syms, and space for local dynamic
11796 relocs. */
11797 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11798 {
11799 bfd_signed_vma *local_got;
11800 bfd_signed_vma *end_local_got;
11801 char *local_tls_type;
11802 bfd_size_type locsymcount;
11803 Elf_Internal_Shdr *symtab_hdr;
11804 asection *srel;
11805 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11806
11807 if (! is_arm_elf (ibfd))
11808 continue;
11809
11810 for (s = ibfd->sections; s != NULL; s = s->next)
11811 {
11812 struct elf32_arm_relocs_copied *p;
11813
11814 for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11815 {
11816 if (!bfd_is_abs_section (p->section)
11817 && bfd_is_abs_section (p->section->output_section))
11818 {
11819 /* Input section has been discarded, either because
11820 it is a copy of a linkonce section or due to
11821 linker script /DISCARD/, so we'll be discarding
11822 the relocs too. */
11823 }
11824 else if (is_vxworks
11825 && strcmp (p->section->output_section->name,
11826 ".tls_vars") == 0)
11827 {
11828 /* Relocations in vxworks .tls_vars sections are
11829 handled specially by the loader. */
11830 }
11831 else if (p->count != 0)
11832 {
11833 srel = elf_section_data (p->section)->sreloc;
11834 srel->size += p->count * RELOC_SIZE (htab);
11835 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11836 info->flags |= DF_TEXTREL;
11837 }
11838 }
11839 }
11840
11841 local_got = elf_local_got_refcounts (ibfd);
11842 if (!local_got)
11843 continue;
11844
11845 symtab_hdr = & elf_symtab_hdr (ibfd);
11846 locsymcount = symtab_hdr->sh_info;
11847 end_local_got = local_got + locsymcount;
11848 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11849 s = htab->sgot;
11850 srel = htab->srelgot;
11851 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11852 {
11853 if (*local_got > 0)
11854 {
11855 *local_got = s->size;
11856 if (*local_tls_type & GOT_TLS_GD)
11857 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11858 s->size += 8;
11859 if (*local_tls_type & GOT_TLS_IE)
11860 s->size += 4;
11861 if (*local_tls_type == GOT_NORMAL)
11862 s->size += 4;
11863
11864 if (info->shared || *local_tls_type == GOT_TLS_GD)
11865 srel->size += RELOC_SIZE (htab);
11866 }
11867 else
11868 *local_got = (bfd_vma) -1;
11869 }
11870 }
11871
11872 if (htab->tls_ldm_got.refcount > 0)
11873 {
11874 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11875 for R_ARM_TLS_LDM32 relocations. */
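/* As with the GD case in allocate_dynrelocs, the pair of GOT words
   conventionally holds a module index and an offset; for LDM the
   offset half is simply zero, so only the module-index word needs a
   dynamic relocation, and then only for shared links, as the test
   below shows.  */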
11876 htab->tls_ldm_got.offset = htab->sgot->size;
11877 htab->sgot->size += 8;
11878 if (info->shared)
11879 htab->srelgot->size += RELOC_SIZE (htab);
11880 }
11881 else
11882 htab->tls_ldm_got.offset = -1;
11883
11884 /* Allocate global sym .plt and .got entries, and space for global
11885 sym dynamic relocs. */
11886 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11887
11888 /* Here we rummage through the found bfds to collect glue information. */
11889 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11890 {
11891 if (! is_arm_elf (ibfd))
11892 continue;
11893
11894 /* Initialise mapping tables for code/data. */
11895 bfd_elf32_arm_init_maps (ibfd);
11896
11897 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11898 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11899 /* xgettext:c-format */
11900 _bfd_error_handler (_("Errors encountered processing file %s"),
11901 ibfd->filename);
11902 }
11903
11904 /* Allocate space for the glue sections now that we've sized them. */
11905 bfd_elf32_arm_allocate_interworking_sections (info);
11906
11907 /* The check_relocs and adjust_dynamic_symbol entry points have
11908 determined the sizes of the various dynamic sections. Allocate
11909 memory for them. */
11910 plt = FALSE;
11911 relocs = FALSE;
11912 for (s = dynobj->sections; s != NULL; s = s->next)
11913 {
11914 const char * name;
11915
11916 if ((s->flags & SEC_LINKER_CREATED) == 0)
11917 continue;
11918
11919 /* It's OK to base decisions on the section name, because none
11920 of the dynobj section names depend upon the input files. */
11921 name = bfd_get_section_name (dynobj, s);
11922
11923 if (strcmp (name, ".plt") == 0)
11924 {
11925 /* Remember whether there is a PLT. */
11926 plt = s->size != 0;
11927 }
11928 else if (CONST_STRNEQ (name, ".rel"))
11929 {
11930 if (s->size != 0)
11931 {
11932 /* Remember whether there are any reloc sections other
11933 than .rel(a).plt and .rela.plt.unloaded. */
11934 if (s != htab->srelplt && s != htab->srelplt2)
11935 relocs = TRUE;
11936
11937 /* We use the reloc_count field as a counter if we need
11938 to copy relocs into the output file. */
11939 s->reloc_count = 0;
11940 }
11941 }
11942 else if (! CONST_STRNEQ (name, ".got")
11943 && strcmp (name, ".dynbss") != 0)
11944 {
11945 /* It's not one of our sections, so don't allocate space. */
11946 continue;
11947 }
11948
11949 if (s->size == 0)
11950 {
11951 /* If we don't need this section, strip it from the
11952 output file. This is mostly to handle .rel(a).bss and
11953 .rel(a).plt. We must create both sections in
11954 create_dynamic_sections, because they must be created
11955 before the linker maps input sections to output
11956 sections. The linker does that before
11957 adjust_dynamic_symbol is called, and it is that
11958 function which decides whether anything needs to go
11959 into these sections. */
11960 s->flags |= SEC_EXCLUDE;
11961 continue;
11962 }
11963
11964 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11965 continue;
11966
11967 /* Allocate memory for the section contents. */
11968 s->contents = bfd_zalloc (dynobj, s->size);
11969 if (s->contents == NULL)
11970 return FALSE;
11971 }
11972
11973 if (elf_hash_table (info)->dynamic_sections_created)
11974 {
11975 /* Add some entries to the .dynamic section. We fill in the
11976 values later, in elf32_arm_finish_dynamic_sections, but we
11977 must add the entries now so that we get the correct size for
11978 the .dynamic section. The DT_DEBUG entry is filled in by the
11979 dynamic linker and used by the debugger. */
11980 #define add_dynamic_entry(TAG, VAL) \
11981 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11982
11983 if (info->executable)
11984 {
11985 if (!add_dynamic_entry (DT_DEBUG, 0))
11986 return FALSE;
11987 }
11988
11989 if (plt)
11990 {
11991 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11992 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11993 || !add_dynamic_entry (DT_PLTREL,
11994 htab->use_rel ? DT_REL : DT_RELA)
11995 || !add_dynamic_entry (DT_JMPREL, 0))
11996 return FALSE;
11997 }
11998
11999 if (relocs)
12000 {
12001 if (htab->use_rel)
12002 {
12003 if (!add_dynamic_entry (DT_REL, 0)
12004 || !add_dynamic_entry (DT_RELSZ, 0)
12005 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12006 return FALSE;
12007 }
12008 else
12009 {
12010 if (!add_dynamic_entry (DT_RELA, 0)
12011 || !add_dynamic_entry (DT_RELASZ, 0)
12012 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12013 return FALSE;
12014 }
12015 }
12016
12017 /* If any dynamic relocs apply to a read-only section,
12018 then we need a DT_TEXTREL entry. */
12019 if ((info->flags & DF_TEXTREL) == 0)
12020 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12021 info);
12022
12023 if ((info->flags & DF_TEXTREL) != 0)
12024 {
12025 if (!add_dynamic_entry (DT_TEXTREL, 0))
12026 return FALSE;
12027 }
12028 if (htab->vxworks_p
12029 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12030 return FALSE;
12031 }
12032 #undef add_dynamic_entry
12033
12034 return TRUE;
12035 }
12036
12037 /* Finish up dynamic symbol handling. We set the contents of various
12038 dynamic sections here. */
12039
12040 static bfd_boolean
12041 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12042 struct bfd_link_info * info,
12043 struct elf_link_hash_entry * h,
12044 Elf_Internal_Sym * sym)
12045 {
12046 bfd * dynobj;
12047 struct elf32_arm_link_hash_table *htab;
12048 struct elf32_arm_link_hash_entry *eh;
12049
12050 dynobj = elf_hash_table (info)->dynobj;
12051 htab = elf32_arm_hash_table (info);
12052 eh = (struct elf32_arm_link_hash_entry *) h;
12053
12054 if (h->plt.offset != (bfd_vma) -1)
12055 {
12056 asection * splt;
12057 asection * srel;
12058 bfd_byte *loc;
12059 bfd_vma plt_index;
12060 Elf_Internal_Rela rel;
12061
12062 /* This symbol has an entry in the procedure linkage table. Set
12063 it up. */
12064
12065 BFD_ASSERT (h->dynindx != -1);
12066
12067 splt = bfd_get_section_by_name (dynobj, ".plt");
12068 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12069 BFD_ASSERT (splt != NULL && srel != NULL);
12070
12071 /* Fill in the entry in the procedure linkage table. */
12072 if (htab->symbian_p)
12073 {
12074 put_arm_insn (htab, output_bfd,
12075 elf32_arm_symbian_plt_entry[0],
12076 splt->contents + h->plt.offset);
12077 bfd_put_32 (output_bfd,
12078 elf32_arm_symbian_plt_entry[1],
12079 splt->contents + h->plt.offset + 4);
12080
12081 /* Fill in the entry in the .rel.plt section. */
12082 rel.r_offset = (splt->output_section->vma
12083 + splt->output_offset
12084 + h->plt.offset + 4);
12085 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12086
12087 /* Get the index in the procedure linkage table which
12088 corresponds to this symbol. This is the index of this symbol
12089 in all the symbols for which we are making plt entries. The
12090 first entry in the procedure linkage table is reserved. */
12091 plt_index = ((h->plt.offset - htab->plt_header_size)
12092 / htab->plt_entry_size);
12093 }
12094 else
12095 {
12096 bfd_vma got_offset, got_address, plt_address;
12097 bfd_vma got_displacement;
12098 asection * sgot;
12099 bfd_byte * ptr;
12100
12101 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12102 BFD_ASSERT (sgot != NULL);
12103
12104 /* Get the offset into the .got.plt table of the entry that
12105 corresponds to this function. */
12106 got_offset = eh->plt_got_offset;
12107
12108 /* Get the index in the procedure linkage table which
12109 corresponds to this symbol. This is the index of this symbol
12110 in all the symbols for which we are making plt entries. The
12111 first three entries in .got.plt are reserved; after that
12112 symbols appear in the same order as in .plt. */
12113 plt_index = (got_offset - 12) / 4;
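/* For example, the first symbol allocated a .got.plt slot has
   got_offset 12 (right after the three reserved words), giving
   plt_index 0; the next has got_offset 16 and plt_index 1, and so
   on.  */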
12114
12115 /* Calculate the address of the GOT entry. */
12116 got_address = (sgot->output_section->vma
12117 + sgot->output_offset
12118 + got_offset);
12119
12120 /* ...and the address of the PLT entry. */
12121 plt_address = (splt->output_section->vma
12122 + splt->output_offset
12123 + h->plt.offset);
12124
12125 ptr = htab->splt->contents + h->plt.offset;
12126 if (htab->vxworks_p && info->shared)
12127 {
12128 unsigned int i;
12129 bfd_vma val;
12130
12131 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12132 {
12133 val = elf32_arm_vxworks_shared_plt_entry[i];
12134 if (i == 2)
12135 val |= got_address - sgot->output_section->vma;
12136 if (i == 5)
12137 val |= plt_index * RELOC_SIZE (htab);
12138 if (i == 2 || i == 5)
12139 bfd_put_32 (output_bfd, val, ptr);
12140 else
12141 put_arm_insn (htab, output_bfd, val, ptr);
12142 }
12143 }
12144 else if (htab->vxworks_p)
12145 {
12146 unsigned int i;
12147 bfd_vma val;
12148
12149 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12150 {
12151 val = elf32_arm_vxworks_exec_plt_entry[i];
12152 if (i == 2)
12153 val |= got_address;
12154 if (i == 4)
12155 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12156 if (i == 5)
12157 val |= plt_index * RELOC_SIZE (htab);
12158 if (i == 2 || i == 5)
12159 bfd_put_32 (output_bfd, val, ptr);
12160 else
12161 put_arm_insn (htab, output_bfd, val, ptr);
12162 }
12163
12164 loc = (htab->srelplt2->contents
12165 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12166
12167 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12168 referencing the GOT for this PLT entry. */
12169 rel.r_offset = plt_address + 8;
12170 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12171 rel.r_addend = got_offset;
12172 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12173 loc += RELOC_SIZE (htab);
12174
12175 /* Create the R_ARM_ABS32 relocation referencing the
12176 beginning of the PLT for this GOT entry. */
12177 rel.r_offset = got_address;
12178 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12179 rel.r_addend = 0;
12180 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12181 }
12182 else
12183 {
12184 bfd_signed_vma thumb_refs;
12185 /* Calculate the displacement between the PLT slot and the
12186 entry in the GOT. The eight-byte offset accounts for the
12187 value produced by adding to pc in the first instruction
12188 of the PLT stub. */
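/* The displacement is then split into 8 + 8 + 12 bit chunks and
   folded into the three instructions of elf32_arm_plt_entry below,
   which typically form a sequence along the lines of:
	add   ip, pc, #0xNN00000
	add   ip, ip, #0xNN000
	ldr   pc, [ip, #0xNNN]!
   i.e. ip is built up to the address of the GOT slot and the slot's
   contents are then loaded into pc.  */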
12189 got_displacement = got_address - (plt_address + 8);
12190
12191 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12192
12193 thumb_refs = eh->plt_thumb_refcount;
12194 if (!htab->use_blx)
12195 thumb_refs += eh->plt_maybe_thumb_refcount;
12196
12197 if (thumb_refs > 0)
12198 {
12199 put_thumb_insn (htab, output_bfd,
12200 elf32_arm_plt_thumb_stub[0], ptr - 4);
12201 put_thumb_insn (htab, output_bfd,
12202 elf32_arm_plt_thumb_stub[1], ptr - 2);
12203 }
12204
12205 put_arm_insn (htab, output_bfd,
12206 elf32_arm_plt_entry[0]
12207 | ((got_displacement & 0x0ff00000) >> 20),
12208 ptr + 0);
12209 put_arm_insn (htab, output_bfd,
12210 elf32_arm_plt_entry[1]
12211 | ((got_displacement & 0x000ff000) >> 12),
12212 ptr + 4);
12213 put_arm_insn (htab, output_bfd,
12214 elf32_arm_plt_entry[2]
12215 | (got_displacement & 0x00000fff),
12216 ptr + 8);
12217 #ifdef FOUR_WORD_PLT
12218 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12219 #endif
12220 }
12221
12222 /* Fill in the entry in the global offset table. */
12223 bfd_put_32 (output_bfd,
12224 (splt->output_section->vma
12225 + splt->output_offset),
12226 sgot->contents + got_offset);
12227
12228 /* Fill in the entry in the .rel(a).plt section. */
12229 rel.r_addend = 0;
12230 rel.r_offset = got_address;
12231 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12232 }
12233
12234 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12235 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12236
12237 if (!h->def_regular)
12238 {
12239 /* Mark the symbol as undefined, rather than as defined in
12240 the .plt section. Leave the value alone. */
12241 sym->st_shndx = SHN_UNDEF;
12242 /* If the symbol is weak, we do need to clear the value.
12243 Otherwise, the PLT entry would provide a definition for
12244 the symbol even if the symbol wasn't defined anywhere,
12245 and so the symbol would never be NULL. */
12246 if (!h->ref_regular_nonweak)
12247 sym->st_value = 0;
12248 }
12249 }
12250
12251 if (h->got.offset != (bfd_vma) -1
12252 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12253 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12254 {
12255 asection * sgot;
12256 asection * srel;
12257 Elf_Internal_Rela rel;
12258 bfd_byte *loc;
12259 bfd_vma offset;
12260
12261 /* This symbol has an entry in the global offset table. Set it
12262 up. */
12263 sgot = bfd_get_section_by_name (dynobj, ".got");
12264 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12265 BFD_ASSERT (sgot != NULL && srel != NULL);
12266
12267 offset = (h->got.offset & ~(bfd_vma) 1);
12268 rel.r_addend = 0;
12269 rel.r_offset = (sgot->output_section->vma
12270 + sgot->output_offset
12271 + offset);
12272
12273 /* If this is a static link, or it is a -Bsymbolic link and the
12274 symbol is defined locally or was forced to be local because
12275 of a version file, we just want to emit a RELATIVE reloc.
12276 The entry in the global offset table will already have been
12277 initialized in the relocate_section function. */
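/* An R_ARM_RELATIVE reloc carries no symbol: the dynamic linker
   simply adds the load base to the value already stored in the GOT
   slot (REL) or to the addend (RELA), which is why the RELA branch
   below moves the slot's contents into r_addend and zeroes the
   slot.  */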
12278 if (info->shared
12279 && SYMBOL_REFERENCES_LOCAL (info, h))
12280 {
12281 BFD_ASSERT ((h->got.offset & 1) != 0);
12282 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12283 if (!htab->use_rel)
12284 {
12285 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12286 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12287 }
12288 }
12289 else
12290 {
12291 BFD_ASSERT ((h->got.offset & 1) == 0);
12292 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12293 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12294 }
12295
12296 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12297 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12298 }
12299
12300 if (h->needs_copy)
12301 {
12302 asection * s;
12303 Elf_Internal_Rela rel;
12304 bfd_byte *loc;
12305
12306 /* This symbol needs a copy reloc. Set it up. */
12307 BFD_ASSERT (h->dynindx != -1
12308 && (h->root.type == bfd_link_hash_defined
12309 || h->root.type == bfd_link_hash_defweak));
12310
12311 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12312 RELOC_SECTION (htab, ".bss"));
12313 BFD_ASSERT (s != NULL);
12314
12315 rel.r_addend = 0;
12316 rel.r_offset = (h->root.u.def.value
12317 + h->root.u.def.section->output_section->vma
12318 + h->root.u.def.section->output_offset);
12319 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12320 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12321 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12322 }
12323
12324 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12325 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12326 to the ".got" section. */
12327 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12328 || (!htab->vxworks_p && h == htab->root.hgot))
12329 sym->st_shndx = SHN_ABS;
12330
12331 return TRUE;
12332 }
12333
12334 /* Finish up the dynamic sections. */
12335
12336 static bfd_boolean
12337 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12338 {
12339 bfd * dynobj;
12340 asection * sgot;
12341 asection * sdyn;
12342
12343 dynobj = elf_hash_table (info)->dynobj;
12344
12345 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12346 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12347 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12348
12349 if (elf_hash_table (info)->dynamic_sections_created)
12350 {
12351 asection *splt;
12352 Elf32_External_Dyn *dyncon, *dynconend;
12353 struct elf32_arm_link_hash_table *htab;
12354
12355 htab = elf32_arm_hash_table (info);
12356 splt = bfd_get_section_by_name (dynobj, ".plt");
12357 BFD_ASSERT (splt != NULL && sdyn != NULL);
12358
12359 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12360 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12361
12362 for (; dyncon < dynconend; dyncon++)
12363 {
12364 Elf_Internal_Dyn dyn;
12365 const char * name;
12366 asection * s;
12367
12368 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12369
12370 switch (dyn.d_tag)
12371 {
12372 unsigned int type;
12373
12374 default:
12375 if (htab->vxworks_p
12376 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12377 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12378 break;
12379
12380 case DT_HASH:
12381 name = ".hash";
12382 goto get_vma_if_bpabi;
12383 case DT_STRTAB:
12384 name = ".dynstr";
12385 goto get_vma_if_bpabi;
12386 case DT_SYMTAB:
12387 name = ".dynsym";
12388 goto get_vma_if_bpabi;
12389 case DT_VERSYM:
12390 name = ".gnu.version";
12391 goto get_vma_if_bpabi;
12392 case DT_VERDEF:
12393 name = ".gnu.version_d";
12394 goto get_vma_if_bpabi;
12395 case DT_VERNEED:
12396 name = ".gnu.version_r";
12397 goto get_vma_if_bpabi;
12398
12399 case DT_PLTGOT:
12400 name = ".got";
12401 goto get_vma;
12402 case DT_JMPREL:
12403 name = RELOC_SECTION (htab, ".plt");
12404 get_vma:
12405 s = bfd_get_section_by_name (output_bfd, name);
12406 BFD_ASSERT (s != NULL);
12407 if (!htab->symbian_p)
12408 dyn.d_un.d_ptr = s->vma;
12409 else
12410 /* In the BPABI, tags in the PT_DYNAMIC section point
12411 at the file offset, not the memory address, for the
12412 convenience of the post linker. */
12413 dyn.d_un.d_ptr = s->filepos;
12414 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12415 break;
12416
12417 get_vma_if_bpabi:
12418 if (htab->symbian_p)
12419 goto get_vma;
12420 break;
12421
12422 case DT_PLTRELSZ:
12423 s = bfd_get_section_by_name (output_bfd,
12424 RELOC_SECTION (htab, ".plt"));
12425 BFD_ASSERT (s != NULL);
12426 dyn.d_un.d_val = s->size;
12427 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12428 break;
12429
12430 case DT_RELSZ:
12431 case DT_RELASZ:
12432 if (!htab->symbian_p)
12433 {
12434 /* My reading of the SVR4 ABI indicates that the
12435 procedure linkage table relocs (DT_JMPREL) should be
12436 included in the overall relocs (DT_REL). This is
12437 what Solaris does. However, UnixWare can not handle
12438 that case. Therefore, we override the DT_RELSZ entry
12439 here to make it not include the JMPREL relocs. Since
12440 the linker script arranges for .rel(a).plt to follow all
12441 other relocation sections, we don't have to worry
12442 about changing the DT_REL entry. */
12443 s = bfd_get_section_by_name (output_bfd,
12444 RELOC_SECTION (htab, ".plt"));
12445 if (s != NULL)
12446 dyn.d_un.d_val -= s->size;
12447 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12448 break;
12449 }
12450 /* Fall through. */
12451
12452 case DT_REL:
12453 case DT_RELA:
12454 /* In the BPABI, the DT_REL tag must point at the file
12455 offset, not the VMA, of the first relocation
12456 section. So, we use code similar to that in
12457 elflink.c, but do not check for SHF_ALLOC on the
12458 relocation section, since relocation sections are
12459 never allocated under the BPABI. The comments above
12460 about UnixWare notwithstanding, we include all of the
12461 relocations here. */
12462 if (htab->symbian_p)
12463 {
12464 unsigned int i;
12465 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12466 ? SHT_REL : SHT_RELA);
12467 dyn.d_un.d_val = 0;
12468 for (i = 1; i < elf_numsections (output_bfd); i++)
12469 {
12470 Elf_Internal_Shdr *hdr
12471 = elf_elfsections (output_bfd)[i];
12472 if (hdr->sh_type == type)
12473 {
12474 if (dyn.d_tag == DT_RELSZ
12475 || dyn.d_tag == DT_RELASZ)
12476 dyn.d_un.d_val += hdr->sh_size;
12477 else if ((ufile_ptr) hdr->sh_offset
12478 <= dyn.d_un.d_val - 1)
12479 dyn.d_un.d_val = hdr->sh_offset;
12480 }
12481 }
12482 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12483 }
12484 break;
12485
12486 /* Set the bottom bit of DT_INIT/FINI if the
12487 corresponding function is Thumb. */
12488 case DT_INIT:
12489 name = info->init_function;
12490 goto get_sym;
12491 case DT_FINI:
12492 name = info->fini_function;
12493 get_sym:
12494 /* If it wasn't set by elf_bfd_final_link
12495 then there is nothing to adjust. */
12496 if (dyn.d_un.d_val != 0)
12497 {
12498 struct elf_link_hash_entry * eh;
12499
12500 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12501 FALSE, FALSE, TRUE);
12502 if (eh != NULL
12503 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12504 {
12505 dyn.d_un.d_val |= 1;
12506 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12507 }
12508 }
12509 break;
12510 }
12511 }
12512
12513 /* Fill in the first entry in the procedure linkage table. */
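/* This is the PLT header (PLT0) shared by all entries.  In the
   generic (non-VxWorks) case the instructions are followed by a
   literal word holding the displacement from the PLT to the GOT
   (written at offset 16, or 28 for FOUR_WORD_PLT, below); the stub
   uses it to locate the reserved GOT words that the dynamic linker
   relies on for lazy resolution.  */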
12514 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12515 {
12516 const bfd_vma *plt0_entry;
12517 bfd_vma got_address, plt_address, got_displacement;
12518
12519 /* Calculate the addresses of the GOT and PLT. */
12520 got_address = sgot->output_section->vma + sgot->output_offset;
12521 plt_address = splt->output_section->vma + splt->output_offset;
12522
12523 if (htab->vxworks_p)
12524 {
12525 /* The VxWorks GOT is relocated by the dynamic linker.
12526 Therefore, we must emit relocations rather than simply
12527 computing the values now. */
12528 Elf_Internal_Rela rel;
12529
12530 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12531 put_arm_insn (htab, output_bfd, plt0_entry[0],
12532 splt->contents + 0);
12533 put_arm_insn (htab, output_bfd, plt0_entry[1],
12534 splt->contents + 4);
12535 put_arm_insn (htab, output_bfd, plt0_entry[2],
12536 splt->contents + 8);
12537 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12538
12539 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12540 rel.r_offset = plt_address + 12;
12541 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12542 rel.r_addend = 0;
12543 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12544 htab->srelplt2->contents);
12545 }
12546 else
12547 {
12548 got_displacement = got_address - (plt_address + 16);
12549
12550 plt0_entry = elf32_arm_plt0_entry;
12551 put_arm_insn (htab, output_bfd, plt0_entry[0],
12552 splt->contents + 0);
12553 put_arm_insn (htab, output_bfd, plt0_entry[1],
12554 splt->contents + 4);
12555 put_arm_insn (htab, output_bfd, plt0_entry[2],
12556 splt->contents + 8);
12557 put_arm_insn (htab, output_bfd, plt0_entry[3],
12558 splt->contents + 12);
12559
12560 #ifdef FOUR_WORD_PLT
12561 /* The displacement value goes in the otherwise-unused
12562 last word of the second entry. */
12563 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12564 #else
12565 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12566 #endif
12567 }
12568 }
12569
12570 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12571 really seem like the right value. */
12572 if (splt->output_section->owner == output_bfd)
12573 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12574
12575 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12576 {
12577 /* Correct the .rel(a).plt.unloaded relocations. They will have
12578 incorrect symbol indexes. */
12579 int num_plts;
12580 unsigned char *p;
12581
12582 num_plts = ((htab->splt->size - htab->plt_header_size)
12583 / htab->plt_entry_size);
12584 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12585
12586 for (; num_plts; num_plts--)
12587 {
12588 Elf_Internal_Rela rel;
12589
12590 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12591 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12592 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12593 p += RELOC_SIZE (htab);
12594
12595 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12596 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12597 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12598 p += RELOC_SIZE (htab);
12599 }
12600 }
12601 }
12602
12603 /* Fill in the first three entries in the global offset table. */
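/* These are the three reserved words at the start of .got.plt: the
   first holds the address of the .dynamic section (or 0 when there
   is none), while the next two are cleared here and filled in at run
   time by the dynamic linker, conventionally with its link-map
   pointer and the address of its lazy-resolution entry point.  */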
12604 if (sgot)
12605 {
12606 if (sgot->size > 0)
12607 {
12608 if (sdyn == NULL)
12609 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12610 else
12611 bfd_put_32 (output_bfd,
12612 sdyn->output_section->vma + sdyn->output_offset,
12613 sgot->contents);
12614 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12615 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12616 }
12617
12618 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12619 }
12620
12621 return TRUE;
12622 }
12623
12624 static void
12625 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12626 {
12627 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12628 struct elf32_arm_link_hash_table *globals;
12629
12630 i_ehdrp = elf_elfheader (abfd);
12631
12632 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12633 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12634 else
12635 i_ehdrp->e_ident[EI_OSABI] = 0;
12636 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12637
12638 if (link_info)
12639 {
12640 globals = elf32_arm_hash_table (link_info);
12641 if (globals->byteswap_code)
12642 i_ehdrp->e_flags |= EF_ARM_BE8;
12643 }
12644 }
12645
12646 static enum elf_reloc_type_class
12647 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12648 {
12649 switch ((int) ELF32_R_TYPE (rela->r_info))
12650 {
12651 case R_ARM_RELATIVE:
12652 return reloc_class_relative;
12653 case R_ARM_JUMP_SLOT:
12654 return reloc_class_plt;
12655 case R_ARM_COPY:
12656 return reloc_class_copy;
12657 default:
12658 return reloc_class_normal;
12659 }
12660 }
12661
12662 /* Give SHT_NOTE sections SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS
12662 so that duplicate note sections from different inputs are merged. */
12663
12664 static bfd_boolean
12665 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12666 {
12667 if (hdr->sh_type == SHT_NOTE)
12668 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12669
12670 return TRUE;
12671 }
12672
12673 static void
12674 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12675 {
12676 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12677 }
12678
12679 /* Return TRUE if this is an unwinding table entry. */
12680
12681 static bfd_boolean
12682 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12683 {
12684 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12685 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12686 }
12687
12688
12689 /* Set the type and flags for an ARM section. We do this based on
12690 the section name, which is a hack, but ought to work. */
12691
12692 static bfd_boolean
12693 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12694 {
12695 const char * name;
12696
12697 name = bfd_get_section_name (abfd, sec);
12698
12699 if (is_arm_elf_unwind_section_name (abfd, name))
12700 {
12701 hdr->sh_type = SHT_ARM_EXIDX;
12702 hdr->sh_flags |= SHF_LINK_ORDER;
12703 }
12704 return TRUE;
12705 }
12706
12707 /* Handle an ARM specific section when reading an object file. This is
12708 called when bfd_section_from_shdr finds a section with an unknown
12709 type. */
12710
12711 static bfd_boolean
12712 elf32_arm_section_from_shdr (bfd *abfd,
12713 Elf_Internal_Shdr * hdr,
12714 const char *name,
12715 int shindex)
12716 {
12717 /* There ought to be a place to keep ELF backend specific flags, but
12718 at the moment there isn't one. We just keep track of the
12719 sections by their name, instead. Fortunately, the ABI gives
12720 names for all the ARM specific sections, so we will probably get
12721 away with this. */
12722 switch (hdr->sh_type)
12723 {
12724 case SHT_ARM_EXIDX:
12725 case SHT_ARM_PREEMPTMAP:
12726 case SHT_ARM_ATTRIBUTES:
12727 break;
12728
12729 default:
12730 return FALSE;
12731 }
12732
12733 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12734 return FALSE;
12735
12736 return TRUE;
12737 }
12738
12739 /* A structure used to record a list of sections, independently
12740 of the next and prev fields in the asection structure. */
12741 typedef struct section_list
12742 {
12743 asection * sec;
12744 struct section_list * next;
12745 struct section_list * prev;
12746 }
12747 section_list;
12748
12749 /* Unfortunately we need to keep a list of sections for which
12750 an _arm_elf_section_data structure has been allocated. This
12751 is because it is possible for functions like elf32_arm_write_section
12752 to be called on a section which has had the generic ELF section data
12753 allocated for it (and so the used_by_bfd field is valid) but
12754 for which the ARM extended version of this structure - the
12755 _arm_elf_section_data structure - has not been allocated. */
12756 static section_list * sections_with_arm_elf_section_data = NULL;
12757
12758 static void
12759 record_section_with_arm_elf_section_data (asection * sec)
12760 {
12761 struct section_list * entry;
12762
12763 entry = bfd_malloc (sizeof (* entry));
12764 if (entry == NULL)
12765 return;
12766 entry->sec = sec;
12767 entry->next = sections_with_arm_elf_section_data;
12768 entry->prev = NULL;
12769 if (entry->next != NULL)
12770 entry->next->prev = entry;
12771 sections_with_arm_elf_section_data = entry;
12772 }
12773
12774 static struct section_list *
12775 find_arm_elf_section_entry (asection * sec)
12776 {
12777 struct section_list * entry;
12778 static struct section_list * last_entry = NULL;
12779
12780 /* This is a short cut for the typical case where the sections are added
12781 to the sections_with_arm_elf_section_data list in forward order and
12782 then looked up here in backwards order. This makes a real difference
12783 to the ld-srec/sec64k.exp linker test. */
12784 entry = sections_with_arm_elf_section_data;
12785 if (last_entry != NULL)
12786 {
12787 if (last_entry->sec == sec)
12788 entry = last_entry;
12789 else if (last_entry->next != NULL
12790 && last_entry->next->sec == sec)
12791 entry = last_entry->next;
12792 }
12793
12794 for (; entry; entry = entry->next)
12795 if (entry->sec == sec)
12796 break;
12797
12798 if (entry)
12799 /* Record the entry prior to this one - it is the entry we are most
12800 likely to want to locate next time. Also this way if we have been
12801 called from unrecord_section_with_arm_elf_section_data() we will not
12802 be caching a pointer that is about to be freed. */
12803 last_entry = entry->prev;
12804
12805 return entry;
12806 }
12807
12808 static _arm_elf_section_data *
12809 get_arm_elf_section_data (asection * sec)
12810 {
12811 struct section_list * entry;
12812
12813 entry = find_arm_elf_section_entry (sec);
12814
12815 if (entry)
12816 return elf32_arm_section_data (entry->sec);
12817 else
12818 return NULL;
12819 }
12820
12821 static void
12822 unrecord_section_with_arm_elf_section_data (asection * sec)
12823 {
12824 struct section_list * entry;
12825
12826 entry = find_arm_elf_section_entry (sec);
12827
12828 if (entry)
12829 {
12830 if (entry->prev != NULL)
12831 entry->prev->next = entry->next;
12832 if (entry->next != NULL)
12833 entry->next->prev = entry->prev;
12834 if (entry == sections_with_arm_elf_section_data)
12835 sections_with_arm_elf_section_data = entry->next;
12836 free (entry);
12837 }
12838 }
12839
12840
12841 typedef struct
12842 {
12843 void *finfo;
12844 struct bfd_link_info *info;
12845 asection *sec;
12846 int sec_shndx;
12847 int (*func) (void *, const char *, Elf_Internal_Sym *,
12848 asection *, struct elf_link_hash_entry *);
12849 } output_arch_syminfo;
12850
12851 enum map_symbol_type
12852 {
12853 ARM_MAP_ARM,
12854 ARM_MAP_THUMB,
12855 ARM_MAP_DATA
12856 };
12857
12858
12859 /* Output a single mapping symbol. */
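/* Per the ARM ELF conventions, mapping symbols mark the state of the
   bytes that follow: $a for Arm code, $t for Thumb code and $d for
   literal data.  The names array below indexes them by the enum
   map_symbol_type values above.  */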
12860
12861 static bfd_boolean
12862 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12863 enum map_symbol_type type,
12864 bfd_vma offset)
12865 {
12866 static const char *names[3] = {"$a", "$t", "$d"};
12867 struct elf32_arm_link_hash_table *htab;
12868 Elf_Internal_Sym sym;
12869
12870 htab = elf32_arm_hash_table (osi->info);
12871 sym.st_value = osi->sec->output_section->vma
12872 + osi->sec->output_offset
12873 + offset;
12874 sym.st_size = 0;
12875 sym.st_other = 0;
12876 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12877 sym.st_shndx = osi->sec_shndx;
12878 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12879 }
12880
12881
12882 /* Output mapping symbols for PLT entries associated with H. */
12883
12884 static bfd_boolean
12885 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12886 {
12887 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12888 struct elf32_arm_link_hash_table *htab;
12889 struct elf32_arm_link_hash_entry *eh;
12890 bfd_vma addr;
12891
12892 htab = elf32_arm_hash_table (osi->info);
12893
12894 if (h->root.type == bfd_link_hash_indirect)
12895 return TRUE;
12896
12897 if (h->root.type == bfd_link_hash_warning)
12898 /* When warning symbols are created, they **replace** the "real"
12899 entry in the hash table, thus we never get to see the real
12900 symbol in a hash traversal. So look at it now. */
12901 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12902
12903 if (h->plt.offset == (bfd_vma) -1)
12904 return TRUE;
12905
12906 eh = (struct elf32_arm_link_hash_entry *) h;
12907 addr = h->plt.offset;
12908 if (htab->symbian_p)
12909 {
12910 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12911 return FALSE;
12912 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12913 return FALSE;
12914 }
12915 else if (htab->vxworks_p)
12916 {
12917 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12918 return FALSE;
12919 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12920 return FALSE;
12921 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12922 return FALSE;
12923 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12924 return FALSE;
12925 }
12926 else
12927 {
12928 bfd_signed_vma thumb_refs;
12929
12930 thumb_refs = eh->plt_thumb_refcount;
12931 if (!htab->use_blx)
12932 thumb_refs += eh->plt_maybe_thumb_refcount;
12933
12934 if (thumb_refs > 0)
12935 {
12936 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12937 return FALSE;
12938 }
12939 #ifdef FOUR_WORD_PLT
12940 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12941 return FALSE;
12942 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12943 return FALSE;
12944 #else
12945 /* A three-word PLT entry with no Thumb thunk contains only ARM code, so we
12946 only need to output a mapping symbol for the first PLT entry (at offset 20,
12947 immediately after the PLT header) and for entries with Thumb thunks. */
12948 if (thumb_refs > 0 || addr == 20)
12949 {
12950 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12951 return FALSE;
12952 }
12953 #endif
12954 }
12955
12956 return TRUE;
12957 }
12958
12959 /* Output a single local symbol for a generated stub. */
12960
12961 static bfd_boolean
12962 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12963 bfd_vma offset, bfd_vma size)
12964 {
12965 struct elf32_arm_link_hash_table *htab;
12966 Elf_Internal_Sym sym;
12967
12968 htab = elf32_arm_hash_table (osi->info);
12969 sym.st_value = osi->sec->output_section->vma
12970 + osi->sec->output_offset
12971 + offset;
12972 sym.st_size = size;
12973 sym.st_other = 0;
12974 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12975 sym.st_shndx = osi->sec_shndx;
12976 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12977 }
12978
12979 static bfd_boolean
12980 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12981 void * in_arg)
12982 {
12983 struct elf32_arm_stub_hash_entry *stub_entry;
12984 struct bfd_link_info *info;
12985 struct elf32_arm_link_hash_table *htab;
12986 asection *stub_sec;
12987 bfd_vma addr;
12988 char *stub_name;
12989 output_arch_syminfo *osi;
12990 const insn_sequence *template_sequence;
12991 enum stub_insn_type prev_type;
12992 int size;
12993 int i;
12994 enum map_symbol_type sym_type;
12995
12996 /* Massage our args to the form they really have. */
12997 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12998 osi = (output_arch_syminfo *) in_arg;
12999
13000 info = osi->info;
13001
13002 htab = elf32_arm_hash_table (info);
13003 stub_sec = stub_entry->stub_sec;
13004
13005 /* Ensure this stub is attached to the current section being
13006 processed. */
13007 if (stub_sec != osi->sec)
13008 return TRUE;
13009
13010 addr = (bfd_vma) stub_entry->stub_offset;
13011 stub_name = stub_entry->output_name;
13012
13013 template_sequence = stub_entry->stub_template;
13014 switch (template_sequence[0].type)
13015 {
13016 case ARM_TYPE:
13017 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
13018 return FALSE;
13019 break;
13020 case THUMB16_TYPE:
13021 case THUMB32_TYPE:
13022 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
13023 stub_entry->stub_size))
13024 return FALSE;
13025 break;
13026 default:
13027 BFD_FAIL ();
13028 return FALSE;
13029 }
13030
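/* Walk the stub template and emit a new mapping symbol at each point where
   the instruction type changes (ARM, Thumb or data), keeping a running byte
   offset so that each symbol lands on the first unit of the new type.  */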
13031 prev_type = DATA_TYPE;
13032 size = 0;
13033 for (i = 0; i < stub_entry->stub_template_size; i++)
13034 {
13035 switch (template_sequence[i].type)
13036 {
13037 case ARM_TYPE:
13038 sym_type = ARM_MAP_ARM;
13039 break;
13040
13041 case THUMB16_TYPE:
13042 case THUMB32_TYPE:
13043 sym_type = ARM_MAP_THUMB;
13044 break;
13045
13046 case DATA_TYPE:
13047 sym_type = ARM_MAP_DATA;
13048 break;
13049
13050 default:
13051 BFD_FAIL ();
13052 return FALSE;
13053 }
13054
13055 if (template_sequence[i].type != prev_type)
13056 {
13057 prev_type = template_sequence[i].type;
13058 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13059 return FALSE;
13060 }
13061
13062 switch (template_sequence[i].type)
13063 {
13064 case ARM_TYPE:
13065 case THUMB32_TYPE:
13066 size += 4;
13067 break;
13068
13069 case THUMB16_TYPE:
13070 size += 2;
13071 break;
13072
13073 case DATA_TYPE:
13074 size += 4;
13075 break;
13076
13077 default:
13078 BFD_FAIL ();
13079 return FALSE;
13080 }
13081 }
13082
13083 return TRUE;
13084 }
13085
13086 /* Output mapping symbols for linker generated sections. */
13087
13088 static bfd_boolean
13089 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13090 struct bfd_link_info *info,
13091 void *finfo,
13092 int (*func) (void *, const char *,
13093 Elf_Internal_Sym *,
13094 asection *,
13095 struct elf_link_hash_entry *))
13096 {
13097 output_arch_syminfo osi;
13098 struct elf32_arm_link_hash_table *htab;
13099 bfd_vma offset;
13100 bfd_size_type size;
13101
13102 htab = elf32_arm_hash_table (info);
13103 check_use_blx (htab);
13104
13105 osi.finfo = finfo;
13106 osi.info = info;
13107 osi.func = func;
13108
13109 /* ARM->Thumb glue. */
13110 if (htab->arm_glue_size > 0)
13111 {
13112 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13113 ARM2THUMB_GLUE_SECTION_NAME);
13114
13115 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13116 (output_bfd, osi.sec->output_section);
13117 if (info->shared || htab->root.is_relocatable_executable
13118 || htab->pic_veneer)
13119 size = ARM2THUMB_PIC_GLUE_SIZE;
13120 else if (htab->use_blx)
13121 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13122 else
13123 size = ARM2THUMB_STATIC_GLUE_SIZE;
13124
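/* Each ARM->Thumb veneer consists of ARM code ending in a literal word
   (the destination address, or an offset for the PIC veneer), hence the
   $a symbol at the start of each veneer and the $d symbol four bytes
   before its end.  */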
13125 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13126 {
13127 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13128 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13129 }
13130 }
13131
13132 /* Thumb->ARM glue. */
13133 if (htab->thumb_glue_size > 0)
13134 {
13135 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13136 THUMB2ARM_GLUE_SECTION_NAME);
13137
13138 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13139 (output_bfd, osi.sec->output_section);
13140 size = THUMB2ARM_GLUE_SIZE;
13141
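/* Each Thumb->ARM veneer starts with a four-byte Thumb sequence (bx pc
   followed by a nop) and continues in ARM state, hence the $t symbol at
   its start and the $a symbol four bytes in.  */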
13142 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13143 {
13144 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13145 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13146 }
13147 }
13148
13149 /* ARMv4 BX veneers. */
13150 if (htab->bx_glue_size > 0)
13151 {
13152 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13153 ARM_BX_GLUE_SECTION_NAME);
13154
13155 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13156 (output_bfd, osi.sec->output_section);
13157
13158 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13159 }
13160
13161 /* Long call stubs. */
13162 if (htab->stub_bfd && htab->stub_bfd->sections)
13163 {
13164 asection* stub_sec;
13165
13166 for (stub_sec = htab->stub_bfd->sections;
13167 stub_sec != NULL;
13168 stub_sec = stub_sec->next)
13169 {
13170 /* Ignore non-stub sections. */
13171 if (!strstr (stub_sec->name, STUB_SUFFIX))
13172 continue;
13173
13174 osi.sec = stub_sec;
13175
13176 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13177 (output_bfd, osi.sec->output_section);
13178
13179 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13180 }
13181 }
13182
13183 /* Finally, output mapping symbols for the PLT. */
13184 if (!htab->splt || htab->splt->size == 0)
13185 return TRUE;
13186
13187 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13188 htab->splt->output_section);
13189 osi.sec = htab->splt;
13190 /* Output mapping symbols for the plt header. SymbianOS does not have a
13191 plt header. */
13192 if (htab->vxworks_p)
13193 {
13194 /* VxWorks shared libraries have no PLT header. */
13195 if (!info->shared)
13196 {
13197 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13198 return FALSE;
13199 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13200 return FALSE;
13201 }
13202 }
13203 else if (!htab->symbian_p)
13204 {
13205 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13206 return FALSE;
13207 #ifndef FOUR_WORD_PLT
13208 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13209 return FALSE;
13210 #endif
13211 }
13212
13213 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13214 return TRUE;
13215 }
13216
13217 /* Allocate target specific section data. */
13218
13219 static bfd_boolean
13220 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13221 {
13222 if (!sec->used_by_bfd)
13223 {
13224 _arm_elf_section_data *sdata;
13225 bfd_size_type amt = sizeof (*sdata);
13226
13227 sdata = bfd_zalloc (abfd, amt);
13228 if (sdata == NULL)
13229 return FALSE;
13230 sec->used_by_bfd = sdata;
13231 }
13232
13233 record_section_with_arm_elf_section_data (sec);
13234
13235 return _bfd_elf_new_section_hook (abfd, sec);
13236 }
13237
13238
13239 /* Used to order a list of mapping symbols by address. */
13240
13241 static int
13242 elf32_arm_compare_mapping (const void * a, const void * b)
13243 {
13244 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13245 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13246
13247 if (amap->vma > bmap->vma)
13248 return 1;
13249 else if (amap->vma < bmap->vma)
13250 return -1;
13251 else if (amap->type > bmap->type)
13252 /* Ensure results do not depend on the host qsort for objects with
13253 multiple mapping symbols at the same address by sorting on type
13254 after vma. */
13255 return 1;
13256 else if (amap->type < bmap->type)
13257 return -1;
13258 else
13259 return 0;
13260 }
13261
13262 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
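/* For example (illustrative values only): with ADDR 0x80000010 and OFFSET
   0x20, the low 31 bits become (0x00000010 + 0x20) & 0x7fffffff = 0x30 and
   the preserved top bit gives 0x80000030.  Any carry out of bit 30 is
   discarded rather than propagated into the top bit.  */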
13263
13264 static unsigned long
13265 offset_prel31 (unsigned long addr, bfd_vma offset)
13266 {
13267 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13268 }
13269
13270 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13271 relocations. */
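/* Each .ARM.exidx entry is two words: a PREL31 offset to the function it
   covers, then either the value 1 (EXIDX_CANTUNWIND), an inline unwind
   descriptor with the top bit set, or a PREL31 offset to an .ARM.extab
   entry.  Only the PREL31 offsets need adjusting when entries move.  */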
13272
13273 static void
13274 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13275 {
13276 unsigned long first_word = bfd_get_32 (output_bfd, from);
13277 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13278
13279 /* High bit of first word is supposed to be zero. */
13280 if ((first_word & 0x80000000ul) == 0)
13281 first_word = offset_prel31 (first_word, offset);
13282
13283 /* If the high bit of the second word is clear, and its value is not 0x1
13284 (EXIDX_CANTUNWIND), it is a PREL31 offset to an .ARM.extab entry. */
13285 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13286 second_word = offset_prel31 (second_word, offset);
13287
13288 bfd_put_32 (output_bfd, first_word, to);
13289 bfd_put_32 (output_bfd, second_word, to + 4);
13290 }
13291
13292 /* Data for make_branch_to_a8_stub(). */
13293
13294 struct a8_branch_to_stub_data {
13295 asection *writing_section;
13296 bfd_byte *contents;
13297 };
13298
13299
13300 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13301 places for a particular section. */
13302
13303 static bfd_boolean
13304 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13305 void *in_arg)
13306 {
13307 struct elf32_arm_stub_hash_entry *stub_entry;
13308 struct a8_branch_to_stub_data *data;
13309 bfd_byte *contents;
13310 unsigned long branch_insn;
13311 bfd_vma veneered_insn_loc, veneer_entry_loc;
13312 bfd_signed_vma branch_offset;
13313 bfd *abfd;
13314 unsigned int index;
13315
13316 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13317 data = (struct a8_branch_to_stub_data *) in_arg;
13318
13319 if (stub_entry->target_section != data->writing_section
13320 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13321 return TRUE;
13322
13323 contents = data->contents;
13324
13325 veneered_insn_loc = stub_entry->target_section->output_section->vma
13326 + stub_entry->target_section->output_offset
13327 + stub_entry->target_value;
13328
13329 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13330 + stub_entry->stub_sec->output_offset
13331 + stub_entry->stub_offset;
13332
13333 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13334 veneered_insn_loc &= ~3u;
13335
13336 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13337
13338 abfd = stub_entry->target_section->owner;
13339 index = stub_entry->target_value;
13340
13341 /* We attempt to avoid this condition by setting stubs_always_after_branch
13342 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13343 This check is just to be on the safe side... */
13344 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13345 {
13346 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13347 "allocated in unsafe location"), abfd);
13348 return FALSE;
13349 }
13350
13351 switch (stub_entry->stub_type)
13352 {
13353 case arm_stub_a8_veneer_b:
13354 case arm_stub_a8_veneer_b_cond:
13355 branch_insn = 0xf0009000;
13356 goto jump24;
13357
13358 case arm_stub_a8_veneer_blx:
13359 branch_insn = 0xf000e800;
13360 goto jump24;
13361
13362 case arm_stub_a8_veneer_bl:
13363 {
13364 unsigned int i1, j1, i2, j2, s;
13365
13366 branch_insn = 0xf000d000;
13367
13368 jump24:
13369 if (branch_offset < -16777216 || branch_offset > 16777214)
13370 {
13371 /* There's not much we can do apart from complain if this
13372 happens. */
13373 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13374 "of range (input file too large)"), abfd);
13375 return FALSE;
13376 }
13377
13378 /* i1 = not(j1 eor s), so:
13379 not i1 = j1 eor s
13380 j1 = (not i1) eor s. */
13381
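/* Sketch of the Thumb-2 branch encoding assembled below: imm11 (offset
   bits 11..1) goes in the low halfword, imm10 (offset bits 21..12) and
   the sign bit S (offset bit 24) in the high halfword, and J1/J2 encode
   offset bits 23..22 via the XOR with S described above, giving the
   +/-16 MiB range checked earlier.  */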
13382 branch_insn |= (branch_offset >> 1) & 0x7ff;
13383 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13384 i2 = (branch_offset >> 22) & 1;
13385 i1 = (branch_offset >> 23) & 1;
13386 s = (branch_offset >> 24) & 1;
13387 j1 = (!i1) ^ s;
13388 j2 = (!i2) ^ s;
13389 branch_insn |= j2 << 11;
13390 branch_insn |= j1 << 13;
13391 branch_insn |= s << 26;
13392 }
13393 break;
13394
13395 default:
13396 BFD_FAIL ();
13397 return FALSE;
13398 }
13399
13400 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13401 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13402
13403 return TRUE;
13404 }
13405
13406 /* Do code byteswapping, apply .ARM.exidx edits and redirect branches to
13407 Cortex-A8 erratum stubs.  Return FALSE afterwards so that the section is
written out as normal; edited .ARM.exidx sections are written here instead. */
13408
13409 static bfd_boolean
13410 elf32_arm_write_section (bfd *output_bfd,
13411 struct bfd_link_info *link_info,
13412 asection *sec,
13413 bfd_byte *contents)
13414 {
13415 unsigned int mapcount, errcount;
13416 _arm_elf_section_data *arm_data;
13417 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13418 elf32_arm_section_map *map;
13419 elf32_vfp11_erratum_list *errnode;
13420 bfd_vma ptr;
13421 bfd_vma end;
13422 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13423 bfd_byte tmp;
13424 unsigned int i;
13425
13426 /* If this section has not been allocated an _arm_elf_section_data
13427 structure then we cannot record anything. */
13428 arm_data = get_arm_elf_section_data (sec);
13429 if (arm_data == NULL)
13430 return FALSE;
13431
13432 mapcount = arm_data->mapcount;
13433 map = arm_data->map;
13434 errcount = arm_data->erratumcount;
13435
13436 if (errcount != 0)
13437 {
13438 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13439
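/* Two kinds of fix-up are applied here.  At the site of an erratum-prone
   VFP11 instruction we overwrite it with an ARM branch (using the original
   condition code) to its veneer; in the veneer itself we place the original
   instruction followed by an unconditional branch back to the instruction
   after the original site.  The endianflip XOR adjusts the byte indices so
   that the least-significant-byte-first stores below land correctly within
   each word on big-endian output.  */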
13440 for (errnode = arm_data->erratumlist; errnode != 0;
13441 errnode = errnode->next)
13442 {
13443 bfd_vma index = errnode->vma - offset;
13444
13445 switch (errnode->type)
13446 {
13447 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13448 {
13449 bfd_vma branch_to_veneer;
13450 /* Original condition code of instruction, plus bit mask for
13451 ARM B instruction. */
13452 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13453 | 0x0a000000;
13454
13455 /* The instruction is before the label. */
13456 index -= 4;
13457
13458 /* The -4 below folds together the ARM PC+8 bias and the instruction being 4 bytes before errnode->vma (see index -= 4 above). */
13459 branch_to_veneer = errnode->u.b.veneer->vma
13460 - errnode->vma - 4;
13461
13462 if ((signed) branch_to_veneer < -(1 << 25)
13463 || (signed) branch_to_veneer >= (1 << 25))
13464 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13465 "range"), output_bfd);
13466
13467 insn |= (branch_to_veneer >> 2) & 0xffffff;
13468 contents[endianflip ^ index] = insn & 0xff;
13469 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13470 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13471 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13472 }
13473 break;
13474
13475 case VFP11_ERRATUM_ARM_VENEER:
13476 {
13477 bfd_vma branch_from_veneer;
13478 unsigned int insn;
13479
13480 /* Take size of veneer into account. */
13481 branch_from_veneer = errnode->u.v.branch->vma
13482 - errnode->vma - 12;
13483
13484 if ((signed) branch_from_veneer < -(1 << 25)
13485 || (signed) branch_from_veneer >= (1 << 25))
13486 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13487 "range"), output_bfd);
13488
13489 /* Original instruction. */
13490 insn = errnode->u.v.branch->u.b.vfp_insn;
13491 contents[endianflip ^ index] = insn & 0xff;
13492 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13493 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13494 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13495
13496 /* Branch back to insn after original insn. */
13497 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13498 contents[endianflip ^ (index + 4)] = insn & 0xff;
13499 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13500 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13501 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13502 }
13503 break;
13504
13505 default:
13506 abort ();
13507 }
13508 }
13509 }
13510
13511 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13512 {
13513 arm_unwind_table_edit *edit_node
13514 = arm_data->u.exidx.unwind_edit_list;
13515 /* Now, sec->size is the size of the section we will write. The original
13516 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13517 markers) was sec->rawsize. (If we made no edits, rawsize will be zero
13518 and we should use size instead.) */
13519 bfd_byte *edited_contents = bfd_malloc (sec->size);
13520 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13521 unsigned int in_index, out_index;
13522 bfd_vma add_to_offsets = 0;
13523
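/* Walk the original table and the edit list in parallel.  Entries are
   copied through copy_exidx_entry, which rebiases their applied PREL31
   offsets by add_to_offsets: deleting an entry moves the following entries
   up by 8 bytes (so their offsets grow by 8), while inserting a CANTUNWIND
   terminator pushes them down by 8 (so their offsets shrink by 8).  */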
13524 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13525 {
13526 if (edit_node)
13527 {
13528 unsigned int edit_index = edit_node->index;
13529
13530 if (in_index < edit_index && in_index * 8 < input_size)
13531 {
13532 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13533 contents + in_index * 8, add_to_offsets);
13534 out_index++;
13535 in_index++;
13536 }
13537 else if (in_index == edit_index
13538 || (in_index * 8 >= input_size
13539 && edit_index == UINT_MAX))
13540 {
13541 switch (edit_node->type)
13542 {
13543 case DELETE_EXIDX_ENTRY:
13544 in_index++;
13545 add_to_offsets += 8;
13546 break;
13547
13548 case INSERT_EXIDX_CANTUNWIND_AT_END:
13549 {
13550 asection *text_sec = edit_node->linked_section;
13551 bfd_vma text_offset = text_sec->output_section->vma
13552 + text_sec->output_offset
13553 + text_sec->size;
13554 bfd_vma exidx_offset = offset + out_index * 8;
13555 unsigned long prel31_offset;
13556
13557 /* Note: this is meant to be equivalent to an
13558 R_ARM_PREL31 relocation. These synthetic
13559 EXIDX_CANTUNWIND markers are not relocated by the
13560 usual BFD method. */
13561 prel31_offset = (text_offset - exidx_offset)
13562 & 0x7ffffffful;
13563
13564 /* First address we can't unwind. */
13565 bfd_put_32 (output_bfd, prel31_offset,
13566 &edited_contents[out_index * 8]);
13567
13568 /* Code for EXIDX_CANTUNWIND. */
13569 bfd_put_32 (output_bfd, 0x1,
13570 &edited_contents[out_index * 8 + 4]);
13571
13572 out_index++;
13573 add_to_offsets -= 8;
13574 }
13575 break;
13576 }
13577
13578 edit_node = edit_node->next;
13579 }
13580 }
13581 else
13582 {
13583 /* No more edits, copy remaining entries verbatim. */
13584 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13585 contents + in_index * 8, add_to_offsets);
13586 out_index++;
13587 in_index++;
13588 }
13589 }
13590
13591 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13592 bfd_set_section_contents (output_bfd, sec->output_section,
13593 edited_contents,
13594 (file_ptr) sec->output_offset, sec->size);
13595
13596 return TRUE;
13597 }
13598
13599 /* Fix code to point to Cortex-A8 erratum stubs. */
13600 if (globals->fix_cortex_a8)
13601 {
13602 struct a8_branch_to_stub_data data;
13603
13604 data.writing_section = sec;
13605 data.contents = contents;
13606
13607 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13608 &data);
13609 }
13610
13611 if (mapcount == 0)
13612 return FALSE;
13613
13614 if (globals->byteswap_code)
13615 {
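/* Mapping symbol types were recorded as the characters 'a', 't' and 'd'
   ($a, $t, $d).  Sort the map by address, then byte-swap each region
   according to its type: 32-bit words for ARM code, 16-bit halfwords for
   Thumb code, and leave data untouched.  */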
13616 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13617
13618 ptr = map[0].vma;
13619 for (i = 0; i < mapcount; i++)
13620 {
13621 if (i == mapcount - 1)
13622 end = sec->size;
13623 else
13624 end = map[i + 1].vma;
13625
13626 switch (map[i].type)
13627 {
13628 case 'a':
13629 /* Byte swap code words. */
13630 while (ptr + 3 < end)
13631 {
13632 tmp = contents[ptr];
13633 contents[ptr] = contents[ptr + 3];
13634 contents[ptr + 3] = tmp;
13635 tmp = contents[ptr + 1];
13636 contents[ptr + 1] = contents[ptr + 2];
13637 contents[ptr + 2] = tmp;
13638 ptr += 4;
13639 }
13640 break;
13641
13642 case 't':
13643 /* Byte swap code halfwords. */
13644 while (ptr + 1 < end)
13645 {
13646 tmp = contents[ptr];
13647 contents[ptr] = contents[ptr + 1];
13648 contents[ptr + 1] = tmp;
13649 ptr += 2;
13650 }
13651 break;
13652
13653 case 'd':
13654 /* Leave data alone. */
13655 break;
13656 }
13657 ptr = end;
13658 }
13659 }
13660
13661 free (map);
13662 arm_data->mapcount = 0;
13663 arm_data->mapsize = 0;
13664 arm_data->map = NULL;
13665 unrecord_section_with_arm_elf_section_data (sec);
13666
13667 return FALSE;
13668 }
13669
13670 static void
13671 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13672 asection * sec,
13673 void * ignore ATTRIBUTE_UNUSED)
13674 {
13675 unrecord_section_with_arm_elf_section_data (sec);
13676 }
13677
13678 static bfd_boolean
13679 elf32_arm_close_and_cleanup (bfd * abfd)
13680 {
13681 if (abfd->sections)
13682 bfd_map_over_sections (abfd,
13683 unrecord_section_via_map_over_sections,
13684 NULL);
13685
13686 return _bfd_elf_close_and_cleanup (abfd);
13687 }
13688
13689 static bfd_boolean
13690 elf32_arm_bfd_free_cached_info (bfd * abfd)
13691 {
13692 if (abfd->sections)
13693 bfd_map_over_sections (abfd,
13694 unrecord_section_via_map_over_sections,
13695 NULL);
13696
13697 return _bfd_free_cached_info (abfd);
13698 }
13699
13700 /* Display STT_ARM_TFUNC symbols as functions. */
13701
13702 static void
13703 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13704 asymbol *asym)
13705 {
13706 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13707
13708 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13709 elfsym->symbol.flags |= BSF_FUNCTION;
13710 }
13711
13712
13713 /* Mangle thumb function symbols as we read them in. */
13714
13715 static bfd_boolean
13716 elf32_arm_swap_symbol_in (bfd * abfd,
13717 const void *psrc,
13718 const void *pshn,
13719 Elf_Internal_Sym *dst)
13720 {
13721 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13722 return FALSE;
13723
13724 /* New EABI objects mark thumb function symbols by setting the low bit of
13725 the address. Turn these into STT_ARM_TFUNC. */
13726 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13727 && (dst->st_value & 1))
13728 {
13729 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13730 dst->st_value &= ~(bfd_vma) 1;
13731 }
13732 return TRUE;
13733 }
13734
13735
13736 /* Mangle thumb function symbols as we write them out. */
13737
13738 static void
13739 elf32_arm_swap_symbol_out (bfd *abfd,
13740 const Elf_Internal_Sym *src,
13741 void *cdst,
13742 void *shndx)
13743 {
13744 Elf_Internal_Sym newsym;
13745
13746 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13747 of the address set, as per the new EABI. We do this unconditionally
13748 because objcopy does not set the elf header flags until after
13749 it writes out the symbol table. */
13750 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13751 {
13752 newsym = *src;
13753 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13754 if (newsym.st_shndx != SHN_UNDEF)
13755 {
13756 /* Do this only for defined symbols. At link time, the static
13757 linker simulates the work of the dynamic linker by resolving
13758 symbols, and carries over the Thumb-ness of the symbols it finds
13759 to the output symbol table. Exactly how that happens is not
13760 clear, but the Thumb-ness of undefined symbols may well be
13761 different at run time, so writing '1' for them would be confusing
13762 for users and possibly for the dynamic linker itself.
13763 */
13764 newsym.st_value |= 1;
13765 }
13766
13767 src = &newsym;
13768 }
13769 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13770 }
13771
13772 /* Add the PT_ARM_EXIDX program header. */
13773
13774 static bfd_boolean
13775 elf32_arm_modify_segment_map (bfd *abfd,
13776 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13777 {
13778 struct elf_segment_map *m;
13779 asection *sec;
13780
13781 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13782 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13783 {
13784 /* If there is already a PT_ARM_EXIDX header, then we do not
13785 want to add another one. This situation arises when running
13786 "strip"; the input binary already has the header. */
13787 m = elf_tdata (abfd)->segment_map;
13788 while (m && m->p_type != PT_ARM_EXIDX)
13789 m = m->next;
13790 if (!m)
13791 {
13792 m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13793 if (m == NULL)
13794 return FALSE;
13795 m->p_type = PT_ARM_EXIDX;
13796 m->count = 1;
13797 m->sections[0] = sec;
13798
13799 m->next = elf_tdata (abfd)->segment_map;
13800 elf_tdata (abfd)->segment_map = m;
13801 }
13802 }
13803
13804 return TRUE;
13805 }
13806
13807 /* We may add a PT_ARM_EXIDX program header. */
13808
13809 static int
13810 elf32_arm_additional_program_headers (bfd *abfd,
13811 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13812 {
13813 asection *sec;
13814
13815 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13816 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13817 return 1;
13818 else
13819 return 0;
13820 }
13821
13822 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13823
13824 static bfd_boolean
13825 elf32_arm_is_function_type (unsigned int type)
13826 {
13827 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13828 }
13829
13830 /* We use this to override swap_symbol_in and swap_symbol_out. */
13831 const struct elf_size_info elf32_arm_size_info =
13832 {
13833 sizeof (Elf32_External_Ehdr),
13834 sizeof (Elf32_External_Phdr),
13835 sizeof (Elf32_External_Shdr),
13836 sizeof (Elf32_External_Rel),
13837 sizeof (Elf32_External_Rela),
13838 sizeof (Elf32_External_Sym),
13839 sizeof (Elf32_External_Dyn),
13840 sizeof (Elf_External_Note),
13841 4,
13842 1,
13843 32, 2,
13844 ELFCLASS32, EV_CURRENT,
13845 bfd_elf32_write_out_phdrs,
13846 bfd_elf32_write_shdrs_and_ehdr,
13847 bfd_elf32_checksum_contents,
13848 bfd_elf32_write_relocs,
13849 elf32_arm_swap_symbol_in,
13850 elf32_arm_swap_symbol_out,
13851 bfd_elf32_slurp_reloc_table,
13852 bfd_elf32_slurp_symbol_table,
13853 bfd_elf32_swap_dyn_in,
13854 bfd_elf32_swap_dyn_out,
13855 bfd_elf32_swap_reloc_in,
13856 bfd_elf32_swap_reloc_out,
13857 bfd_elf32_swap_reloca_in,
13858 bfd_elf32_swap_reloca_out
13859 };
13860
13861 #define ELF_ARCH bfd_arch_arm
13862 #define ELF_MACHINE_CODE EM_ARM
13863 #ifdef __QNXTARGET__
13864 #define ELF_MAXPAGESIZE 0x1000
13865 #else
13866 #define ELF_MAXPAGESIZE 0x8000
13867 #endif
13868 #define ELF_MINPAGESIZE 0x1000
13869 #define ELF_COMMONPAGESIZE 0x1000
13870
13871 #define bfd_elf32_mkobject elf32_arm_mkobject
13872
13873 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13874 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13875 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13876 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13877 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13878 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13879 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13880 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13881 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13882 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13883 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13884 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13885 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13886 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13887 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13888
13889 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13890 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13891 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13892 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13893 #define elf_backend_check_relocs elf32_arm_check_relocs
13894 #define elf_backend_relocate_section elf32_arm_relocate_section
13895 #define elf_backend_write_section elf32_arm_write_section
13896 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13897 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13898 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13899 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13900 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13901 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13902 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13903 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13904 #define elf_backend_object_p elf32_arm_object_p
13905 #define elf_backend_section_flags elf32_arm_section_flags
13906 #define elf_backend_fake_sections elf32_arm_fake_sections
13907 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13908 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13909 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13910 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13911 #define elf_backend_size_info elf32_arm_size_info
13912 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13913 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13914 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13915 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13916 #define elf_backend_is_function_type elf32_arm_is_function_type
13917
13918 #define elf_backend_can_refcount 1
13919 #define elf_backend_can_gc_sections 1
13920 #define elf_backend_plt_readonly 1
13921 #define elf_backend_want_got_plt 1
13922 #define elf_backend_want_plt_sym 0
13923 #define elf_backend_may_use_rel_p 1
13924 #define elf_backend_may_use_rela_p 0
13925 #define elf_backend_default_use_rela_p 0
13926
13927 #define elf_backend_got_header_size 12
13928
13929 #undef elf_backend_obj_attrs_vendor
13930 #define elf_backend_obj_attrs_vendor "aeabi"
13931 #undef elf_backend_obj_attrs_section
13932 #define elf_backend_obj_attrs_section ".ARM.attributes"
13933 #undef elf_backend_obj_attrs_arg_type
13934 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13935 #undef elf_backend_obj_attrs_section_type
13936 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13937 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13938
13939 #include "elf32-target.h"
13940
13941 /* VxWorks Targets. */
13942
13943 #undef TARGET_LITTLE_SYM
13944 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13945 #undef TARGET_LITTLE_NAME
13946 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13947 #undef TARGET_BIG_SYM
13948 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13949 #undef TARGET_BIG_NAME
13950 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13951
13952 /* Like elf32_arm_link_hash_table_create -- but overrides
13953 appropriately for VxWorks. */
13954
13955 static struct bfd_link_hash_table *
13956 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13957 {
13958 struct bfd_link_hash_table *ret;
13959
13960 ret = elf32_arm_link_hash_table_create (abfd);
13961 if (ret)
13962 {
13963 struct elf32_arm_link_hash_table *htab
13964 = (struct elf32_arm_link_hash_table *) ret;
13965 htab->use_rel = 0;
13966 htab->vxworks_p = 1;
13967 }
13968 return ret;
13969 }
13970
13971 static void
13972 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13973 {
13974 elf32_arm_final_write_processing (abfd, linker);
13975 elf_vxworks_final_write_processing (abfd, linker);
13976 }
13977
13978 #undef elf32_bed
13979 #define elf32_bed elf32_arm_vxworks_bed
13980
13981 #undef bfd_elf32_bfd_link_hash_table_create
13982 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13983 #undef elf_backend_add_symbol_hook
13984 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13985 #undef elf_backend_final_write_processing
13986 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13987 #undef elf_backend_emit_relocs
13988 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13989
13990 #undef elf_backend_may_use_rel_p
13991 #define elf_backend_may_use_rel_p 0
13992 #undef elf_backend_may_use_rela_p
13993 #define elf_backend_may_use_rela_p 1
13994 #undef elf_backend_default_use_rela_p
13995 #define elf_backend_default_use_rela_p 1
13996 #undef elf_backend_want_plt_sym
13997 #define elf_backend_want_plt_sym 1
13998 #undef ELF_MAXPAGESIZE
13999 #define ELF_MAXPAGESIZE 0x1000
14000
14001 #include "elf32-target.h"
14002
14003
14004 /* Symbian OS Targets. */
14005
14006 #undef TARGET_LITTLE_SYM
14007 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14008 #undef TARGET_LITTLE_NAME
14009 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14010 #undef TARGET_BIG_SYM
14011 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14012 #undef TARGET_BIG_NAME
14013 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14014
14015 /* Like elf32_arm_link_hash_table_create -- but overrides
14016 appropriately for Symbian OS. */
14017
14018 static struct bfd_link_hash_table *
14019 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14020 {
14021 struct bfd_link_hash_table *ret;
14022
14023 ret = elf32_arm_link_hash_table_create (abfd);
14024 if (ret)
14025 {
14026 struct elf32_arm_link_hash_table *htab
14027 = (struct elf32_arm_link_hash_table *)ret;
14028 /* There is no PLT header for Symbian OS. */
14029 htab->plt_header_size = 0;
14030 /* The PLT entries are each one instruction and one word. */
14031 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14032 htab->symbian_p = 1;
14033 /* Symbian uses armv5t or above, so use_blx is always true. */
14034 htab->use_blx = 1;
14035 htab->root.is_relocatable_executable = 1;
14036 }
14037 return ret;
14038 }
14039
14040 static const struct bfd_elf_special_section
14041 elf32_arm_symbian_special_sections[] =
14042 {
14043 /* In a BPABI executable, the dynamic linking sections do not go in
14044 the loadable read-only segment. The post-linker may wish to
14045 refer to these sections, but they are not part of the final
14046 program image. */
14047 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14048 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14049 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14050 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14051 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14052 /* These sections do not need to be writable as the SymbianOS
14053 postlinker will arrange things so that no dynamic relocation is
14054 required. */
14055 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14056 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14057 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14058 { NULL, 0, 0, 0, 0 }
14059 };
14060
14061 static void
14062 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14063 struct bfd_link_info *link_info)
14064 {
14065 /* BPABI objects are never loaded directly by an OS kernel; they are
14066 processed by a postlinker first, into an OS-specific format. If
14067 the D_PAGED bit is set on the file, BFD will align segments on
14068 page boundaries, so that an OS can directly map the file. With
14069 BPABI objects, that just results in wasted space. In addition,
14070 because we clear the D_PAGED bit, map_sections_to_segments will
14071 recognize that the program headers should not be mapped into any
14072 loadable segment. */
14073 abfd->flags &= ~D_PAGED;
14074 elf32_arm_begin_write_processing (abfd, link_info);
14075 }
14076
14077 static bfd_boolean
14078 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14079 struct bfd_link_info *info)
14080 {
14081 struct elf_segment_map *m;
14082 asection *dynsec;
14083
14084 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14085 segment. However, because the .dynamic section is not marked
14086 with SEC_LOAD, the generic ELF code will not create such a
14087 segment. */
14088 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14089 if (dynsec)
14090 {
14091 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14092 if (m->p_type == PT_DYNAMIC)
14093 break;
14094
14095 if (m == NULL)
14096 {
14097 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14098 m->next = elf_tdata (abfd)->segment_map;
14099 elf_tdata (abfd)->segment_map = m;
14100 }
14101 }
14102
14103 /* Also call the generic arm routine. */
14104 return elf32_arm_modify_segment_map (abfd, info);
14105 }
14106
14107 /* Return address for Ith PLT stub in section PLT, for relocation REL
14108 or (bfd_vma) -1 if it should not be included. */
14109
14110 static bfd_vma
14111 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14112 const arelent *rel ATTRIBUTE_UNUSED)
14113 {
14114 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14115 }
14116
14117
14118 #undef elf32_bed
14119 #define elf32_bed elf32_arm_symbian_bed
14120
14121 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14122 will process them and then discard them. */
14123 #undef ELF_DYNAMIC_SEC_FLAGS
14124 #define ELF_DYNAMIC_SEC_FLAGS \
14125 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14126
14127 #undef elf_backend_add_symbol_hook
14128 #undef elf_backend_emit_relocs
14129
14130 #undef bfd_elf32_bfd_link_hash_table_create
14131 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14132 #undef elf_backend_special_sections
14133 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14134 #undef elf_backend_begin_write_processing
14135 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14136 #undef elf_backend_final_write_processing
14137 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14138
14139 #undef elf_backend_modify_segment_map
14140 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14141
14142 /* There is no .got section for BPABI objects, and hence no header. */
14143 #undef elf_backend_got_header_size
14144 #define elf_backend_got_header_size 0
14145
14146 /* Similarly, there is no .got.plt section. */
14147 #undef elf_backend_want_got_plt
14148 #define elf_backend_want_got_plt 0
14149
14150 #undef elf_backend_plt_sym_val
14151 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14152
14153 #undef elf_backend_may_use_rel_p
14154 #define elf_backend_may_use_rel_p 1
14155 #undef elf_backend_may_use_rela_p
14156 #define elf_backend_may_use_rela_p 0
14157 #undef elf_backend_default_use_rela_p
14158 #define elf_backend_default_use_rela_p 0
14159 #undef elf_backend_want_plt_sym
14160 #define elf_backend_want_plt_sym 0
14161 #undef ELF_MAXPAGESIZE
14162 #define ELF_MAXPAGESIZE 0x8000
14163
14164 #include "elf32-target.h"