1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
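
/* A minimal usage sketch (illustrative only, not part of the backend; the
   hash table type is defined further down in this file, so the block is
   kept disabled): the four macros above let callers stay agnostic about
   whether the target uses REL or RELA relocations.  */
#if 0
static void
example_pick_reloc_flavour (struct elf32_arm_link_hash_table *htab)
{
  /* ".rel.text" when use_rel is set, ".rela.text" otherwise.  */
  const char *secname = RELOC_SECTION (htab, ".text");
  /* Matching external relocation entry size for that flavour.  */
  bfd_size_type entsize = RELOC_SIZE (htab);
  /* And the matching swap-in routine.  */
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *)
    = SWAP_RELOC_IN (htab);

  (void) secname; (void) entsize; (void) swap_in;
}
#endif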
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static struct elf_backend_data elf32_arm_vxworks_bed;
65
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
68 asection *sec,
69 bfd_byte *contents);
70
71 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
72 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
73 in that slot. */
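   For example (illustrative): elf32_arm_howto_table_1[R_ARM_PC24] is the
   entry whose name field is "R_ARM_PC24"; elf32_arm_howto_from_type and
   the lookup helpers near the end of this file rely on that ordering.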
74
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
79 0, /* rightshift */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
81 0, /* bitsize */
82 FALSE, /* pc_relative */
83 0, /* bitpos */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
88 0, /* src_mask */
89 0, /* dst_mask */
90 FALSE), /* pcrel_offset */
91
92 HOWTO (R_ARM_PC24, /* type */
93 2, /* rightshift */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
95 24, /* bitsize */
96 TRUE, /* pc_relative */
97 0, /* bitpos */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
105
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120
121 /* standard 32bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
123 0, /* rightshift */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
125 32, /* bitsize */
126 TRUE, /* pc_relative */
127 0, /* bitpos */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
135
136 /* PC relative load/store - R_ARM_LDR_PC_G0 in AAELF. */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
138 0, /* rightshift */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
140 32, /* bitsize */
141 TRUE, /* pc_relative */
142 0, /* bitpos */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
150
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
153 0, /* rightshift */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
155 16, /* bitsize */
156 FALSE, /* pc_relative */
157 0, /* bitpos */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
165
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
168 0, /* rightshift */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
170 12, /* bitsize */
171 FALSE, /* pc_relative */
172 0, /* bitpos */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
180
181 HOWTO (R_ARM_THM_ABS5, /* type */
182 6, /* rightshift */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
184 5, /* bitsize */
185 FALSE, /* pc_relative */
186 0, /* bitpos */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
194
195 /* 8 bit absolute */
196 HOWTO (R_ARM_ABS8, /* type */
197 0, /* rightshift */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
199 8, /* bitsize */
200 FALSE, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
209
210 HOWTO (R_ARM_SBREL32, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 FALSE, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
223
224 HOWTO (R_ARM_THM_CALL, /* type */
225 1, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 25, /* bitsize */
228 TRUE, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
237
238 HOWTO (R_ARM_THM_PC8, /* type */
239 1, /* rightshift */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
241 8, /* bitsize */
242 TRUE, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
251
252 HOWTO (R_ARM_BREL_ADJ, /* type */
253 1, /* rightshift */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 FALSE, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
265
266 HOWTO (R_ARM_SWI24, /* type */
267 0, /* rightshift */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
269 0, /* bitsize */
270 FALSE, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
279
280 HOWTO (R_ARM_THM_SWI8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 0, /* bitsize */
284 FALSE, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
293
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
296 2, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 25, /* bitsize */
299 TRUE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
308
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
311 2, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 22, /* bitsize */
314 TRUE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
323
324 /* Dynamic TLS relocations. */
325
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 FALSE, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
339
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 FALSE, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
353
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
355 0, /* rightshift */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
357 32, /* bitsize */
358 FALSE, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
367
368 /* Relocs used in ARM Linux */
369
370 HOWTO (R_ARM_COPY, /* type */
371 0, /* rightshift */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
373 32, /* bitsize */
374 FALSE, /* pc_relative */
375 0, /* bitpos */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
383
384 HOWTO (R_ARM_GLOB_DAT, /* type */
385 0, /* rightshift */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
387 32, /* bitsize */
388 FALSE, /* pc_relative */
389 0, /* bitpos */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
397
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
399 0, /* rightshift */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
401 32, /* bitsize */
402 FALSE, /* pc_relative */
403 0, /* bitpos */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
411
412 HOWTO (R_ARM_RELATIVE, /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 32, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 HOWTO (R_ARM_GOTOFF32, /* type */
427 0, /* rightshift */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
429 32, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 HOWTO (R_ARM_GOTPC, /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 32, /* bitsize */
444 TRUE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
453
454 HOWTO (R_ARM_GOT32, /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 32, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 HOWTO (R_ARM_PLT32, /* type */
469 2, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 24, /* bitsize */
472 TRUE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
481
482 HOWTO (R_ARM_CALL, /* type */
483 2, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 24, /* bitsize */
486 TRUE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
495
496 HOWTO (R_ARM_JUMP24, /* type */
497 2, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 24, /* bitsize */
500 TRUE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
509
510 HOWTO (R_ARM_THM_JUMP24, /* type */
511 1, /* rightshift */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
513 24, /* bitsize */
514 TRUE, /* pc_relative */
515 0, /* bitpos */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
523
524 HOWTO (R_ARM_BASE_ABS, /* type */
525 0, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 32, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
539 0, /* rightshift */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
541 12, /* bitsize */
542 TRUE, /* pc_relative */
543 0, /* bitpos */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
551
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
553 0, /* rightshift */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
555 12, /* bitsize */
556 TRUE, /* pc_relative */
557 8, /* bitpos */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
565
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
567 0, /* rightshift */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
569 12, /* bitsize */
570 TRUE, /* pc_relative */
571 16, /* bitpos */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
579
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
581 0, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 12, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 8, /* bitsize */
598 FALSE, /* pc_relative */
599 12, /* bitpos */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
609 0, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 8, /* bitsize */
612 FALSE, /* pc_relative */
613 20, /* bitpos */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
621
622 HOWTO (R_ARM_TARGET1, /* type */
623 0, /* rightshift */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
625 32, /* bitsize */
626 FALSE, /* pc_relative */
627 0, /* bitpos */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
635
636 HOWTO (R_ARM_ROSEGREL32, /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 32, /* bitsize */
640 FALSE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
649
650 HOWTO (R_ARM_V4BX, /* type */
651 0, /* rightshift */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
653 32, /* bitsize */
654 FALSE, /* pc_relative */
655 0, /* bitpos */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
663
664 HOWTO (R_ARM_TARGET2, /* type */
665 0, /* rightshift */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
667 32, /* bitsize */
668 FALSE, /* pc_relative */
669 0, /* bitpos */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
677
678 HOWTO (R_ARM_PREL31, /* type */
679 0, /* rightshift */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
681 31, /* bitsize */
682 TRUE, /* pc_relative */
683 0, /* bitpos */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
691
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
693 0, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 16, /* bitsize */
696 FALSE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
705
706 HOWTO (R_ARM_MOVT_ABS, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 16, /* bitsize */
710 FALSE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
719
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
721 0, /* rightshift */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
723 16, /* bitsize */
724 TRUE, /* pc_relative */
725 0, /* bitpos */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
733
734 HOWTO (R_ARM_MOVT_PREL, /* type */
735 0, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 16, /* bitsize */
738 TRUE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
747
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
749 0, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 16, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
763 0, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 16, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
777 0, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 16, /* bitsize */
780 TRUE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
789
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
791 0, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 16, /* bitsize */
794 TRUE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
803
804 HOWTO (R_ARM_THM_JUMP19, /* type */
805 1, /* rightshift */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
807 19, /* bitsize */
808 TRUE, /* pc_relative */
809 0, /* bitpos */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
817
818 HOWTO (R_ARM_THM_JUMP6, /* type */
819 1, /* rightshift */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
821 6, /* bitsize */
822 TRUE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
831
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
834 versa. */
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
836 0, /* rightshift */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
838 13, /* bitsize */
839 TRUE, /* pc_relative */
840 0, /* bitpos */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
848
849 HOWTO (R_ARM_THM_PC12, /* type */
850 0, /* rightshift */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
852 13, /* bitsize */
853 TRUE, /* pc_relative */
854 0, /* bitpos */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
862
863 HOWTO (R_ARM_ABS32_NOI, /* type */
864 0, /* rightshift */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
866 32, /* bitsize */
867 FALSE, /* pc_relative */
868 0, /* bitpos */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
876
877 HOWTO (R_ARM_REL32_NOI, /* type */
878 0, /* rightshift */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
880 32, /* bitsize */
881 TRUE, /* pc_relative */
882 0, /* bitpos */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
890
891 /* Group relocations. */
892
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
894 0, /* rightshift */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
896 32, /* bitsize */
897 TRUE, /* pc_relative */
898 0, /* bitpos */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
906
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
908 0, /* rightshift */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
910 32, /* bitsize */
911 TRUE, /* pc_relative */
912 0, /* bitpos */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
920
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
922 0, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 32, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
936 0, /* rightshift */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
938 32, /* bitsize */
939 TRUE, /* pc_relative */
940 0, /* bitpos */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
948
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
950 0, /* rightshift */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
952 32, /* bitsize */
953 TRUE, /* pc_relative */
954 0, /* bitpos */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
962
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
964 0, /* rightshift */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
966 32, /* bitsize */
967 TRUE, /* pc_relative */
968 0, /* bitpos */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
976
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
978 0, /* rightshift */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
980 32, /* bitsize */
981 TRUE, /* pc_relative */
982 0, /* bitpos */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
990
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
992 0, /* rightshift */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
994 32, /* bitsize */
995 TRUE, /* pc_relative */
996 0, /* bitpos */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1004
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1006 0, /* rightshift */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1008 32, /* bitsize */
1009 TRUE, /* pc_relative */
1010 0, /* bitpos */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1018
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 32, /* bitsize */
1023 TRUE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1032
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1034 0, /* rightshift */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 32, /* bitsize */
1037 TRUE, /* pc_relative */
1038 0, /* bitpos */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1046
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 32, /* bitsize */
1051 TRUE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1060
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 32, /* bitsize */
1065 TRUE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1074
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1076 0, /* rightshift */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1078 32, /* bitsize */
1079 TRUE, /* pc_relative */
1080 0, /* bitpos */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1088
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1090 0, /* rightshift */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1092 32, /* bitsize */
1093 TRUE, /* pc_relative */
1094 0, /* bitpos */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1102
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 32, /* bitsize */
1107 TRUE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1116
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1118 0, /* rightshift */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1120 32, /* bitsize */
1121 TRUE, /* pc_relative */
1122 0, /* bitpos */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1130
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1132 0, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 32, /* bitsize */
1135 TRUE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1144
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1146 0, /* rightshift */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1148 32, /* bitsize */
1149 TRUE, /* pc_relative */
1150 0, /* bitpos */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1158
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1160 0, /* rightshift */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1162 32, /* bitsize */
1163 TRUE, /* pc_relative */
1164 0, /* bitpos */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1172
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1174 0, /* rightshift */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1176 32, /* bitsize */
1177 TRUE, /* pc_relative */
1178 0, /* bitpos */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1186
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1188 0, /* rightshift */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1190 32, /* bitsize */
1191 TRUE, /* pc_relative */
1192 0, /* bitpos */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1200
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1202 0, /* rightshift */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 32, /* bitsize */
1205 TRUE, /* pc_relative */
1206 0, /* bitpos */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1214
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1216 0, /* rightshift */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 32, /* bitsize */
1219 TRUE, /* pc_relative */
1220 0, /* bitpos */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1228
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 32, /* bitsize */
1233 TRUE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1242
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1244 0, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 32, /* bitsize */
1247 TRUE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1256
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 32, /* bitsize */
1261 TRUE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1270
1271 /* End of group relocations. */
1272
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1274 0, /* rightshift */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1276 16, /* bitsize */
1277 FALSE, /* pc_relative */
1278 0, /* bitpos */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1286
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1288 0, /* rightshift */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1290 16, /* bitsize */
1291 FALSE, /* pc_relative */
1292 0, /* bitpos */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1300
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1302 0, /* rightshift */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1304 16, /* bitsize */
1305 FALSE, /* pc_relative */
1306 0, /* bitpos */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1314
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1316 0, /* rightshift */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1318 16, /* bitsize */
1319 FALSE, /* pc_relative */
1320 0, /* bitpos */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1328
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1330 0, /* rightshift */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1332 16, /* bitsize */
1333 FALSE, /* pc_relative */
1334 0, /* bitpos */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 EMPTY_HOWTO (90), /* Unallocated. */
1358 EMPTY_HOWTO (91),
1359 EMPTY_HOWTO (92),
1360 EMPTY_HOWTO (93),
1361
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 32, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 32, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 32, /* bitsize */
1394 TRUE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1403
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 12, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 12, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1431
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1433
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1436 0, /* rightshift */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1438 0, /* bitsize */
1439 FALSE, /* pc_relative */
1440 0, /* bitpos */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1445 0, /* src_mask */
1446 0, /* dst_mask */
1447 FALSE), /* pcrel_offset */
1448
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1451 0, /* rightshift */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1453 0, /* bitsize */
1454 FALSE, /* pc_relative */
1455 0, /* bitpos */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1460 0, /* src_mask */
1461 0, /* dst_mask */
1462 FALSE), /* pcrel_offset */
1463
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1465 1, /* rightshift */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1467 11, /* bitsize */
1468 TRUE, /* pc_relative */
1469 0, /* bitpos */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1477
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1479 1, /* rightshift */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1481 8, /* bitsize */
1482 TRUE, /* pc_relative */
1483 0, /* bitpos */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1491
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1494 0, /* rightshift */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1496 32, /* bitsize */
1497 FALSE, /* pc_relative */
1498 0, /* bitpos */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1506
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1508 0, /* rightshift */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1510 32, /* bitsize */
1511 FALSE, /* pc_relative */
1512 0, /* bitpos */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1520
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1522 0, /* rightshift */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1524 32, /* bitsize */
1525 FALSE, /* pc_relative */
1526 0, /* bitpos */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1534
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1536 0, /* rightshift */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1538 32, /* bitsize */
1539 FALSE, /* pc_relative */
1540 0, /* bitpos */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1548
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1550 0, /* rightshift */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1552 32, /* bitsize */
1553 FALSE, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1562
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1564 0, /* rightshift */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1566 12, /* bitsize */
1567 FALSE, /* pc_relative */
1568 0, /* bitpos */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1576
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1578 0, /* rightshift */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1580 12, /* bitsize */
1581 FALSE, /* pc_relative */
1582 0, /* bitpos */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1590
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1592 0, /* rightshift */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1594 12, /* bitsize */
1595 FALSE, /* pc_relative */
1596 0, /* bitpos */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1604 };
1605
1606 /* 112-127 private relocations
1607 128 R_ARM_ME_TOO, obsolete
1608 129-255 unallocated in AAELF.
1609
1610 249-255 extended, currently unused, relocations: */
1611
1612 static reloc_howto_type elf32_arm_howto_table_2[4] =
1613 {
1614 HOWTO (R_ARM_RREL32, /* type */
1615 0, /* rightshift */
1616 0, /* size (0 = byte, 1 = short, 2 = long) */
1617 0, /* bitsize */
1618 FALSE, /* pc_relative */
1619 0, /* bitpos */
1620 complain_overflow_dont,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_RREL32", /* name */
1623 FALSE, /* partial_inplace */
1624 0, /* src_mask */
1625 0, /* dst_mask */
1626 FALSE), /* pcrel_offset */
1627
1628 HOWTO (R_ARM_RABS32, /* type */
1629 0, /* rightshift */
1630 0, /* size (0 = byte, 1 = short, 2 = long) */
1631 0, /* bitsize */
1632 FALSE, /* pc_relative */
1633 0, /* bitpos */
1634 complain_overflow_dont,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_RABS32", /* name */
1637 FALSE, /* partial_inplace */
1638 0, /* src_mask */
1639 0, /* dst_mask */
1640 FALSE), /* pcrel_offset */
1641
1642 HOWTO (R_ARM_RPC24, /* type */
1643 0, /* rightshift */
1644 0, /* size (0 = byte, 1 = short, 2 = long) */
1645 0, /* bitsize */
1646 FALSE, /* pc_relative */
1647 0, /* bitpos */
1648 complain_overflow_dont,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_RPC24", /* name */
1651 FALSE, /* partial_inplace */
1652 0, /* src_mask */
1653 0, /* dst_mask */
1654 FALSE), /* pcrel_offset */
1655
1656 HOWTO (R_ARM_RBASE, /* type */
1657 0, /* rightshift */
1658 0, /* size (0 = byte, 1 = short, 2 = long) */
1659 0, /* bitsize */
1660 FALSE, /* pc_relative */
1661 0, /* bitpos */
1662 complain_overflow_dont,/* complain_on_overflow */
1663 bfd_elf_generic_reloc, /* special_function */
1664 "R_ARM_RBASE", /* name */
1665 FALSE, /* partial_inplace */
1666 0, /* src_mask */
1667 0, /* dst_mask */
1668 FALSE) /* pcrel_offset */
1669 };
1670
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1673 {
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1676
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1680
1681 return NULL;
1682 }
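
/* Illustrative examples of the mapping above (assuming the AAELF type
   numbering from include/elf/arm.h):
     elf32_arm_howto_from_type (R_ARM_ABS32)
       -> &elf32_arm_howto_table_1[R_ARM_ABS32]
     elf32_arm_howto_from_type (R_ARM_RABS32)
       -> &elf32_arm_howto_table_2[R_ARM_RABS32 - R_ARM_RREL32]
     elf32_arm_howto_from_type (200)
       -> NULL (type is unallocated, so neither table covers it).  */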
1683
1684 static void
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1687 {
1688 unsigned int r_type;
1689
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 }
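
/* A worked example (illustrative), assuming the standard ELF32 r_info
   split of type in the low byte and symbol index in the upper 24 bits:
   for r_info == 0x00001502, ELF32_R_TYPE yields 2 (R_ARM_ABS32) and the
   symbol index is 0x15, so the howto field is set to
   &elf32_arm_howto_table_1[R_ARM_ABS32].  */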
1693
1694 struct elf32_arm_reloc_map
1695 {
1696 bfd_reloc_code_real_type bfd_reloc_val;
1697 unsigned char elf_reloc_val;
1698 };
1699
1700 /* All entries in this list must also be present in elf32_arm_howto_table_1. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1702 {
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1733 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 };
1782
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1786 {
1787 unsigned int i;
1788
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1792
1793 return NULL;
1794 }
1795
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1798 const char *r_name)
1799 {
1800 unsigned int i;
1801
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1806
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1811
1812 return NULL;
1813 }
1814
1815 /* Support for core dump NOTE sections. */
1816
1817 static bfd_boolean
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1819 {
1820 int offset;
1821 size_t size;
1822
1823 switch (note->descsz)
1824 {
1825 default:
1826 return FALSE;
1827
1828 case 148: /* Linux/ARM 32-bit. */
1829 /* pr_cursig */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831
1832 /* pr_pid */
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1834
1835 /* pr_reg */
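/* pr_reg occupies 72 bytes: 18 registers (r0-r15, cpsr and orig_r0)
   at 4 bytes each, starting 72 bytes into the note data.  */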
1836 offset = 72;
1837 size = 72;
1838
1839 break;
1840 }
1841
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1845 }
1846
1847 static bfd_boolean
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 {
1850 switch (note->descsz)
1851 {
1852 default:
1853 return FALSE;
1854
1855 case 124: /* Linux/ARM elf_prpsinfo. */
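/* In the Linux/ARM elf_prpsinfo layout, pr_fname (16 bytes) lives at
   offset 28 and pr_psargs (80 bytes) at offset 44; they become the
   core_program and core_command strings respectively.  */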
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 }
1861
1862 /* Note that for some reason, at least one implementation tacks a
1863 spurious space onto the end of the args, so strip it off if it
1864 exists. */
1865 {
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1868
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1871 }
1872
1873 return TRUE;
1874 }
1875
1876 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1877 #define TARGET_LITTLE_NAME "elf32-littlearm"
1878 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1879 #define TARGET_BIG_NAME "elf32-bigarm"
1880
1881 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1882 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883
1884 typedef unsigned long int insn32;
1885 typedef unsigned short int insn16;
1886
1887 /* In lieu of proper flags, assume all EABIv4 or later objects are
1888 interworkable. */
1889 #define INTERWORK_FLAG(abfd) \
1890 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1891 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1892 || ((abfd)->flags & BFD_LINKER_CREATED))
1893
1894 /* The linker script knows the section names for placement.
1895 The entry_names are used to do simple name mangling on the stubs.
1896 Given a function name, and its type, the stub can be found. The
1897 name can be changed. The only requirement is that the %s be present. */
1898 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1899 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900
1901 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1902 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903
1904 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1905 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906
1907 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1908 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909
1910 #define STUB_ENTRY_NAME "__%s_veneer"
1911
1912 /* The name of the dynamic interpreter. This is put in the .interp
1913 section. */
1914 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1915
1916 #ifdef FOUR_WORD_PLT
1917
1918 /* The first entry in a procedure linkage table looks like
1919 this. It is set up so that any shared library function that is
1920 called before the relocation has been set up calls the dynamic
1921 linker first. */
1922 static const bfd_vma elf32_arm_plt0_entry [] =
1923 {
1924 0xe52de004, /* str lr, [sp, #-4]! */
1925 0xe59fe010, /* ldr lr, [pc, #16] */
1926 0xe08fe00e, /* add lr, pc, lr */
1927 0xe5bef008, /* ldr pc, [lr, #8]! */
1928 };
1929
1930 /* Subsequent entries in a procedure linkage table look like
1931 this. */
1932 static const bfd_vma elf32_arm_plt_entry [] =
1933 {
1934 0xe28fc600, /* add ip, pc, #NN */
1935 0xe28cca00, /* add ip, ip, #NN */
1936 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1937 0x00000000, /* unused */
1938 };
1939
1940 #else
1941
1942 /* The first entry in a procedure linkage table looks like
1943 this. It is set up so that any shared library function that is
1944 called before the relocation has been set up calls the dynamic
1945 linker first. */
1946 static const bfd_vma elf32_arm_plt0_entry [] =
1947 {
1948 0xe52de004, /* str lr, [sp, #-4]! */
1949 0xe59fe004, /* ldr lr, [pc, #4] */
1950 0xe08fe00e, /* add lr, pc, lr */
1951 0xe5bef008, /* ldr pc, [lr, #8]! */
1952 0x00000000, /* &GOT[0] - . */
1953 };
1954
1955 /* Subsequent entries in a procedure linkage table look like
1956 this. */
1957 static const bfd_vma elf32_arm_plt_entry [] =
1958 {
1959 0xe28fc600, /* add ip, pc, #0xNN00000 */
1960 0xe28cca00, /* add ip, ip, #0xNN000 */
1961 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1962 };
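/* The zero immediate fields above are placeholders: when the PLT is
   written out, the displacement from this entry to its GOT slot
   (measured from the PC value read by the first ADD, i.e. entry + 8)
   is ORed in, split across the three instructions as bits [27:20],
   [19:12] and [11:0] of the displacement respectively, matching the
   #0xNN00000 / #0xNN000 / #0xNNN comments.  */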
1963
1964 #endif
1965
1966 /* The format of the first entry in the procedure linkage table
1967 for a VxWorks executable. */
1968 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1969 {
1970 0xe52dc008, /* str ip,[sp,#-8]! */
1971 0xe59fc000, /* ldr ip,[pc] */
1972 0xe59cf008, /* ldr pc,[ip,#8] */
1973 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1974 };
1975
1976 /* The format of subsequent entries in a VxWorks executable. */
1977 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1978 {
1979 0xe59fc000, /* ldr ip,[pc] */
1980 0xe59cf000, /* ldr pc,[ip] */
1981 0x00000000, /* .long @got */
1982 0xe59fc000, /* ldr ip,[pc] */
1983 0xea000000, /* b _PLT */
1984 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1985 };
1986
1987 /* The format of entries in a VxWorks shared library. */
1988 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1989 {
1990 0xe59fc000, /* ldr ip,[pc] */
1991 0xe79cf009, /* ldr pc,[ip,r9] */
1992 0x00000000, /* .long @got */
1993 0xe59fc000, /* ldr ip,[pc] */
1994 0xe599f008, /* ldr pc,[r9,#8] */
1995 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1996 };
1997
1998 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1999 #define PLT_THUMB_STUB_SIZE 4
2000 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2001 {
2002 0x4778, /* bx pc */
2003 0x46c0 /* nop */
2004 };
2005
2006 /* The entries in a PLT when using a DLL-based target with multiple
2007 address spaces. */
2008 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2009 {
2010 0xe51ff004, /* ldr pc, [pc, #-4] */
2011 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 };
2013
2014 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2015 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2016 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2017 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2018 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2019 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
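/* These ranges follow from the branch encodings: an ARM B/BL has a
   24-bit signed word offset with the PC reading 8 bytes ahead, a
   Thumb-1 BL pair has a 22-bit signed halfword offset with the PC 4
   bytes ahead, and a Thumb-2 B.W/BL has in effect a 24-bit signed
   halfword offset, again with the PC 4 bytes ahead.  */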
2020
2021 enum stub_insn_type
2022 {
2023 THUMB16_TYPE = 1,
2024 THUMB32_TYPE,
2025 ARM_TYPE,
2026 DATA_TYPE
2027 };
2028
2029 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2030 /* A bit of a hack: a Thumb conditional branch, into which the proper
2031 condition is inserted by arm_build_one_stub(). */
2032 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2033 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2034 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2035 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2036 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2037 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2038
2039 typedef struct
2040 {
2041 bfd_vma data;
2042 enum stub_insn_type type;
2043 unsigned int r_type;
2044 int reloc_addend;
2045 } insn_sequence;
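/* Each stub below is described as an array of these entries: raw
   instruction or data words tagged with how to emit them (Thumb,
   Thumb-2, ARM or data) plus, where needed, a relocation type and
   addend to apply against the stub's target.  The templates are
   measured by find_stub_size_and_template and emitted by
   arm_build_one_stub.  */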
2046
2047 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2048 to reach the stub if necessary. */
2049 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 {
2051 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2052 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2053 };
2054
2055 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2056 available. */
2057 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 {
2059 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2060 ARM_INSN(0xe12fff1c), /* bx ip */
2061 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2062 };
2063
2064 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2065 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 {
2067 THUMB16_INSN(0xb401), /* push {r0} */
2068 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2069 THUMB16_INSN(0x4684), /* mov ip, r0 */
2070 THUMB16_INSN(0xbc01), /* pop {r0} */
2071 THUMB16_INSN(0x4760), /* bx ip */
2072 THUMB16_INSN(0xbf00), /* nop */
2073 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2074 };
2075
2076 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2077 allowed. */
2078 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 {
2080 THUMB16_INSN(0x4778), /* bx pc */
2081 THUMB16_INSN(0x46c0), /* nop */
2082 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2083 ARM_INSN(0xe12fff1c), /* bx ip */
2084 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2085 };
2086
2087 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2088 available. */
2089 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 {
2091 THUMB16_INSN(0x4778), /* bx pc */
2092 THUMB16_INSN(0x46c0), /* nop */
2093 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2094 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2095 };
2096
2097 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2098 one, when the destination is close enough. */
2099 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 {
2101 THUMB16_INSN(0x4778), /* bx pc */
2102 THUMB16_INSN(0x46c0), /* nop */
2103 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2104 };
2105
2106 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2107 blx to reach the stub if necessary. */
2108 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 {
2110 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2111 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2112 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2113 };
2114
2115 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2116 blx to reach the stub if necessary. We cannot add into the PC;
2117 that is not guaranteed to switch mode (the behaviour differs
2118 between ARMv6 and ARMv7). */
2119 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 {
2121 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2122 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2123 ARM_INSN(0xe12fff1c), /* bx ip */
2124 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2125 };
2126
2127 /* V4T ARM -> Thumb long branch stub, PIC. */
2128 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 {
2130 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2131 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2132 ARM_INSN(0xe12fff1c), /* bx ip */
2133 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2134 };
2135
2136 /* V4T Thumb -> ARM long branch stub, PIC. */
2137 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 {
2139 THUMB16_INSN(0x4778), /* bx pc */
2140 THUMB16_INSN(0x46c0), /* nop */
2141 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2142 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2143 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2144 };
2145
2146 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2147 architectures. */
2148 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 {
2150 THUMB16_INSN(0xb401), /* push {r0} */
2151 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2152 THUMB16_INSN(0x46fc), /* mov ip, pc */
2153 THUMB16_INSN(0x4484), /* add ip, r0 */
2154 THUMB16_INSN(0xbc01), /* pop {r0} */
2155 THUMB16_INSN(0x4760), /* bx ip */
2156 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2157 };
2158
2159 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2160 allowed. */
2161 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 {
2163 THUMB16_INSN(0x4778), /* bx pc */
2164 THUMB16_INSN(0x46c0), /* nop */
2165 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2166 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2167 ARM_INSN(0xe12fff1c), /* bx ip */
2168 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2169 };
2170
2171 /* Cortex-A8 erratum-workaround stubs. */
2172
2173 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2174 can't use a conditional branch to reach this stub). */
2175
2176 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 {
2178 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2179 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2180 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2181 };
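/* The layout above works as follows: the original conditional branch
   is redirected to this stub, which re-tests the condition (the
   condition field is copied from the original instruction by
   arm_build_one_stub).  If the condition holds, the short branch
   skips to the final b.w, which goes to the original branch
   destination; otherwise execution falls through to a b.w back to
   the instruction after the original branch.  */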
2182
2183 /* Stub used for b.w and bl.w instructions. */
2184
2185 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 {
2187 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2188 };
2189
2190 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 {
2192 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2193 };
2194
2195 /* Stub used for Thumb-2 blx.w instructions. The original blx.w
2196 instruction (which switches to ARM mode) is modified to point to this
2197 stub, which jumps to the real destination using an ARM-mode branch. */
2198
2199 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 {
2201 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2202 };
2203
2204 /* Section name for stubs is the associated section name plus this
2205 string. */
2206 #define STUB_SUFFIX ".stub"
2207
2208 /* One entry per long/short branch stub defined above. */
2209 #define DEF_STUBS \
2210 DEF_STUB(long_branch_any_any) \
2211 DEF_STUB(long_branch_v4t_arm_thumb) \
2212 DEF_STUB(long_branch_thumb_only) \
2213 DEF_STUB(long_branch_v4t_thumb_thumb) \
2214 DEF_STUB(long_branch_v4t_thumb_arm) \
2215 DEF_STUB(short_branch_v4t_thumb_arm) \
2216 DEF_STUB(long_branch_any_arm_pic) \
2217 DEF_STUB(long_branch_any_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2220 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2221 DEF_STUB(long_branch_thumb_only_pic) \
2222 DEF_STUB(a8_veneer_b_cond) \
2223 DEF_STUB(a8_veneer_b) \
2224 DEF_STUB(a8_veneer_bl) \
2225 DEF_STUB(a8_veneer_blx)
2226
2227 #define DEF_STUB(x) arm_stub_##x,
2228 enum elf32_arm_stub_type {
2229 arm_stub_none,
2230 DEF_STUBS
2231 };
2232 #undef DEF_STUB
2233
2234 typedef struct
2235 {
2236 const insn_sequence* template;
2237 int template_size;
2238 } stub_def;
2239
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2242 {NULL, 0},
2243 DEF_STUBS
2244 };
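/* DEF_STUBS is expanded twice with different definitions of DEF_STUB:
   once to build the enum of stub types above and once to build the
   matching stub_definitions table, so the two always stay in sync.
   The leading {NULL, 0} entry corresponds to arm_stub_none.  */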
2245
2246 struct elf32_arm_stub_hash_entry
2247 {
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2250
2251 /* The stub section. */
2252 asection *stub_sec;
2253
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2256
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2261
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2264
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2268
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2272 int stub_size;
2273 /* Its template. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2277
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2280
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2283
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2286 asection *id_sec;
2287
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2291 char *output_name;
2292 };
2293
2294 /* Used to build a map of a section. This is required for mixed-endian
2295 code/data. */
2296
2297 typedef struct elf32_elf_section_map
2298 {
2299 bfd_vma vma;
2300 char type;
2301 }
2302 elf32_arm_section_map;
2303
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2305
2306 typedef enum
2307 {
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2312 }
2313 elf32_vfp11_erratum_type;
2314
2315 typedef struct elf32_vfp11_erratum_list
2316 {
2317 struct elf32_vfp11_erratum_list *next;
2318 bfd_vma vma;
2319 union
2320 {
2321 struct
2322 {
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2325 } b;
2326 struct
2327 {
2328 struct elf32_vfp11_erratum_list *branch;
2329 unsigned int id;
2330 } v;
2331 } u;
2332 elf32_vfp11_erratum_type type;
2333 }
2334 elf32_vfp11_erratum_list;
2335
2336 typedef enum
2337 {
2338 DELETE_EXIDX_ENTRY,
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2340 }
2341 arm_unwind_edit_type;
2342
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2345 {
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2351 unsigned int index;
2352 struct arm_unwind_table_edit *next;
2353 }
2354 arm_unwind_table_edit;
2355
2356 typedef struct _arm_elf_section_data
2357 {
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2367 union
2368 {
2369 /* Unwind info attached to a text section. */
2370 struct
2371 {
2372 asection *arm_exidx_sec;
2373 } text;
2374
2375 /* Unwind info attached to an .ARM.exidx section. */
2376 struct
2377 {
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2380 } exidx;
2381 } u;
2382 }
2383 _arm_elf_section_data;
2384
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2387
2388 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so may be created multiple times: we use an array of these entries whilst
2391 relaxing which we can refresh easily, then create stubs for each potentially
2392 erratum-triggering instruction once we've settled on a solution. */
2393
2394 struct a8_erratum_fix {
2395 bfd *input_bfd;
2396 asection *section;
2397 bfd_vma offset;
2398 bfd_vma addend;
2399 unsigned long orig_insn;
2400 char *stub_name;
2401 enum elf32_arm_stub_type stub_type;
2402 };
2403
2404 /* A table of relocs applied to branches which might trigger the
2405 Cortex-A8 erratum. */
2406
2407 struct a8_erratum_reloc {
2408 bfd_vma from;
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2414 };
2415
2416 /* The size of the thread control block. */
2417 #define TCB_SIZE 8
2418
2419 struct elf_arm_obj_tdata
2420 {
2421 struct elf_obj_tdata root;
2422
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2425
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2428
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2431 };
2432
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2435
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2438
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_TDATA)
2443
2444 static bfd_boolean
2445 elf32_arm_mkobject (bfd *abfd)
2446 {
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2448 ARM_ELF_TDATA);
2449 }
2450
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2456
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2460 {
2461 /* Next section. */
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2464 asection * section;
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2469 };
2470
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2472
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2475 {
2476 struct elf_link_hash_entry root;
2477
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2480
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2484
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2488
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2493
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2499
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2503
2504 /* A pointer to the most recently used stub hash entry against this
2505 symbol. */
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2507 };
2508
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2512 (&(table)->root, \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2514 (info)))
2515
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2519
2520 #define arm_stub_hash_lookup(table, string, create, copy) \
2521 ((struct elf32_arm_stub_hash_entry *) \
2522 bfd_hash_lookup ((table), (string), (create), (copy)))
2523
2524 /* ARM ELF linker hash table. */
2525 struct elf32_arm_link_hash_table
2526 {
2527 /* The main hash table. */
2528 struct elf_link_hash_table root;
2529
2530 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2531 bfd_size_type thumb_glue_size;
2532
2533 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2534 bfd_size_type arm_glue_size;
2535
2536 /* The size in bytes of the section containing the ARMv4 BX veneers. */
2537 bfd_size_type bx_glue_size;
2538
2539 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2540 veneer has been populated. */
2541 bfd_vma bx_glue_offset[15];
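/* There are 15 entries because the array is indexed by the register
   operand of the BX instruction being fixed up (r0-r14).  */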
2542
2543 /* The size in bytes of the section containing glue for VFP11 erratum
2544 veneers. */
2545 bfd_size_type vfp11_erratum_glue_size;
2546
2547 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2548 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2549 elf32_arm_write_section(). */
2550 struct a8_erratum_fix *a8_erratum_fixes;
2551 unsigned int num_a8_erratum_fixes;
2552
2553 /* An arbitrary input BFD chosen to hold the glue sections. */
2554 bfd * bfd_of_glue_owner;
2555
2556 /* Nonzero to output a BE8 image. */
2557 int byteswap_code;
2558
2559 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2560 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2561 int target1_is_rel;
2562
2563 /* The relocation to use for R_ARM_TARGET2 relocations. */
2564 int target2_reloc;
2565
2566 /* 0 = Ignore R_ARM_V4BX.
2567 1 = Convert BX to MOV PC.
2568 2 = Generate v4 interworking stubs. */
2569 int fix_v4bx;
2570
2571 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2572 int fix_cortex_a8;
2573
2574 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2575 int use_blx;
2576
2577 /* What sort of code sequences we should look for which may trigger the
2578 VFP11 denorm erratum. */
2579 bfd_arm_vfp11_fix vfp11_fix;
2580
2581 /* Global counter for the number of fixes we have emitted. */
2582 int num_vfp11_fixes;
2583
2584 /* Nonzero to force PIC branch veneers. */
2585 int pic_veneer;
2586
2587 /* The number of bytes in the initial entry in the PLT. */
2588 bfd_size_type plt_header_size;
2589
2590 /* The number of bytes in the subsequent PLT entries. */
2591 bfd_size_type plt_entry_size;
2592
2593 /* True if the target system is VxWorks. */
2594 int vxworks_p;
2595
2596 /* True if the target system is Symbian OS. */
2597 int symbian_p;
2598
2599 /* True if the target uses REL relocations. */
2600 int use_rel;
2601
2602 /* Short-cuts to get to dynamic linker sections. */
2603 asection *sgot;
2604 asection *sgotplt;
2605 asection *srelgot;
2606 asection *splt;
2607 asection *srelplt;
2608 asection *sdynbss;
2609 asection *srelbss;
2610
2611 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2612 asection *srelplt2;
2613
2614 /* Data for R_ARM_TLS_LDM32 relocations. */
2615 union
2616 {
2617 bfd_signed_vma refcount;
2618 bfd_vma offset;
2619 } tls_ldm_got;
2620
2621 /* Small local sym to section mapping cache. */
2622 struct sym_sec_cache sym_sec;
2623
2624 /* For convenience in allocate_dynrelocs. */
2625 bfd * obfd;
2626
2627 /* The stub hash table. */
2628 struct bfd_hash_table stub_hash_table;
2629
2630 /* Linker stub bfd. */
2631 bfd *stub_bfd;
2632
2633 /* Linker call-backs. */
2634 asection * (*add_stub_section) (const char *, asection *);
2635 void (*layout_sections_again) (void);
2636
2637 /* Array to keep track of which stub sections have been created, and
2638 information on stub grouping. */
2639 struct map_stub
2640 {
2641 /* This is the section to which stubs in the group will be
2642 attached. */
2643 asection *link_sec;
2644 /* The stub section. */
2645 asection *stub_sec;
2646 } *stub_group;
2647
2648 /* Assorted information used by elf32_arm_size_stubs. */
2649 unsigned int bfd_count;
2650 int top_index;
2651 asection **input_list;
2652 };
2653
2654 /* Create an entry in an ARM ELF linker hash table. */
2655
2656 static struct bfd_hash_entry *
2657 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2658 struct bfd_hash_table * table,
2659 const char * string)
2660 {
2661 struct elf32_arm_link_hash_entry * ret =
2662 (struct elf32_arm_link_hash_entry *) entry;
2663
2664 /* Allocate the structure if it has not already been allocated by a
2665 subclass. */
2666 if (ret == NULL)
2667 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2668 if (ret == NULL)
2669 return (struct bfd_hash_entry *) ret;
2670
2671 /* Call the allocation method of the superclass. */
2672 ret = ((struct elf32_arm_link_hash_entry *)
2673 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2674 table, string));
2675 if (ret != NULL)
2676 {
2677 ret->relocs_copied = NULL;
2678 ret->tls_type = GOT_UNKNOWN;
2679 ret->plt_thumb_refcount = 0;
2680 ret->plt_maybe_thumb_refcount = 0;
2681 ret->plt_got_offset = -1;
2682 ret->export_glue = NULL;
2683
2684 ret->stub_cache = NULL;
2685 }
2686
2687 return (struct bfd_hash_entry *) ret;
2688 }
2689
2690 /* Initialize an entry in the stub hash table. */
2691
2692 static struct bfd_hash_entry *
2693 stub_hash_newfunc (struct bfd_hash_entry *entry,
2694 struct bfd_hash_table *table,
2695 const char *string)
2696 {
2697 /* Allocate the structure if it has not already been allocated by a
2698 subclass. */
2699 if (entry == NULL)
2700 {
2701 entry = bfd_hash_allocate (table,
2702 sizeof (struct elf32_arm_stub_hash_entry));
2703 if (entry == NULL)
2704 return entry;
2705 }
2706
2707 /* Call the allocation method of the superclass. */
2708 entry = bfd_hash_newfunc (entry, table, string);
2709 if (entry != NULL)
2710 {
2711 struct elf32_arm_stub_hash_entry *eh;
2712
2713 /* Initialize the local fields. */
2714 eh = (struct elf32_arm_stub_hash_entry *) entry;
2715 eh->stub_sec = NULL;
2716 eh->stub_offset = 0;
2717 eh->target_value = 0;
2718 eh->target_section = NULL;
2719 eh->stub_type = arm_stub_none;
2720 eh->stub_size = 0;
2721 eh->stub_template = NULL;
2722 eh->stub_template_size = 0;
2723 eh->h = NULL;
2724 eh->id_sec = NULL;
2725 }
2726
2727 return entry;
2728 }
2729
2730 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2731 shortcuts to them in our hash table. */
2732
2733 static bfd_boolean
2734 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2735 {
2736 struct elf32_arm_link_hash_table *htab;
2737
2738 htab = elf32_arm_hash_table (info);
2739 /* BPABI objects never have a GOT, or associated sections. */
2740 if (htab->symbian_p)
2741 return TRUE;
2742
2743 if (! _bfd_elf_create_got_section (dynobj, info))
2744 return FALSE;
2745
2746 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2747 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2748 if (!htab->sgot || !htab->sgotplt)
2749 abort ();
2750
2751 htab->srelgot = bfd_make_section_with_flags (dynobj,
2752 RELOC_SECTION (htab, ".got"),
2753 (SEC_ALLOC | SEC_LOAD
2754 | SEC_HAS_CONTENTS
2755 | SEC_IN_MEMORY
2756 | SEC_LINKER_CREATED
2757 | SEC_READONLY));
2758 if (htab->srelgot == NULL
2759 || ! bfd_set_section_alignment (dynobj, htab->srelgot, 2))
2760 return FALSE;
2761 return TRUE;
2762 }
2763
2764 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2765 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2766 hash table. */
2767
2768 static bfd_boolean
2769 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2770 {
2771 struct elf32_arm_link_hash_table *htab;
2772
2773 htab = elf32_arm_hash_table (info);
2774 if (!htab->sgot && !create_got_section (dynobj, info))
2775 return FALSE;
2776
2777 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2778 return FALSE;
2779
2780 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2781 htab->srelplt = bfd_get_section_by_name (dynobj,
2782 RELOC_SECTION (htab, ".plt"));
2783 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2784 if (!info->shared)
2785 htab->srelbss = bfd_get_section_by_name (dynobj,
2786 RELOC_SECTION (htab, ".bss"));
2787
2788 if (htab->vxworks_p)
2789 {
2790 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2791 return FALSE;
2792
2793 if (info->shared)
2794 {
2795 htab->plt_header_size = 0;
2796 htab->plt_entry_size
2797 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2798 }
2799 else
2800 {
2801 htab->plt_header_size
2802 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2803 htab->plt_entry_size
2804 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2805 }
2806 }
2807
2808 if (!htab->splt
2809 || !htab->srelplt
2810 || !htab->sdynbss
2811 || (!info->shared && !htab->srelbss))
2812 abort ();
2813
2814 return TRUE;
2815 }
2816
2817 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2818
2819 static void
2820 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2821 struct elf_link_hash_entry *dir,
2822 struct elf_link_hash_entry *ind)
2823 {
2824 struct elf32_arm_link_hash_entry *edir, *eind;
2825
2826 edir = (struct elf32_arm_link_hash_entry *) dir;
2827 eind = (struct elf32_arm_link_hash_entry *) ind;
2828
2829 if (eind->relocs_copied != NULL)
2830 {
2831 if (edir->relocs_copied != NULL)
2832 {
2833 struct elf32_arm_relocs_copied **pp;
2834 struct elf32_arm_relocs_copied *p;
2835
2836 /* Add reloc counts against the indirect sym to the direct sym
2837 list. Merge any entries against the same section. */
2838 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2839 {
2840 struct elf32_arm_relocs_copied *q;
2841
2842 for (q = edir->relocs_copied; q != NULL; q = q->next)
2843 if (q->section == p->section)
2844 {
2845 q->pc_count += p->pc_count;
2846 q->count += p->count;
2847 *pp = p->next;
2848 break;
2849 }
2850 if (q == NULL)
2851 pp = &p->next;
2852 }
2853 *pp = edir->relocs_copied;
2854 }
2855
2856 edir->relocs_copied = eind->relocs_copied;
2857 eind->relocs_copied = NULL;
2858 }
2859
2860 if (ind->root.type == bfd_link_hash_indirect)
2861 {
2862 /* Copy over PLT info. */
2863 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2864 eind->plt_thumb_refcount = 0;
2865 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2866 eind->plt_maybe_thumb_refcount = 0;
2867
2868 if (dir->got.refcount <= 0)
2869 {
2870 edir->tls_type = eind->tls_type;
2871 eind->tls_type = GOT_UNKNOWN;
2872 }
2873 }
2874
2875 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2876 }
2877
2878 /* Create an ARM elf linker hash table. */
2879
2880 static struct bfd_link_hash_table *
2881 elf32_arm_link_hash_table_create (bfd *abfd)
2882 {
2883 struct elf32_arm_link_hash_table *ret;
2884 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2885
2886 ret = bfd_malloc (amt);
2887 if (ret == NULL)
2888 return NULL;
2889
2890 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2891 elf32_arm_link_hash_newfunc,
2892 sizeof (struct elf32_arm_link_hash_entry)))
2893 {
2894 free (ret);
2895 return NULL;
2896 }
2897
2898 ret->sgot = NULL;
2899 ret->sgotplt = NULL;
2900 ret->srelgot = NULL;
2901 ret->splt = NULL;
2902 ret->srelplt = NULL;
2903 ret->sdynbss = NULL;
2904 ret->srelbss = NULL;
2905 ret->srelplt2 = NULL;
2906 ret->thumb_glue_size = 0;
2907 ret->arm_glue_size = 0;
2908 ret->bx_glue_size = 0;
2909 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2910 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2911 ret->vfp11_erratum_glue_size = 0;
2912 ret->num_vfp11_fixes = 0;
2913 ret->fix_cortex_a8 = 0;
2914 ret->bfd_of_glue_owner = NULL;
2915 ret->byteswap_code = 0;
2916 ret->target1_is_rel = 0;
2917 ret->target2_reloc = R_ARM_NONE;
2918 #ifdef FOUR_WORD_PLT
2919 ret->plt_header_size = 16;
2920 ret->plt_entry_size = 16;
2921 #else
2922 ret->plt_header_size = 20;
2923 ret->plt_entry_size = 12;
2924 #endif
2925 ret->fix_v4bx = 0;
2926 ret->use_blx = 0;
2927 ret->vxworks_p = 0;
2928 ret->symbian_p = 0;
2929 ret->use_rel = 1;
2930 ret->sym_sec.abfd = NULL;
2931 ret->obfd = abfd;
2932 ret->tls_ldm_got.refcount = 0;
2933 ret->stub_bfd = NULL;
2934 ret->add_stub_section = NULL;
2935 ret->layout_sections_again = NULL;
2936 ret->stub_group = NULL;
2937 ret->bfd_count = 0;
2938 ret->top_index = 0;
2939 ret->input_list = NULL;
2940
2941 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2942 sizeof (struct elf32_arm_stub_hash_entry)))
2943 {
2944 free (ret);
2945 return NULL;
2946 }
2947
2948 return &ret->root.root;
2949 }
2950
2951 /* Free the derived linker hash table. */
2952
2953 static void
2954 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2955 {
2956 struct elf32_arm_link_hash_table *ret
2957 = (struct elf32_arm_link_hash_table *) hash;
2958
2959 bfd_hash_table_free (&ret->stub_hash_table);
2960 _bfd_generic_link_hash_table_free (hash);
2961 }
2962
2963 /* Determine if we're dealing with a Thumb only architecture. */
2964
2965 static bfd_boolean
2966 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2967 {
2968 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2969 Tag_CPU_arch);
2970 int profile;
2971
2972 if (arch != TAG_CPU_ARCH_V7)
2973 return FALSE;
2974
2975 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2976 Tag_CPU_arch_profile);
2977
2978 return profile == 'M';
2979 }
2980
2981 /* Determine if we're dealing with a Thumb-2 object. */
2982
2983 static bfd_boolean
2984 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2985 {
2986 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2987 Tag_CPU_arch);
2988 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2989 }
2990
2991 static bfd_boolean
2992 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2993 {
2994 switch (stub_type)
2995 {
2996 case arm_stub_long_branch_thumb_only:
2997 case arm_stub_long_branch_v4t_thumb_arm:
2998 case arm_stub_short_branch_v4t_thumb_arm:
2999 case arm_stub_long_branch_v4t_thumb_arm_pic:
3000 case arm_stub_long_branch_thumb_only_pic:
3001 return TRUE;
3002 case arm_stub_none:
3003 BFD_FAIL ();
3004 return FALSE;
3005 break;
3006 default:
3007 return FALSE;
3008 }
3009 }
3010
3011 /* Determine the type of stub needed, if any, for a call. */
3012
3013 static enum elf32_arm_stub_type
3014 arm_type_of_stub (struct bfd_link_info *info,
3015 asection *input_sec,
3016 const Elf_Internal_Rela *rel,
3017 unsigned char st_type,
3018 struct elf32_arm_link_hash_entry *hash,
3019 bfd_vma destination,
3020 asection *sym_sec,
3021 bfd *input_bfd,
3022 const char *name)
3023 {
3024 bfd_vma location;
3025 bfd_signed_vma branch_offset;
3026 unsigned int r_type;
3027 struct elf32_arm_link_hash_table * globals;
3028 int thumb2;
3029 int thumb_only;
3030 enum elf32_arm_stub_type stub_type = arm_stub_none;
3031 int use_plt = 0;
3032
3033 /* We don't know the actual type of the destination if the symbol
3034 is of type STT_SECTION, so give up. */
3035 if (st_type == STT_SECTION)
3036 return stub_type;
3037
3038 globals = elf32_arm_hash_table (info);
3039
3040 thumb_only = using_thumb_only (globals);
3041
3042 thumb2 = using_thumb2 (globals);
3043
3044 /* Determine where the call point is. */
3045 location = (input_sec->output_offset
3046 + input_sec->output_section->vma
3047 + rel->r_offset);
3048
3049 branch_offset = (bfd_signed_vma)(destination - location);
3050
3051 r_type = ELF32_R_TYPE (rel->r_info);
3052
3053 /* Keep a simpler condition, for the sake of clarity. */
3054 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3055 {
3056 use_plt = 1;
3057 /* Note when dealing with PLT entries: the main PLT stub is in
3058 ARM mode, so if the branch is in Thumb mode, another
3059 Thumb->ARM stub will be inserted later just before the ARM
3060 PLT stub. We don't take this extra distance into account
3061 here, because if a long branch stub is needed, we'll add a
3062 Thumb->ARM one and branch directly to the ARM PLT entry,
3063 which avoids spreading offset corrections across several
3064 places. */
3065 }
3066
3067 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3068 {
3069 /* Handle cases where:
3070 - this call goes too far (different Thumb/Thumb2 max
3071 distance)
3072 - it's a Thumb->Arm call and blx is not available, or it's a
3073 Thumb->Arm branch (not bl). A stub is needed in this case,
3074 but only if this call is not through a PLT entry. Indeed,
3075 PLT stubs handle mode switching already.
3076 */
3077 if ((!thumb2
3078 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3079 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3080 || (thumb2
3081 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3082 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3083 || ((st_type != STT_ARM_TFUNC)
3084 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3085 || (r_type == R_ARM_THM_JUMP24))
3086 && !use_plt))
3087 {
3088 if (st_type == STT_ARM_TFUNC)
3089 {
3090 /* Thumb to thumb. */
3091 if (!thumb_only)
3092 {
3093 stub_type = (info->shared | globals->pic_veneer)
3094 /* PIC stubs. */
3095 ? ((globals->use_blx
3096 && (r_type == R_ARM_THM_CALL))
3097 /* V5T and above. Stub starts with ARM code, so
3098 we must be able to switch mode before
3099 reaching it, which is only possible for 'bl'
3100 (ie R_ARM_THM_CALL relocation). */
3101 ? arm_stub_long_branch_any_thumb_pic
3102 /* On V4T, use Thumb code only. */
3103 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3104
3105 /* non-PIC stubs. */
3106 : ((globals->use_blx
3107 && (r_type == R_ARM_THM_CALL))
3108 /* V5T and above. */
3109 ? arm_stub_long_branch_any_any
3110 /* V4T. */
3111 : arm_stub_long_branch_v4t_thumb_thumb);
3112 }
3113 else
3114 {
3115 stub_type = (info->shared | globals->pic_veneer)
3116 /* PIC stub. */
3117 ? arm_stub_long_branch_thumb_only_pic
3118 /* non-PIC stub. */
3119 : arm_stub_long_branch_thumb_only;
3120 }
3121 }
3122 else
3123 {
3124 /* Thumb to arm. */
3125 if (sym_sec != NULL
3126 && sym_sec->owner != NULL
3127 && !INTERWORK_FLAG (sym_sec->owner))
3128 {
3129 (*_bfd_error_handler)
3130 (_("%B(%s): warning: interworking not enabled.\n"
3131 " first occurrence: %B: Thumb call to ARM"),
3132 sym_sec->owner, input_bfd, name);
3133 }
3134
3135 stub_type = (info->shared | globals->pic_veneer)
3136 /* PIC stubs. */
3137 ? ((globals->use_blx
3138 && (r_type == R_ARM_THM_CALL))
3139 /* V5T and above. */
3140 ? arm_stub_long_branch_any_arm_pic
3141 /* V4T PIC stub. */
3142 : arm_stub_long_branch_v4t_thumb_arm_pic)
3143
3144 /* non-PIC stubs. */
3145 : ((globals->use_blx
3146 && (r_type == R_ARM_THM_CALL))
3147 /* V5T and above. */
3148 ? arm_stub_long_branch_any_any
3149 /* V4T. */
3150 : arm_stub_long_branch_v4t_thumb_arm);
3151
3152 /* Handle v4t short branches. */
3153 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3154 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3155 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3156 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3157 }
3158 }
3159 }
3160 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3161 {
3162 if (st_type == STT_ARM_TFUNC)
3163 {
3164 /* Arm to thumb. */
3165
3166 if (sym_sec != NULL
3167 && sym_sec->owner != NULL
3168 && !INTERWORK_FLAG (sym_sec->owner))
3169 {
3170 (*_bfd_error_handler)
3171 (_("%B(%s): warning: interworking not enabled.\n"
3172 " first occurrence: %B: ARM call to Thumb"),
3173 sym_sec->owner, input_bfd, name);
3174 }
3175
3176 /* We have an extra 2 bytes of reach because of
3177 the mode change (bit 24 (H) of the BLX encoding). */
3178 if ((branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3179 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3180 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3181 || (r_type == R_ARM_JUMP24)
3182 || (r_type == R_ARM_PLT32))
3183 && !use_plt)
3184 {
3185 stub_type = (info->shared | globals->pic_veneer)
3186 /* PIC stubs. */
3187 ? ((globals->use_blx)
3188 /* V5T and above. */
3189 ? arm_stub_long_branch_any_thumb_pic
3190 /* V4T stub. */
3191 : arm_stub_long_branch_v4t_arm_thumb_pic)
3192
3193 /* non-PIC stubs. */
3194 : ((globals->use_blx)
3195 /* V5T and above. */
3196 ? arm_stub_long_branch_any_any
3197 /* V4T. */
3198 : arm_stub_long_branch_v4t_arm_thumb);
3199 }
3200 }
3201 else
3202 {
3203 /* Arm to arm. */
3204 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3205 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3206 {
3207 stub_type = (info->shared | globals->pic_veneer)
3208 /* PIC stubs. */
3209 ? arm_stub_long_branch_any_arm_pic
3210 /* non-PIC stubs. */
3211 : arm_stub_long_branch_any_any;
3212 }
3213 }
3214 }
3215
3216 return stub_type;
3217 }
3218
3219 /* Build a name for an entry in the stub hash table. */
3220
3221 static char *
3222 elf32_arm_stub_name (const asection *input_section,
3223 const asection *sym_sec,
3224 const struct elf32_arm_link_hash_entry *hash,
3225 const Elf_Internal_Rela *rel)
3226 {
3227 char *stub_name;
3228 bfd_size_type len;
3229
3230 if (hash)
3231 {
3232 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3233 stub_name = bfd_malloc (len);
3234 if (stub_name != NULL)
3235 sprintf (stub_name, "%08x_%s+%x",
3236 input_section->id & 0xffffffff,
3237 hash->root.root.root.string,
3238 (int) rel->r_addend & 0xffffffff);
3239 }
3240 else
3241 {
3242 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3243 stub_name = bfd_malloc (len);
3244 if (stub_name != NULL)
3245 sprintf (stub_name, "%08x_%x:%x+%x",
3246 input_section->id & 0xffffffff,
3247 sym_sec->id & 0xffffffff,
3248 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3249 (int) rel->r_addend & 0xffffffff);
3250 }
3251
3252 return stub_name;
3253 }
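/* For example, a stub for a call to the global symbol "printf" from
   an input section with id 0x2a and addend 0 would be named
   "0000002a_printf+0", while a stub targeting local symbol number 5
   in the section with id 0x2f would be named "0000002a_2f:5+0".  */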
3254
3255 /* Look up an entry in the stub hash. Stub entries are cached because
3256 creating the stub name takes a bit of time. */
3257
3258 static struct elf32_arm_stub_hash_entry *
3259 elf32_arm_get_stub_entry (const asection *input_section,
3260 const asection *sym_sec,
3261 struct elf_link_hash_entry *hash,
3262 const Elf_Internal_Rela *rel,
3263 struct elf32_arm_link_hash_table *htab)
3264 {
3265 struct elf32_arm_stub_hash_entry *stub_entry;
3266 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3267 const asection *id_sec;
3268
3269 if ((input_section->flags & SEC_CODE) == 0)
3270 return NULL;
3271
3272 /* If this input section is part of a group of sections sharing one
3273 stub section, then use the id of the first section in the group.
3274 Stub names need to include a section id, as there may well be
3275 more than one stub used to reach say, printf, and we need to
3276 distinguish between them. */
3277 id_sec = htab->stub_group[input_section->id].link_sec;
3278
3279 if (h != NULL && h->stub_cache != NULL
3280 && h->stub_cache->h == h
3281 && h->stub_cache->id_sec == id_sec)
3282 {
3283 stub_entry = h->stub_cache;
3284 }
3285 else
3286 {
3287 char *stub_name;
3288
3289 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3290 if (stub_name == NULL)
3291 return NULL;
3292
3293 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3294 stub_name, FALSE, FALSE);
3295 if (h != NULL)
3296 h->stub_cache = stub_entry;
3297
3298 free (stub_name);
3299 }
3300
3301 return stub_entry;
3302 }
3303
3304 /* Find or create a stub section. Returns a pointer to the stub section, and
3305 the section to which the stub section will be attached (in *LINK_SEC_P).
3306 LINK_SEC_P may be NULL. */
3307
3308 static asection *
3309 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3310 struct elf32_arm_link_hash_table *htab)
3311 {
3312 asection *link_sec;
3313 asection *stub_sec;
3314
3315 link_sec = htab->stub_group[section->id].link_sec;
3316 stub_sec = htab->stub_group[section->id].stub_sec;
3317 if (stub_sec == NULL)
3318 {
3319 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3320 if (stub_sec == NULL)
3321 {
3322 size_t namelen;
3323 bfd_size_type len;
3324 char *s_name;
3325
3326 namelen = strlen (link_sec->name);
3327 len = namelen + sizeof (STUB_SUFFIX);
3328 s_name = bfd_alloc (htab->stub_bfd, len);
3329 if (s_name == NULL)
3330 return NULL;
3331
3332 memcpy (s_name, link_sec->name, namelen);
3333 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3334 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3335 if (stub_sec == NULL)
3336 return NULL;
3337 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3338 }
3339 htab->stub_group[section->id].stub_sec = stub_sec;
3340 }
3341
3342 if (link_sec_p)
3343 *link_sec_p = link_sec;
3344
3345 return stub_sec;
3346 }
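/* So, for instance, stubs for a group of sections whose head is
   ".text" end up in a section named ".text.stub" (STUB_SUFFIX),
   created lazily the first time a stub is needed for that group.  */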
3347
3348 /* Add a new stub entry to the stub hash. Not all fields of the new
3349 stub entry are initialised. */
3350
3351 static struct elf32_arm_stub_hash_entry *
3352 elf32_arm_add_stub (const char *stub_name,
3353 asection *section,
3354 struct elf32_arm_link_hash_table *htab)
3355 {
3356 asection *link_sec;
3357 asection *stub_sec;
3358 struct elf32_arm_stub_hash_entry *stub_entry;
3359
3360 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3361 if (stub_sec == NULL)
3362 return NULL;
3363
3364 /* Enter this entry into the linker stub hash table. */
3365 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3366 TRUE, FALSE);
3367 if (stub_entry == NULL)
3368 {
3369 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3370 section->owner,
3371 stub_name);
3372 return NULL;
3373 }
3374
3375 stub_entry->stub_sec = stub_sec;
3376 stub_entry->stub_offset = 0;
3377 stub_entry->id_sec = link_sec;
3378
3379 return stub_entry;
3380 }
3381
3382 /* Store an Arm insn into an output section not processed by
3383 elf32_arm_write_section. */
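/* When byteswap_code is set the instruction is written in the
   opposite byte order from the output BFD (this is how BE8 images
   get little-endian code in an otherwise big-endian file);
   otherwise it follows the output BFD's byte order.  The same
   applies to put_thumb_insn below.  */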
3384
3385 static void
3386 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3387 bfd * output_bfd, bfd_vma val, void * ptr)
3388 {
3389 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3390 bfd_putl32 (val, ptr);
3391 else
3392 bfd_putb32 (val, ptr);
3393 }
3394
3395 /* Store a 16-bit Thumb insn into an output section not processed by
3396 elf32_arm_write_section. */
3397
3398 static void
3399 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3400 bfd * output_bfd, bfd_vma val, void * ptr)
3401 {
3402 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3403 bfd_putl16 (val, ptr);
3404 else
3405 bfd_putb16 (val, ptr);
3406 }
3407
3408 static bfd_reloc_status_type elf32_arm_final_link_relocate
3409 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3410 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3411 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
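/* Build one stub: emit the instructions and data words of its
   template into the stub section and apply any relocations the
   template calls for.  Intended to be called as a hash-table
   traversal callback, hence the generic argument types.  */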
3412
3413 static bfd_boolean
3414 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3415 void * in_arg)
3416 {
3417 #define MAXRELOCS 2
3418 struct elf32_arm_stub_hash_entry *stub_entry;
3419 struct bfd_link_info *info;
3420 struct elf32_arm_link_hash_table *htab;
3421 asection *stub_sec;
3422 bfd *stub_bfd;
3423 bfd_vma stub_addr;
3424 bfd_byte *loc;
3425 bfd_vma sym_value;
3426 int template_size;
3427 int size;
3428 const insn_sequence *template;
3429 int i;
3430 struct elf32_arm_link_hash_table * globals;
3431 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3432 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3433 int nrelocs = 0;
3434
3435 /* Massage our args to the form they really have. */
3436 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3437 info = (struct bfd_link_info *) in_arg;
3438
3439 globals = elf32_arm_hash_table (info);
3440
3441 htab = elf32_arm_hash_table (info);
3442 stub_sec = stub_entry->stub_sec;
3443
3444 /* Make a note of the offset within the stubs for this entry. */
3445 stub_entry->stub_offset = stub_sec->size;
3446 loc = stub_sec->contents + stub_entry->stub_offset;
3447
3448 stub_bfd = stub_sec->owner;
3449
3450 /* This is the address of the start of the stub. */
3451 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3452 + stub_entry->stub_offset;
3453
3454 /* This is the address of the stub destination. */
3455 sym_value = (stub_entry->target_value
3456 + stub_entry->target_section->output_offset
3457 + stub_entry->target_section->output_section->vma);
3458
3459 template = stub_entry->stub_template;
3460 template_size = stub_entry->stub_template_size;
3461
3462 size = 0;
3463 for (i = 0; i < template_size; i++)
3464 {
3465 switch (template[i].type)
3466 {
3467 case THUMB16_TYPE:
3468 {
3469 bfd_vma data = template[i].data;
3470 if (template[i].reloc_addend != 0)
3471 {
3472 /* We've borrowed the reloc_addend field to mean we should
3473 insert a condition code into this (Thumb-1 branch)
3474 instruction. See THUMB16_BCOND_INSN. */
3475 BFD_ASSERT ((data & 0xff00) == 0xd000);
3476 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3477 }
3478 put_thumb_insn (globals, stub_bfd, data, loc + size);
3479 size += 2;
3480 }
3481 break;
3482
3483 case THUMB32_TYPE:
3484 put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
3485 loc + size);
3486 put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
3487 loc + size + 2);
3488 if (template[i].r_type != R_ARM_NONE)
3489 {
3490 stub_reloc_idx[nrelocs] = i;
3491 stub_reloc_offset[nrelocs++] = size;
3492 }
3493 size += 4;
3494 break;
3495
3496 case ARM_TYPE:
3497 put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
3498 /* Handle cases where the target is encoded within the
3499 instruction. */
3500 if (template[i].r_type == R_ARM_JUMP24)
3501 {
3502 stub_reloc_idx[nrelocs] = i;
3503 stub_reloc_offset[nrelocs++] = size;
3504 }
3505 size += 4;
3506 break;
3507
3508 case DATA_TYPE:
3509 bfd_put_32 (stub_bfd, template[i].data, loc + size);
3510 stub_reloc_idx[nrelocs] = i;
3511 stub_reloc_offset[nrelocs++] = size;
3512 size += 4;
3513 break;
3514
3515 default:
3516 BFD_FAIL ();
3517 return FALSE;
3518 }
3519 }
3520
3521 stub_sec->size += size;
3522
3523 /* Stub size has already been computed in arm_size_one_stub. Check
3524 consistency. */
3525 BFD_ASSERT (size == stub_entry->stub_size);
3526
3527 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3528 if (stub_entry->st_type == STT_ARM_TFUNC)
3529 sym_value |= 1;
3530
3531   /* Assume there are at least one and at most MAXRELOCS entries to relocate
3532      in each stub.  */
3533 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3534
3535 for (i = 0; i < nrelocs; i++)
3536 if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3537 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3538 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3539 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3540 {
3541 Elf_Internal_Rela rel;
3542 bfd_boolean unresolved_reloc;
3543 char *error_message;
3544 int sym_flags
3545 = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3546 ? STT_ARM_TFUNC : 0;
3547 bfd_vma points_to = sym_value + stub_entry->target_addend;
3548
3549 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3550 rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
3551 rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;
3552
3553 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3554 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3555 template should refer back to the instruction after the original
3556 branch. */
3557 points_to = sym_value;
3558
3559 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3560 properly. We should probably use this function unconditionally,
3561 rather than only for certain relocations listed in the enclosing
3562 conditional, for the sake of consistency. */
3563 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3564 (template[stub_reloc_idx[i]].r_type),
3565 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3566 points_to, info, stub_entry->target_section, "", sym_flags,
3567 (struct elf_link_hash_entry *) stub_entry, &unresolved_reloc,
3568 &error_message);
3569 }
3570 else
3571 {
3572 _bfd_final_link_relocate (elf32_arm_howto_from_type
3573 (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3574 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3575 sym_value + stub_entry->target_addend,
3576 template[stub_reloc_idx[i]].reloc_addend);
3577 }
3578
3579 return TRUE;
3580 #undef MAXRELOCS
3581 }
3582
3583 /* Calculate the template, template size and instruction size for a stub.
3584 Return value is the instruction size. */
3585
3586 static unsigned int
3587 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3588 const insn_sequence **stub_template,
3589 int *stub_template_size)
3590 {
3591 const insn_sequence *template = NULL;
3592 int template_size = 0, i;
3593 unsigned int size;
3594
3595 template = stub_definitions[stub_type].template;
3596 template_size = stub_definitions[stub_type].template_size;
3597
3598 size = 0;
3599 for (i = 0; i < template_size; i++)
3600 {
3601 switch (template[i].type)
3602 {
3603 case THUMB16_TYPE:
3604 size += 2;
3605 break;
3606
3607 case ARM_TYPE:
3608 case THUMB32_TYPE:
3609 case DATA_TYPE:
3610 size += 4;
3611 break;
3612
3613 default:
3614 BFD_FAIL ();
3615 	  return 0;
3616 }
3617 }
3618
3619 if (stub_template)
3620 *stub_template = template;
3621
3622 if (stub_template_size)
3623 *stub_template_size = template_size;
3624
3625 return size;
3626 }
3627
3628 /* As above, but don't actually build the stub. Just bump offset so
3629 we know stub section sizes. */
3630
3631 static bfd_boolean
3632 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3633 void * in_arg)
3634 {
3635 struct elf32_arm_stub_hash_entry *stub_entry;
3636 struct elf32_arm_link_hash_table *htab;
3637 const insn_sequence *template;
3638 int template_size, size;
3639
3640 /* Massage our args to the form they really have. */
3641 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3642 htab = (struct elf32_arm_link_hash_table *) in_arg;
3643
3644 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3645 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3646
3647 size = find_stub_size_and_template (stub_entry->stub_type, &template,
3648 &template_size);
3649
3650 stub_entry->stub_size = size;
3651 stub_entry->stub_template = template;
3652 stub_entry->stub_template_size = template_size;
3653
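  /* Round the stub size up to a multiple of 8 bytes before adding it to the
     stub section size.  */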
3654 size = (size + 7) & ~7;
3655 stub_entry->stub_sec->size += size;
3656
3657 return TRUE;
3658 }
3659
3660 /* External entry points for sizing and building linker stubs. */
3661
3662 /* Set up various things so that we can make a list of input sections
3663 for each output section included in the link. Returns -1 on error,
3664 0 when no stubs will be needed, and 1 on success. */
3665
3666 int
3667 elf32_arm_setup_section_lists (bfd *output_bfd,
3668 struct bfd_link_info *info)
3669 {
3670 bfd *input_bfd;
3671 unsigned int bfd_count;
3672 int top_id, top_index;
3673 asection *section;
3674 asection **input_list, **list;
3675 bfd_size_type amt;
3676 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3677
3678 if (! is_elf_hash_table (htab))
3679 return 0;
3680
3681 /* Count the number of input BFDs and find the top input section id. */
3682 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3683 input_bfd != NULL;
3684 input_bfd = input_bfd->link_next)
3685 {
3686 bfd_count += 1;
3687 for (section = input_bfd->sections;
3688 section != NULL;
3689 section = section->next)
3690 {
3691 if (top_id < section->id)
3692 top_id = section->id;
3693 }
3694 }
3695 htab->bfd_count = bfd_count;
3696
3697 amt = sizeof (struct map_stub) * (top_id + 1);
3698 htab->stub_group = bfd_zmalloc (amt);
3699 if (htab->stub_group == NULL)
3700 return -1;
3701
3702 /* We can't use output_bfd->section_count here to find the top output
3703 section index as some sections may have been removed, and
3704 _bfd_strip_section_from_output doesn't renumber the indices. */
3705 for (section = output_bfd->sections, top_index = 0;
3706 section != NULL;
3707 section = section->next)
3708 {
3709 if (top_index < section->index)
3710 top_index = section->index;
3711 }
3712
3713 htab->top_index = top_index;
3714 amt = sizeof (asection *) * (top_index + 1);
3715 input_list = bfd_malloc (amt);
3716 htab->input_list = input_list;
3717 if (input_list == NULL)
3718 return -1;
3719
3720 /* For sections we aren't interested in, mark their entries with a
3721 value we can check later. */
3722 list = input_list + top_index;
3723 do
3724 *list = bfd_abs_section_ptr;
3725 while (list-- != input_list);
3726
3727 for (section = output_bfd->sections;
3728 section != NULL;
3729 section = section->next)
3730 {
3731 if ((section->flags & SEC_CODE) != 0)
3732 input_list[section->index] = NULL;
3733 }
3734
3735 return 1;
3736 }
3737
3738 /* The linker repeatedly calls this function for each input section,
3739 in the order that input sections are linked into output sections.
3740 Build lists of input sections to determine groupings between which
3741 we may insert linker stubs. */
3742
3743 void
3744 elf32_arm_next_input_section (struct bfd_link_info *info,
3745 asection *isec)
3746 {
3747 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3748
3749 if (isec->output_section->index <= htab->top_index)
3750 {
3751 asection **list = htab->input_list + isec->output_section->index;
3752
3753 if (*list != bfd_abs_section_ptr)
3754 {
3755 /* Steal the link_sec pointer for our list. */
3756 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3757 /* This happens to make the list in reverse order,
3758 which we reverse later. */
3759 PREV_SEC (isec) = *list;
3760 *list = isec;
3761 }
3762 }
3763 }
3764
3765 /* See whether we can group stub sections together. Grouping stub
3766 sections may result in fewer stubs. More importantly, we need to
3767 put all .init* and .fini* stubs at the end of the .init or
3768 .fini output sections respectively, because glibc splits the
3769 _init and _fini functions into multiple parts. Putting a stub in
3770 the middle of a function is not a good idea. */
3771
3772 static void
3773 group_sections (struct elf32_arm_link_hash_table *htab,
3774 bfd_size_type stub_group_size,
3775 bfd_boolean stubs_always_after_branch)
3776 {
3777 asection **list = htab->input_list;
3778
3779 do
3780 {
3781 asection *tail = *list;
3782 asection *head;
3783
3784 if (tail == bfd_abs_section_ptr)
3785 continue;
3786
3787 /* Reverse the list: we must avoid placing stubs at the
3788 beginning of the section because the beginning of the text
3789 section may be required for an interrupt vector in bare metal
3790 code. */
3791 #define NEXT_SEC PREV_SEC
3792 head = NULL;
3793 while (tail != NULL)
3794 {
3795 /* Pop from tail. */
3796 asection *item = tail;
3797 tail = PREV_SEC (item);
3798
3799 /* Push on head. */
3800 NEXT_SEC (item) = head;
3801 head = item;
3802 }
3803
3804 while (head != NULL)
3805 {
3806 asection *curr;
3807 asection *next;
3808 bfd_vma stub_group_start = head->output_offset;
3809 bfd_vma end_of_next;
3810
3811 curr = head;
3812 while (NEXT_SEC (curr) != NULL)
3813 {
3814 next = NEXT_SEC (curr);
3815 end_of_next = next->output_offset + next->size;
3816 if (end_of_next - stub_group_start >= stub_group_size)
3817 /* End of NEXT is too far from start, so stop. */
3818 break;
3819 /* Add NEXT to the group. */
3820 curr = next;
3821 }
3822
3823 /* OK, the size from the start to the start of CURR is less
3824 than stub_group_size and thus can be handled by one stub
3825 section. (Or the head section is itself larger than
3826 stub_group_size, in which case we may be toast.)
3827 We should really be keeping track of the total size of
3828 stubs added here, as stubs contribute to the final output
3829 section size. */
3830 do
3831 {
3832 next = NEXT_SEC (head);
3833 /* Set up this stub group. */
3834 htab->stub_group[head->id].link_sec = curr;
3835 }
3836 while (head != curr && (head = next) != NULL);
3837
3838 /* But wait, there's more! Input sections up to stub_group_size
3839 bytes after the stub section can be handled by it too. */
3840 if (!stubs_always_after_branch)
3841 {
3842 stub_group_start = curr->output_offset + curr->size;
3843
3844 while (next != NULL)
3845 {
3846 end_of_next = next->output_offset + next->size;
3847 if (end_of_next - stub_group_start >= stub_group_size)
3848 /* End of NEXT is too far from stubs, so stop. */
3849 break;
3850 /* Add NEXT to the stub group. */
3851 head = next;
3852 next = NEXT_SEC (head);
3853 htab->stub_group[head->id].link_sec = curr;
3854 }
3855 }
3856 head = next;
3857 }
3858 }
3859 while (list++ != htab->input_list + htab->top_index);
3860
3861 free (htab->input_list);
3862 #undef PREV_SEC
3863 #undef NEXT_SEC
3864 }
3865
3866 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3867 erratum fix. */
3868
3869 static int
3870 a8_reloc_compare (const void *a, const void *b)
3871 {
3872 const struct a8_erratum_reloc *ra = a, *rb = b;
3873
3874 if (ra->from < rb->from)
3875 return -1;
3876 else if (ra->from > rb->from)
3877 return 1;
3878 else
3879 return 0;
3880 }
3881
3882 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3883 const char *, char **);
3884
3885 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3886 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3887 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3888 otherwise. */
3889
3890 static bfd_boolean
3891 cortex_a8_erratum_scan (bfd *input_bfd,
3892 struct bfd_link_info *info,
3893 struct a8_erratum_fix **a8_fixes_p,
3894 unsigned int *num_a8_fixes_p,
3895 unsigned int *a8_fix_table_size_p,
3896 struct a8_erratum_reloc *a8_relocs,
3897 unsigned int num_a8_relocs)
3898 {
3899 asection *section;
3900 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3901 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3902 unsigned int num_a8_fixes = *num_a8_fixes_p;
3903 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3904
3905 for (section = input_bfd->sections;
3906 section != NULL;
3907 section = section->next)
3908 {
3909 bfd_byte *contents = NULL;
3910 struct _arm_elf_section_data *sec_data;
3911 unsigned int span;
3912 bfd_vma base_vma;
3913
3914 if (elf_section_type (section) != SHT_PROGBITS
3915 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3916 || (section->flags & SEC_EXCLUDE) != 0
3917 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3918 || (section->output_section == bfd_abs_section_ptr))
3919 continue;
3920
3921 base_vma = section->output_section->vma + section->output_offset;
3922
3923 if (elf_section_data (section)->this_hdr.contents != NULL)
3924 contents = elf_section_data (section)->this_hdr.contents;
3925 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3926 return TRUE;
3927
3928 sec_data = elf32_arm_section_data (section);
3929
3930 for (span = 0; span < sec_data->mapcount; span++)
3931 {
3932 unsigned int span_start = sec_data->map[span].vma;
3933 unsigned int span_end = (span == sec_data->mapcount - 1)
3934 ? section->size : sec_data->map[span + 1].vma;
3935 unsigned int i;
3936 char span_type = sec_data->map[span].type;
3937 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3938
3939 if (span_type != 't')
3940 continue;
3941
3942 /* Span is entirely within a single 4KB region: skip scanning. */
3943 if (((base_vma + span_start) & ~0xfff)
3944 == ((base_vma + span_end) & ~0xfff))
3945 continue;
3946
3947 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3948
3949 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3950 * The branch target is in the same 4KB region as the
3951 first half of the branch.
3952 	     * The instruction before the branch is a 32-bit
3953 	       non-branch instruction.  */
3954 for (i = span_start; i < span_end;)
3955 {
3956 unsigned int insn = bfd_getl16 (&contents[i]);
3957 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3958 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3959
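	      /* The first halfword of a 32-bit Thumb-2 instruction has its top
		 five bits set to 0b11101, 0b11110 or 0b11111.  */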
3960 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3961 insn_32bit = TRUE;
3962
3963 if (insn_32bit)
3964 {
3965 /* Load the rest of the insn (in manual-friendly order). */
3966 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3967
3968 /* Encoding T4: B<c>.W. */
3969 is_b = (insn & 0xf800d000) == 0xf0009000;
3970 /* Encoding T1: BL<c>.W. */
3971 is_bl = (insn & 0xf800d000) == 0xf000d000;
3972 /* Encoding T2: BLX<c>.W. */
3973 is_blx = (insn & 0xf800d000) == 0xf000c000;
3974 /* Encoding T3: B<c>.W (not permitted in IT block). */
3975 is_bcc = (insn & 0xf800d000) == 0xf0008000
3976 && (insn & 0x07f00000) != 0x03800000;
3977 }
3978
3979 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
3980
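	      /* A candidate must have its first halfword in the last two bytes
		 of a 4KB page, so that the 32-bit branch straddles the page
		 boundary.  */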
3981 if (((base_vma + i) & 0xfff) == 0xffe
3982 && insn_32bit
3983 && is_32bit_branch
3984 && last_was_32bit
3985 && ! last_was_branch)
3986 {
3987 bfd_signed_vma offset;
3988 bfd_boolean force_target_arm = FALSE;
3989 bfd_boolean force_target_thumb = FALSE;
3990 bfd_vma target;
3991 enum elf32_arm_stub_type stub_type = arm_stub_none;
3992 struct a8_erratum_reloc key, *found;
3993
3994 key.from = base_vma + i;
3995 found = bsearch (&key, a8_relocs, num_a8_relocs,
3996 sizeof (struct a8_erratum_reloc),
3997 &a8_reloc_compare);
3998
3999 if (found)
4000 {
4001 char *error_message = NULL;
4002 struct elf_link_hash_entry *entry;
4003
4004                       /* We don't care about the error returned from this
4005                          function; we only care whether glue exists for this symbol.  */
4006 entry = find_thumb_glue (info, found->sym_name,
4007 &error_message);
4008
4009 if (entry)
4010 found->non_a8_stub = TRUE;
4011
4012 if (found->r_type == R_ARM_THM_CALL
4013 && found->st_type != STT_ARM_TFUNC)
4014 force_target_arm = TRUE;
4015 else if (found->r_type == R_ARM_THM_CALL
4016 && found->st_type == STT_ARM_TFUNC)
4017 force_target_thumb = TRUE;
4018 }
4019
4020 /* Check if we have an offending branch instruction. */
4021
4022 if (found && found->non_a8_stub)
4023 /* We've already made a stub for this instruction, e.g.
4024 it's a long branch or a Thumb->ARM stub. Assume that
4025 stub will suffice to work around the A8 erratum (see
4026 setting of always_after_branch above). */
4027 ;
4028 else if (is_bcc)
4029 {
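		      /* Reassemble the signed branch offset from the B<c>.W
			 (encoding T3) fields: offset = S:J2:J1:imm6:imm11:'0',
			 sign-extended from bit 20.  */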
4030 offset = (insn & 0x7ff) << 1;
4031 offset |= (insn & 0x3f0000) >> 4;
4032 offset |= (insn & 0x2000) ? 0x40000 : 0;
4033 offset |= (insn & 0x800) ? 0x80000 : 0;
4034 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4035 if (offset & 0x100000)
4036 offset |= ~ ((bfd_signed_vma) 0xfffff);
4037 stub_type = arm_stub_a8_veneer_b_cond;
4038 }
4039 else if (is_b || is_bl || is_blx)
4040 {
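		      /* Reassemble the offset for encodings T1 (BL), T2 (BLX)
			 and T4 (B): I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S),
			 offset = S:I1:I2:imm10:imm11:'0', sign-extended from
			 bit 24.  */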
4041 int s = (insn & 0x4000000) != 0;
4042 int j1 = (insn & 0x2000) != 0;
4043 int j2 = (insn & 0x800) != 0;
4044 int i1 = !(j1 ^ s);
4045 int i2 = !(j2 ^ s);
4046
4047 offset = (insn & 0x7ff) << 1;
4048 offset |= (insn & 0x3ff0000) >> 4;
4049 offset |= i2 << 22;
4050 offset |= i1 << 23;
4051 offset |= s << 24;
4052 if (offset & 0x1000000)
4053 offset |= ~ ((bfd_signed_vma) 0xffffff);
4054
4055 if (is_blx)
4056 offset &= ~ ((bfd_signed_vma) 3);
4057
4058 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4059 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4060 }
4061
4062 if (stub_type != arm_stub_none)
4063 {
4064 bfd_vma pc_for_insn = base_vma + i + 4;
4065
4066 /* The original instruction is a BL, but the target is
4067 an ARM instruction. If we were not making a stub,
4068 the BL would have been converted to a BLX. Use the
4069 BLX stub instead in that case. */
4070 if (htab->use_blx && force_target_arm
4071 && stub_type == arm_stub_a8_veneer_bl)
4072 {
4073 stub_type = arm_stub_a8_veneer_blx;
4074 is_blx = TRUE;
4075 is_bl = FALSE;
4076 }
4077 /* Conversely, if the original instruction was
4078 BLX but the target is Thumb mode, use the BL
4079 stub. */
4080 else if (force_target_thumb
4081 && stub_type == arm_stub_a8_veneer_blx)
4082 {
4083 stub_type = arm_stub_a8_veneer_bl;
4084 is_blx = FALSE;
4085 is_bl = TRUE;
4086 }
4087
4088 if (is_blx)
4089 pc_for_insn &= ~ ((bfd_vma) 3);
4090
4091 /* If we found a relocation, use the proper destination,
4092 not the offset in the (unrelocated) instruction.
4093 Note this is always done if we switched the stub type
4094 above. */
4095 if (found)
4096 offset =
4097 (bfd_signed_vma) (found->destination - pc_for_insn);
4098
4099 target = pc_for_insn + offset;
4100
4101 /* The BLX stub is ARM-mode code. Adjust the offset to
4102 take the different PC value (+8 instead of +4) into
4103 account. */
4104 if (stub_type == arm_stub_a8_veneer_blx)
4105 offset += 4;
4106
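		      /* The erratum only applies when the branch target lies in
			 the same 4KB page as the first half of the branch.  */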
4107 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4108 {
4109 char *stub_name;
4110
4111 if (num_a8_fixes == a8_fix_table_size)
4112 {
4113 a8_fix_table_size *= 2;
4114 a8_fixes = bfd_realloc (a8_fixes,
4115 sizeof (struct a8_erratum_fix)
4116 * a8_fix_table_size);
4117 }
4118
4119 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4120 if (stub_name != NULL)
4121 sprintf (stub_name, "%x:%x", section->id, i);
4122
4123 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4124 a8_fixes[num_a8_fixes].section = section;
4125 a8_fixes[num_a8_fixes].offset = i;
4126 a8_fixes[num_a8_fixes].addend = offset;
4127 a8_fixes[num_a8_fixes].orig_insn = insn;
4128 a8_fixes[num_a8_fixes].stub_name = stub_name;
4129 a8_fixes[num_a8_fixes].stub_type = stub_type;
4130
4131 num_a8_fixes++;
4132 }
4133 }
4134 }
4135
4136 i += insn_32bit ? 4 : 2;
4137 last_was_32bit = insn_32bit;
4138 last_was_branch = is_32bit_branch;
4139 }
4140 }
4141
4142 if (elf_section_data (section)->this_hdr.contents == NULL)
4143 free (contents);
4144 }
4145
4146 *a8_fixes_p = a8_fixes;
4147 *num_a8_fixes_p = num_a8_fixes;
4148 *a8_fix_table_size_p = a8_fix_table_size;
4149
4150 return FALSE;
4151 }
4152
4153 /* Determine and set the size of the stub section for a final link.
4154
4155 The basic idea here is to examine all the relocations looking for
4156 PC-relative calls to a target that is unreachable with a "bl"
4157 instruction. */
4158
4159 bfd_boolean
4160 elf32_arm_size_stubs (bfd *output_bfd,
4161 bfd *stub_bfd,
4162 struct bfd_link_info *info,
4163 bfd_signed_vma group_size,
4164 asection * (*add_stub_section) (const char *, asection *),
4165 void (*layout_sections_again) (void))
4166 {
4167 bfd_size_type stub_group_size;
4168 bfd_boolean stubs_always_after_branch;
4169 bfd_boolean stub_changed = 0;
4170 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4171 struct a8_erratum_fix *a8_fixes = NULL;
4172 unsigned int num_a8_fixes = 0, prev_num_a8_fixes = 0, a8_fix_table_size = 10;
4173 struct a8_erratum_reloc *a8_relocs = NULL;
4174 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4175
4176 if (htab->fix_cortex_a8)
4177 {
4178 a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
4179 * a8_fix_table_size);
4180 a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
4181 * a8_reloc_table_size);
4182 }
4183
4184 /* Propagate mach to stub bfd, because it may not have been
4185 finalized when we created stub_bfd. */
4186 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4187 bfd_get_mach (output_bfd));
4188
4189 /* Stash our params away. */
4190 htab->stub_bfd = stub_bfd;
4191 htab->add_stub_section = add_stub_section;
4192 htab->layout_sections_again = layout_sections_again;
4193 stubs_always_after_branch = group_size < 0;
4194
4195 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4196 as the first half of a 32-bit branch straddling two 4K pages. This is a
4197 crude way of enforcing that. */
4198 if (htab->fix_cortex_a8)
4199 stubs_always_after_branch = 1;
4200
4201 if (group_size < 0)
4202 stub_group_size = -group_size;
4203 else
4204 stub_group_size = group_size;
4205
4206 if (stub_group_size == 1)
4207 {
4208 /* Default values. */
4209       /* The Thumb branch range of +-4MB has to be used as the default
4210 maximum size (a given section can contain both ARM and Thumb
4211 code, so the worst case has to be taken into account).
4212
4213 This value is 24K less than that, which allows for 2025
4214 12-byte stubs. If we exceed that, then we will fail to link.
4215 The user will have to relink with an explicit group size
4216 option. */
4217 stub_group_size = 4170000;
4218 }
4219
4220 group_sections (htab, stub_group_size, stubs_always_after_branch);
4221
4222 while (1)
4223 {
4224 bfd *input_bfd;
4225 unsigned int bfd_indx;
4226 asection *stub_sec;
4227
4228 num_a8_fixes = 0;
4229
4230 for (input_bfd = info->input_bfds, bfd_indx = 0;
4231 input_bfd != NULL;
4232 input_bfd = input_bfd->link_next, bfd_indx++)
4233 {
4234 Elf_Internal_Shdr *symtab_hdr;
4235 asection *section;
4236 Elf_Internal_Sym *local_syms = NULL;
4237
4238 num_a8_relocs = 0;
4239
4240 /* We'll need the symbol table in a second. */
4241 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4242 if (symtab_hdr->sh_info == 0)
4243 continue;
4244
4245 /* Walk over each section attached to the input bfd. */
4246 for (section = input_bfd->sections;
4247 section != NULL;
4248 section = section->next)
4249 {
4250 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4251
4252 /* If there aren't any relocs, then there's nothing more
4253 to do. */
4254 if ((section->flags & SEC_RELOC) == 0
4255 || section->reloc_count == 0
4256 || (section->flags & SEC_CODE) == 0)
4257 continue;
4258
4259 /* If this section is a link-once section that will be
4260 discarded, then don't create any stubs. */
4261 if (section->output_section == NULL
4262 || section->output_section->owner != output_bfd)
4263 continue;
4264
4265 /* Get the relocs. */
4266 internal_relocs
4267 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4268 NULL, info->keep_memory);
4269 if (internal_relocs == NULL)
4270 goto error_ret_free_local;
4271
4272 /* Now examine each relocation. */
4273 irela = internal_relocs;
4274 irelaend = irela + section->reloc_count;
4275 for (; irela < irelaend; irela++)
4276 {
4277 unsigned int r_type, r_indx;
4278 enum elf32_arm_stub_type stub_type;
4279 struct elf32_arm_stub_hash_entry *stub_entry;
4280 asection *sym_sec;
4281 bfd_vma sym_value;
4282 bfd_vma destination;
4283 struct elf32_arm_link_hash_entry *hash;
4284 const char *sym_name;
4285 char *stub_name;
4286 const asection *id_sec;
4287 unsigned char st_type;
4288 bfd_boolean created_stub = FALSE;
4289
4290 r_type = ELF32_R_TYPE (irela->r_info);
4291 r_indx = ELF32_R_SYM (irela->r_info);
4292
4293 if (r_type >= (unsigned int) R_ARM_max)
4294 {
4295 bfd_set_error (bfd_error_bad_value);
4296 error_ret_free_internal:
4297 if (elf_section_data (section)->relocs == NULL)
4298 free (internal_relocs);
4299 goto error_ret_free_local;
4300 }
4301
4302 /* Only look for stubs on branch instructions. */
4303 if ((r_type != (unsigned int) R_ARM_CALL)
4304 && (r_type != (unsigned int) R_ARM_THM_CALL)
4305 && (r_type != (unsigned int) R_ARM_JUMP24)
4306 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4307 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4308 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4309 && (r_type != (unsigned int) R_ARM_PLT32))
4310 continue;
4311
4312 /* Now determine the call target, its name, value,
4313 section. */
4314 sym_sec = NULL;
4315 sym_value = 0;
4316 destination = 0;
4317 hash = NULL;
4318 sym_name = NULL;
4319 if (r_indx < symtab_hdr->sh_info)
4320 {
4321 /* It's a local symbol. */
4322 Elf_Internal_Sym *sym;
4323 Elf_Internal_Shdr *hdr;
4324
4325 if (local_syms == NULL)
4326 {
4327 local_syms
4328 = (Elf_Internal_Sym *) symtab_hdr->contents;
4329 if (local_syms == NULL)
4330 local_syms
4331 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4332 symtab_hdr->sh_info, 0,
4333 NULL, NULL, NULL);
4334 if (local_syms == NULL)
4335 goto error_ret_free_internal;
4336 }
4337
4338 sym = local_syms + r_indx;
4339 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4340 sym_sec = hdr->bfd_section;
4341 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4342 sym_value = sym->st_value;
4343 destination = (sym_value + irela->r_addend
4344 + sym_sec->output_offset
4345 + sym_sec->output_section->vma);
4346 st_type = ELF_ST_TYPE (sym->st_info);
4347 sym_name
4348 = bfd_elf_string_from_elf_section (input_bfd,
4349 symtab_hdr->sh_link,
4350 sym->st_name);
4351 }
4352 else
4353 {
4354 /* It's an external symbol. */
4355 int e_indx;
4356
4357 e_indx = r_indx - symtab_hdr->sh_info;
4358 hash = ((struct elf32_arm_link_hash_entry *)
4359 elf_sym_hashes (input_bfd)[e_indx]);
4360
4361 while (hash->root.root.type == bfd_link_hash_indirect
4362 || hash->root.root.type == bfd_link_hash_warning)
4363 hash = ((struct elf32_arm_link_hash_entry *)
4364 hash->root.root.u.i.link);
4365
4366 if (hash->root.root.type == bfd_link_hash_defined
4367 || hash->root.root.type == bfd_link_hash_defweak)
4368 {
4369 sym_sec = hash->root.root.u.def.section;
4370 sym_value = hash->root.root.u.def.value;
4371 if (sym_sec->output_section != NULL)
4372 destination = (sym_value + irela->r_addend
4373 + sym_sec->output_offset
4374 + sym_sec->output_section->vma);
4375 }
4376 else if ((hash->root.root.type == bfd_link_hash_undefined)
4377 || (hash->root.root.type == bfd_link_hash_undefweak))
4378 {
4379 /* For a shared library, use the PLT stub as
4380 target address to decide whether a long
4381 branch stub is needed.
4382 For absolute code, they cannot be handled. */
4383 struct elf32_arm_link_hash_table *globals =
4384 elf32_arm_hash_table (info);
4385
4386 if (globals->splt != NULL && hash != NULL
4387 && hash->root.plt.offset != (bfd_vma) -1)
4388 {
4389 sym_sec = globals->splt;
4390 sym_value = hash->root.plt.offset;
4391 if (sym_sec->output_section != NULL)
4392 destination = (sym_value
4393 + sym_sec->output_offset
4394 + sym_sec->output_section->vma);
4395 }
4396 else
4397 continue;
4398 }
4399 else
4400 {
4401 bfd_set_error (bfd_error_bad_value);
4402 goto error_ret_free_internal;
4403 }
4404 st_type = ELF_ST_TYPE (hash->root.type);
4405 sym_name = hash->root.root.root.string;
4406 }
4407
4408 do
4409 {
4410 /* Determine what (if any) linker stub is needed. */
4411 stub_type = arm_type_of_stub (info, section, irela,
4412 st_type, hash,
4413 destination, sym_sec,
4414 input_bfd, sym_name);
4415 if (stub_type == arm_stub_none)
4416 break;
4417
4418 /* Support for grouping stub sections. */
4419 id_sec = htab->stub_group[section->id].link_sec;
4420
4421 /* Get the name of this stub. */
4422 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4423 irela);
4424 if (!stub_name)
4425 goto error_ret_free_internal;
4426
4427 /* We've either created a stub for this reloc already,
4428 or we are about to. */
4429 created_stub = TRUE;
4430
4431 stub_entry = arm_stub_hash_lookup
4432 (&htab->stub_hash_table, stub_name,
4433 FALSE, FALSE);
4434 if (stub_entry != NULL)
4435 {
4436 /* The proper stub has already been created. */
4437 free (stub_name);
4438 break;
4439 }
4440
4441 stub_entry = elf32_arm_add_stub (stub_name, section,
4442 htab);
4443 if (stub_entry == NULL)
4444 {
4445 free (stub_name);
4446 goto error_ret_free_internal;
4447 }
4448
4449 stub_entry->target_value = sym_value;
4450 stub_entry->target_section = sym_sec;
4451 stub_entry->stub_type = stub_type;
4452 stub_entry->h = hash;
4453 stub_entry->st_type = st_type;
4454
4455 if (sym_name == NULL)
4456 sym_name = "unnamed";
4457 stub_entry->output_name
4458 = bfd_alloc (htab->stub_bfd,
4459 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4460 + strlen (sym_name));
4461 if (stub_entry->output_name == NULL)
4462 {
4463 free (stub_name);
4464 goto error_ret_free_internal;
4465 }
4466
4467 /* For historical reasons, use the existing names for
4468 ARM-to-Thumb and Thumb-to-ARM stubs. */
4469 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4470 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4471 && st_type != STT_ARM_TFUNC)
4472 sprintf (stub_entry->output_name,
4473 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4474 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4475 || (r_type == (unsigned int) R_ARM_JUMP24))
4476 && st_type == STT_ARM_TFUNC)
4477 sprintf (stub_entry->output_name,
4478 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4479 else
4480 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4481 sym_name);
4482
4483 stub_changed = TRUE;
4484 }
4485 while (0);
4486
4487 /* Look for relocations which might trigger Cortex-A8
4488 erratum. */
4489 if (htab->fix_cortex_a8
4490 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4491 || r_type == (unsigned int) R_ARM_THM_JUMP19
4492 || r_type == (unsigned int) R_ARM_THM_CALL
4493 || r_type == (unsigned int) R_ARM_THM_XPC22))
4494 {
4495 bfd_vma from = section->output_section->vma
4496 + section->output_offset
4497 + irela->r_offset;
4498
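		      /* Only branches whose first halfword occupies the last
			 two bytes of a 4KB page can trigger the erratum.  */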
4499 if ((from & 0xfff) == 0xffe)
4500 {
4501 /* Found a candidate. Note we haven't checked the
4502 destination is within 4K here: if we do so (and
4503 don't create an entry in a8_relocs) we can't tell
4504 that a branch should have been relocated when
4505 scanning later. */
4506 if (num_a8_relocs == a8_reloc_table_size)
4507 {
4508 a8_reloc_table_size *= 2;
4509 a8_relocs = bfd_realloc (a8_relocs,
4510 sizeof (struct a8_erratum_reloc)
4511 * a8_reloc_table_size);
4512 }
4513
4514 a8_relocs[num_a8_relocs].from = from;
4515 a8_relocs[num_a8_relocs].destination = destination;
4516 a8_relocs[num_a8_relocs].r_type = r_type;
4517 a8_relocs[num_a8_relocs].st_type = st_type;
4518 a8_relocs[num_a8_relocs].sym_name = sym_name;
4519 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4520
4521 num_a8_relocs++;
4522 }
4523 }
4524 }
4525
4526 /* We're done with the internal relocs, free them. */
4527 if (elf_section_data (section)->relocs == NULL)
4528 free (internal_relocs);
4529 }
4530
4531 if (htab->fix_cortex_a8)
4532 {
4533 /* Sort relocs which might apply to Cortex-A8 erratum. */
4534 qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc),
4535 &a8_reloc_compare);
4536
4537 /* Scan for branches which might trigger Cortex-A8 erratum. */
4538 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4539 &num_a8_fixes, &a8_fix_table_size,
4540 a8_relocs, num_a8_relocs) != 0)
4541 goto error_ret_free_local;
4542 }
4543 }
4544
4545 if (htab->fix_cortex_a8 && num_a8_fixes != prev_num_a8_fixes)
4546 stub_changed = TRUE;
4547
4548 if (!stub_changed)
4549 break;
4550
4551 /* OK, we've added some stubs. Find out the new size of the
4552 stub sections. */
4553 for (stub_sec = htab->stub_bfd->sections;
4554 stub_sec != NULL;
4555 stub_sec = stub_sec->next)
4556 {
4557 /* Ignore non-stub sections. */
4558 if (!strstr (stub_sec->name, STUB_SUFFIX))
4559 continue;
4560
4561 stub_sec->size = 0;
4562 }
4563
4564 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4565
4566 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4567 if (htab->fix_cortex_a8)
4568 for (i = 0; i < num_a8_fixes; i++)
4569 {
4570 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4571 a8_fixes[i].section, htab);
4572
4573 if (stub_sec == NULL)
4574 goto error_ret_free_local;
4575
4576 stub_sec->size
4577 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4578 NULL);
4579 }
4580
4581
4582 /* Ask the linker to do its stuff. */
4583 (*htab->layout_sections_again) ();
4584 stub_changed = FALSE;
4585 prev_num_a8_fixes = num_a8_fixes;
4586 }
4587
4588 /* Add stubs for Cortex-A8 erratum fixes now. */
4589 if (htab->fix_cortex_a8)
4590 {
4591 for (i = 0; i < num_a8_fixes; i++)
4592 {
4593 struct elf32_arm_stub_hash_entry *stub_entry;
4594 char *stub_name = a8_fixes[i].stub_name;
4595 asection *section = a8_fixes[i].section;
4596 unsigned int section_id = a8_fixes[i].section->id;
4597 asection *link_sec = htab->stub_group[section_id].link_sec;
4598 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4599 const insn_sequence *template;
4600 int template_size, size = 0;
4601
4602 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4603 TRUE, FALSE);
4604 if (stub_entry == NULL)
4605 {
4606 	      (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4607 section->owner,
4608 stub_name);
4609 return FALSE;
4610 }
4611
4612 stub_entry->stub_sec = stub_sec;
4613 stub_entry->stub_offset = 0;
4614 stub_entry->id_sec = link_sec;
4615 stub_entry->stub_type = a8_fixes[i].stub_type;
4616 stub_entry->target_section = a8_fixes[i].section;
4617 stub_entry->target_value = a8_fixes[i].offset;
4618 stub_entry->target_addend = a8_fixes[i].addend;
4619 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4620 stub_entry->st_type = STT_ARM_TFUNC;
4621
4622 size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
4623 &template_size);
4624
4625 stub_entry->stub_size = size;
4626 stub_entry->stub_template = template;
4627 stub_entry->stub_template_size = template_size;
4628 }
4629
4630 /* Stash the Cortex-A8 erratum fix array for use later in
4631 elf32_arm_write_section(). */
4632 htab->a8_erratum_fixes = a8_fixes;
4633 htab->num_a8_erratum_fixes = num_a8_fixes;
4634 }
4635 else
4636 {
4637 htab->a8_erratum_fixes = NULL;
4638 htab->num_a8_erratum_fixes = 0;
4639 }
4640 return TRUE;
4641
4642 error_ret_free_local:
4643 return FALSE;
4644 }
4645
4646 /* Build all the stubs associated with the current output file. The
4647 stubs are kept in a hash table attached to the main linker hash
4648 table. We also set up the .plt entries for statically linked PIC
4649 functions here. This function is called via arm_elf_finish in the
4650 linker. */
4651
4652 bfd_boolean
4653 elf32_arm_build_stubs (struct bfd_link_info *info)
4654 {
4655 asection *stub_sec;
4656 struct bfd_hash_table *table;
4657 struct elf32_arm_link_hash_table *htab;
4658
4659 htab = elf32_arm_hash_table (info);
4660
4661 for (stub_sec = htab->stub_bfd->sections;
4662 stub_sec != NULL;
4663 stub_sec = stub_sec->next)
4664 {
4665 bfd_size_type size;
4666
4667 /* Ignore non-stub sections. */
4668 if (!strstr (stub_sec->name, STUB_SUFFIX))
4669 continue;
4670
4671 /* Allocate memory to hold the linker stubs. */
4672 size = stub_sec->size;
4673 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4674 if (stub_sec->contents == NULL && size != 0)
4675 return FALSE;
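      /* Reset the size so that arm_build_one_stub can use it as the running
	 offset of the next stub to be built.  */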
4676 stub_sec->size = 0;
4677 }
4678
4679 /* Build the stubs as directed by the stub hash table. */
4680 table = &htab->stub_hash_table;
4681 bfd_hash_traverse (table, arm_build_one_stub, info);
4682
4683 return TRUE;
4684 }
4685
4686 /* Locate the Thumb encoded calling stub for NAME. */
4687
4688 static struct elf_link_hash_entry *
4689 find_thumb_glue (struct bfd_link_info *link_info,
4690 const char *name,
4691 char **error_message)
4692 {
4693 char *tmp_name;
4694 struct elf_link_hash_entry *hash;
4695 struct elf32_arm_link_hash_table *hash_table;
4696
4697 /* We need a pointer to the armelf specific hash table. */
4698 hash_table = elf32_arm_hash_table (link_info);
4699
4700 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4701 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4702
4703 BFD_ASSERT (tmp_name);
4704
4705 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4706
4707 hash = elf_link_hash_lookup
4708 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4709
4710 if (hash == NULL
4711 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4712 tmp_name, name) == -1)
4713 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4714
4715 free (tmp_name);
4716
4717 return hash;
4718 }
4719
4720 /* Locate the ARM encoded calling stub for NAME. */
4721
4722 static struct elf_link_hash_entry *
4723 find_arm_glue (struct bfd_link_info *link_info,
4724 const char *name,
4725 char **error_message)
4726 {
4727 char *tmp_name;
4728 struct elf_link_hash_entry *myh;
4729 struct elf32_arm_link_hash_table *hash_table;
4730
4731 /* We need a pointer to the elfarm specific hash table. */
4732 hash_table = elf32_arm_hash_table (link_info);
4733
4734 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4735 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4736
4737 BFD_ASSERT (tmp_name);
4738
4739 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4740
4741 myh = elf_link_hash_lookup
4742 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4743
4744 if (myh == NULL
4745 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4746 tmp_name, name) == -1)
4747 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4748
4749 free (tmp_name);
4750
4751 return myh;
4752 }
4753
4754 /* ARM->Thumb glue (static images):
4755
4756 .arm
4757 __func_from_arm:
4758 ldr r12, __func_addr
4759 bx r12
4760 __func_addr:
4761    __func_addr:
   .word func    @ behave as if you saw an ARM_32 reloc.
4762
4763 (v5t static images)
4764 .arm
4765 __func_from_arm:
4766 ldr pc, __func_addr
4767 __func_addr:
4768    __func_addr:
   .word func    @ behave as if you saw an ARM_32 reloc.
4769
4770 (relocatable images)
4771 .arm
4772 __func_from_arm:
4773 ldr r12, __func_offset
4774 add r12, r12, pc
4775 bx r12
4776 __func_offset:
4777 .word func - . */
4778
4779 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4780 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4781 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4782 static const insn32 a2t3_func_addr_insn = 0x00000001;
4783
4784 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4785 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4786 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4787
4788 #define ARM2THUMB_PIC_GLUE_SIZE 16
4789 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4790 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4791 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4792
4793 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4794
4795 .thumb .thumb
4796 .align 2 .align 2
4797 __func_from_thumb: __func_from_thumb:
4798 bx pc push {r6, lr}
4799 nop ldr r6, __func_addr
4800 .arm mov lr, pc
4801 b func bx r6
4802 .arm
4803 ;; back_to_thumb
4804 ldmia r13! {r6, lr}
4805 bx lr
4806 __func_addr:
4807 .word func */
4808
4809 #define THUMB2ARM_GLUE_SIZE 8
4810 static const insn16 t2a1_bx_pc_insn = 0x4778;
4811 static const insn16 t2a2_noop_insn = 0x46c0;
4812 static const insn32 t2a3_b_insn = 0xea000000;
4813
4814 #define VFP11_ERRATUM_VENEER_SIZE 8
4815
4816 #define ARM_BX_VENEER_SIZE 12
4817 static const insn32 armbx1_tst_insn = 0xe3100001;
4818 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4819 static const insn32 armbx3_bx_insn = 0xe12fff10;
4820
4821 #ifndef ELFARM_NABI_C_INCLUDED
4822 static void
4823 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4824 {
4825 asection * s;
4826 bfd_byte * contents;
4827
4828 if (size == 0)
4829 {
4830 /* Do not include empty glue sections in the output. */
4831 if (abfd != NULL)
4832 {
4833 s = bfd_get_section_by_name (abfd, name);
4834 if (s != NULL)
4835 s->flags |= SEC_EXCLUDE;
4836 }
4837 return;
4838 }
4839
4840 BFD_ASSERT (abfd != NULL);
4841
4842 s = bfd_get_section_by_name (abfd, name);
4843 BFD_ASSERT (s != NULL);
4844
4845 contents = bfd_alloc (abfd, size);
4846
4847 BFD_ASSERT (s->size == size);
4848 s->contents = contents;
4849 }
4850
4851 bfd_boolean
4852 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4853 {
4854 struct elf32_arm_link_hash_table * globals;
4855
4856 globals = elf32_arm_hash_table (info);
4857 BFD_ASSERT (globals != NULL);
4858
4859 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4860 globals->arm_glue_size,
4861 ARM2THUMB_GLUE_SECTION_NAME);
4862
4863 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4864 globals->thumb_glue_size,
4865 THUMB2ARM_GLUE_SECTION_NAME);
4866
4867 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4868 globals->vfp11_erratum_glue_size,
4869 VFP11_ERRATUM_VENEER_SECTION_NAME);
4870
4871 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4872 globals->bx_glue_size,
4873 ARM_BX_GLUE_SECTION_NAME);
4874
4875 return TRUE;
4876 }
4877
4878 /* Allocate space and symbols for calling a Thumb function from ARM mode.
4879    Returns the symbol identifying the stub.  */
4880
4881 static struct elf_link_hash_entry *
4882 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4883 struct elf_link_hash_entry * h)
4884 {
4885 const char * name = h->root.root.string;
4886 asection * s;
4887 char * tmp_name;
4888 struct elf_link_hash_entry * myh;
4889 struct bfd_link_hash_entry * bh;
4890 struct elf32_arm_link_hash_table * globals;
4891 bfd_vma val;
4892 bfd_size_type size;
4893
4894 globals = elf32_arm_hash_table (link_info);
4895
4896 BFD_ASSERT (globals != NULL);
4897 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4898
4899 s = bfd_get_section_by_name
4900 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4901
4902 BFD_ASSERT (s != NULL);
4903
4904 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4905
4906 BFD_ASSERT (tmp_name);
4907
4908 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4909
4910 myh = elf_link_hash_lookup
4911 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
4912
4913 if (myh != NULL)
4914 {
4915 /* We've already seen this guy. */
4916 free (tmp_name);
4917 return myh;
4918 }
4919
4920 /* The only trick here is using hash_table->arm_glue_size as the value.
4921 Even though the section isn't allocated yet, this is where we will be
4922 putting it. The +1 on the value marks that the stub has not been
4923 output yet - not that it is a Thumb function. */
4924 bh = NULL;
4925 val = globals->arm_glue_size + 1;
4926 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4927 tmp_name, BSF_GLOBAL, s, val,
4928 NULL, TRUE, FALSE, &bh);
4929
4930 myh = (struct elf_link_hash_entry *) bh;
4931 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4932 myh->forced_local = 1;
4933
4934 free (tmp_name);
4935
4936 if (link_info->shared || globals->root.is_relocatable_executable
4937 || globals->pic_veneer)
4938 size = ARM2THUMB_PIC_GLUE_SIZE;
4939 else if (globals->use_blx)
4940 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
4941 else
4942 size = ARM2THUMB_STATIC_GLUE_SIZE;
4943
4944 s->size += size;
4945 globals->arm_glue_size += size;
4946
4947 return myh;
4948 }
4949
4950 /* Allocate space for ARMv4 BX veneers. */
4951
4952 static void
4953 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
4954 {
4955 asection * s;
4956 struct elf32_arm_link_hash_table *globals;
4957 char *tmp_name;
4958 struct elf_link_hash_entry *myh;
4959 struct bfd_link_hash_entry *bh;
4960 bfd_vma val;
4961
4962 /* BX PC does not need a veneer. */
4963 if (reg == 15)
4964 return;
4965
4966 globals = elf32_arm_hash_table (link_info);
4967
4968 BFD_ASSERT (globals != NULL);
4969 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4970
4971 /* Check if this veneer has already been allocated. */
4972 if (globals->bx_glue_offset[reg])
4973 return;
4974
4975 s = bfd_get_section_by_name
4976 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
4977
4978 BFD_ASSERT (s != NULL);
4979
4980 /* Add symbol for veneer. */
4981 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
4982
4983 BFD_ASSERT (tmp_name);
4984
4985 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
4986
4987 myh = elf_link_hash_lookup
4988 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
4989
4990 BFD_ASSERT (myh == NULL);
4991
4992 bh = NULL;
4993 val = globals->bx_glue_size;
4994 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4995 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
4996 NULL, TRUE, FALSE, &bh);
4997
4998 myh = (struct elf_link_hash_entry *) bh;
4999 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5000 myh->forced_local = 1;
5001
5002 s->size += ARM_BX_VENEER_SIZE;
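  /* Record the veneer's offset.  Bit 1 appears to act as an 'allocated'
     marker so that even a zero offset passes the earlier check; offsets are
     multiples of ARM_BX_VENEER_SIZE, so the low bits are otherwise unused.  */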
5003 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5004 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5005 }
5006
5007
5008 /* Add an entry to the code/data map for section SEC. */
5009
5010 static void
5011 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5012 {
5013 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5014 unsigned int newidx;
5015
5016 if (sec_data->map == NULL)
5017 {
5018 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5019 sec_data->mapcount = 0;
5020 sec_data->mapsize = 1;
5021 }
5022
5023 newidx = sec_data->mapcount++;
5024
5025 if (sec_data->mapcount > sec_data->mapsize)
5026 {
5027 sec_data->mapsize *= 2;
5028 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5029 * sizeof (elf32_arm_section_map));
5030 }
5031
5032 if (sec_data->map)
5033 {
5034 sec_data->map[newidx].vma = vma;
5035 sec_data->map[newidx].type = type;
5036 }
5037 }
5038
5039
5040 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5041 veneers are handled for now. */
5042
5043 static bfd_vma
5044 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5045 elf32_vfp11_erratum_list *branch,
5046 bfd *branch_bfd,
5047 asection *branch_sec,
5048 unsigned int offset)
5049 {
5050 asection *s;
5051 struct elf32_arm_link_hash_table *hash_table;
5052 char *tmp_name;
5053 struct elf_link_hash_entry *myh;
5054 struct bfd_link_hash_entry *bh;
5055 bfd_vma val;
5056 struct _arm_elf_section_data *sec_data;
5057 int errcount;
5058 elf32_vfp11_erratum_list *newerr;
5059
5060 hash_table = elf32_arm_hash_table (link_info);
5061
5062 BFD_ASSERT (hash_table != NULL);
5063 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5064
5065 s = bfd_get_section_by_name
5066 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5067
5068   BFD_ASSERT (s != NULL);
5069
5070   sec_data = elf32_arm_section_data (s);
5071
5072 tmp_name = bfd_malloc ((bfd_size_type) strlen
5073 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5074
5075 BFD_ASSERT (tmp_name);
5076
5077 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5078 hash_table->num_vfp11_fixes);
5079
5080 myh = elf_link_hash_lookup
5081 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5082
5083 BFD_ASSERT (myh == NULL);
5084
5085 bh = NULL;
5086 val = hash_table->vfp11_erratum_glue_size;
5087 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5088 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5089 NULL, TRUE, FALSE, &bh);
5090
5091 myh = (struct elf_link_hash_entry *) bh;
5092 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5093 myh->forced_local = 1;
5094
5095 /* Link veneer back to calling location. */
5096 errcount = ++(sec_data->erratumcount);
5097 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5098
5099 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5100 newerr->vma = -1;
5101 newerr->u.v.branch = branch;
5102 newerr->u.v.id = hash_table->num_vfp11_fixes;
5103 branch->u.b.veneer = newerr;
5104
5105 newerr->next = sec_data->erratumlist;
5106 sec_data->erratumlist = newerr;
5107
5108 /* A symbol for the return from the veneer. */
5109 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5110 hash_table->num_vfp11_fixes);
5111
5112 myh = elf_link_hash_lookup
5113 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5114
5115 if (myh != NULL)
5116 abort ();
5117
5118 bh = NULL;
5119 val = offset + 4;
5120 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5121 branch_sec, val, NULL, TRUE, FALSE, &bh);
5122
5123 myh = (struct elf_link_hash_entry *) bh;
5124 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5125 myh->forced_local = 1;
5126
5127 free (tmp_name);
5128
5129 /* Generate a mapping symbol for the veneer section, and explicitly add an
5130 entry for that symbol to the code/data map for the section. */
5131 if (hash_table->vfp11_erratum_glue_size == 0)
5132 {
5133 bh = NULL;
5134 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5135 ever requires this erratum fix. */
5136 _bfd_generic_link_add_one_symbol (link_info,
5137 hash_table->bfd_of_glue_owner, "$a",
5138 BSF_LOCAL, s, 0, NULL,
5139 TRUE, FALSE, &bh);
5140
5141 myh = (struct elf_link_hash_entry *) bh;
5142 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5143 myh->forced_local = 1;
5144
5145 /* The elf32_arm_init_maps function only cares about symbols from input
5146 BFDs. We must make a note of this generated mapping symbol
5147 ourselves so that code byteswapping works properly in
5148 elf32_arm_write_section. */
5149 elf32_arm_section_map_add (s, 'a', 0);
5150 }
5151
5152 s->size += VFP11_ERRATUM_VENEER_SIZE;
5153 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5154 hash_table->num_vfp11_fixes++;
5155
5156 /* The offset of the veneer. */
5157 return val;
5158 }
5159
5160 #define ARM_GLUE_SECTION_FLAGS \
5161 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5162 | SEC_READONLY | SEC_LINKER_CREATED)
5163
5164 /* Create a fake section for use by the ARM backend of the linker. */
5165
5166 static bfd_boolean
5167 arm_make_glue_section (bfd * abfd, const char * name)
5168 {
5169 asection * sec;
5170
5171 sec = bfd_get_section_by_name (abfd, name);
5172 if (sec != NULL)
5173 /* Already made. */
5174 return TRUE;
5175
5176 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5177
5178 if (sec == NULL
5179 || !bfd_set_section_alignment (abfd, sec, 2))
5180 return FALSE;
5181
5182 /* Set the gc mark to prevent the section from being removed by garbage
5183 collection, despite the fact that no relocs refer to this section. */
5184 sec->gc_mark = 1;
5185
5186 return TRUE;
5187 }
5188
5189 /* Add the glue sections to ABFD. This function is called from the
5190 linker scripts in ld/emultempl/{armelf}.em. */
5191
5192 bfd_boolean
5193 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5194 struct bfd_link_info *info)
5195 {
5196 /* If we are only performing a partial
5197 link do not bother adding the glue. */
5198 if (info->relocatable)
5199 return TRUE;
5200
5201 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5202 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5203 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5204 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5205 }
5206
5207 /* Select a BFD to be used to hold the sections used by the glue code.
5208 This function is called from the linker scripts in ld/emultempl/
5209 {armelf/pe}.em. */
5210
5211 bfd_boolean
5212 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5213 {
5214 struct elf32_arm_link_hash_table *globals;
5215
5216 /* If we are only performing a partial link
5217 do not bother getting a bfd to hold the glue. */
5218 if (info->relocatable)
5219 return TRUE;
5220
5221 /* Make sure we don't attach the glue sections to a dynamic object. */
5222 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5223
5224 globals = elf32_arm_hash_table (info);
5225
5226 BFD_ASSERT (globals != NULL);
5227
5228 if (globals->bfd_of_glue_owner != NULL)
5229 return TRUE;
5230
5231 /* Save the bfd for later use. */
5232 globals->bfd_of_glue_owner = abfd;
5233
5234 return TRUE;
5235 }
5236
5237 static void
5238 check_use_blx (struct elf32_arm_link_hash_table *globals)
5239 {
5240 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5241 Tag_CPU_arch) > 2)
5242 globals->use_blx = 1;
5243 }
5244
5245 bfd_boolean
5246 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5247 struct bfd_link_info *link_info)
5248 {
5249 Elf_Internal_Shdr *symtab_hdr;
5250 Elf_Internal_Rela *internal_relocs = NULL;
5251 Elf_Internal_Rela *irel, *irelend;
5252 bfd_byte *contents = NULL;
5253
5254 asection *sec;
5255 struct elf32_arm_link_hash_table *globals;
5256
5257 /* If we are only performing a partial link do not bother
5258 to construct any glue. */
5259 if (link_info->relocatable)
5260 return TRUE;
5261
5262 /* Here we have a bfd that is to be included on the link. We have a
5263 hook to do reloc rummaging, before section sizes are nailed down. */
5264 globals = elf32_arm_hash_table (link_info);
5265
5266 BFD_ASSERT (globals != NULL);
5267
5268 check_use_blx (globals);
5269
5270 if (globals->byteswap_code && !bfd_big_endian (abfd))
5271 {
5272 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5273 abfd);
5274 return FALSE;
5275 }
5276
5277 /* PR 5398: If we have not decided to include any loadable sections in
5278 the output then we will not have a glue owner bfd. This is OK, it
5279 just means that there is nothing else for us to do here. */
5280 if (globals->bfd_of_glue_owner == NULL)
5281 return TRUE;
5282
5283 /* Rummage around all the relocs and map the glue vectors. */
5284 sec = abfd->sections;
5285
5286 if (sec == NULL)
5287 return TRUE;
5288
5289 for (; sec != NULL; sec = sec->next)
5290 {
5291 if (sec->reloc_count == 0)
5292 continue;
5293
5294 if ((sec->flags & SEC_EXCLUDE) != 0)
5295 continue;
5296
5297 symtab_hdr = & elf_symtab_hdr (abfd);
5298
5299 /* Load the relocs. */
5300 internal_relocs
5301 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5302
5303 if (internal_relocs == NULL)
5304 goto error_return;
5305
5306 irelend = internal_relocs + sec->reloc_count;
5307 for (irel = internal_relocs; irel < irelend; irel++)
5308 {
5309 long r_type;
5310 unsigned long r_index;
5311
5312 struct elf_link_hash_entry *h;
5313
5314 r_type = ELF32_R_TYPE (irel->r_info);
5315 r_index = ELF32_R_SYM (irel->r_info);
5316
5317 /* These are the only relocation types we care about. */
5318 if ( r_type != R_ARM_PC24
5319 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5320 continue;
5321
5322 /* Get the section contents if we haven't done so already. */
5323 if (contents == NULL)
5324 {
5325 /* Get cached copy if it exists. */
5326 if (elf_section_data (sec)->this_hdr.contents != NULL)
5327 contents = elf_section_data (sec)->this_hdr.contents;
5328 else
5329 {
5330 /* Go get them off disk. */
5331 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5332 goto error_return;
5333 }
5334 }
5335
5336 if (r_type == R_ARM_V4BX)
5337 {
5338 int reg;
5339
5340 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5341 record_arm_bx_glue (link_info, reg);
5342 continue;
5343 }
5344
5345 /* If the relocation is not against a symbol it cannot concern us. */
5346 h = NULL;
5347
5348 /* We don't care about local symbols. */
5349 if (r_index < symtab_hdr->sh_info)
5350 continue;
5351
5352 /* This is an external symbol. */
5353 r_index -= symtab_hdr->sh_info;
5354 h = (struct elf_link_hash_entry *)
5355 elf_sym_hashes (abfd)[r_index];
5356
5357 /* If the relocation is against a static symbol it must be within
5358 the current section and so cannot be a cross ARM/Thumb relocation. */
5359 if (h == NULL)
5360 continue;
5361
5362 /* If the call will go through a PLT entry then we do not need
5363 glue. */
5364 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5365 continue;
5366
5367 switch (r_type)
5368 {
5369 case R_ARM_PC24:
5370 /* This one is a call from arm code. We need to look up
5371 the target of the call. If it is a thumb target, we
5372 insert glue. */
5373 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5374 record_arm_to_thumb_glue (link_info, h);
5375 break;
5376
5377 default:
5378 abort ();
5379 }
5380 }
5381
5382 if (contents != NULL
5383 && elf_section_data (sec)->this_hdr.contents != contents)
5384 free (contents);
5385 contents = NULL;
5386
5387 if (internal_relocs != NULL
5388 && elf_section_data (sec)->relocs != internal_relocs)
5389 free (internal_relocs);
5390 internal_relocs = NULL;
5391 }
5392
5393 return TRUE;
5394
5395 error_return:
5396 if (contents != NULL
5397 && elf_section_data (sec)->this_hdr.contents != contents)
5398 free (contents);
5399 if (internal_relocs != NULL
5400 && elf_section_data (sec)->relocs != internal_relocs)
5401 free (internal_relocs);
5402
5403 return FALSE;
5404 }
5405 #endif
5406
5407
5408 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5409
5410 void
5411 bfd_elf32_arm_init_maps (bfd *abfd)
5412 {
5413 Elf_Internal_Sym *isymbuf;
5414 Elf_Internal_Shdr *hdr;
5415 unsigned int i, localsyms;
5416
5417 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5418 if (! is_arm_elf (abfd))
5419 return;
5420
5421 if ((abfd->flags & DYNAMIC) != 0)
5422 return;
5423
5424 hdr = & elf_symtab_hdr (abfd);
5425 localsyms = hdr->sh_info;
5426
5427 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5428 should contain the number of local symbols, which should come before any
5429 global symbols. Mapping symbols are always local. */
5430 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5431 NULL);
5432
5433 /* No internal symbols read? Skip this BFD. */
5434 if (isymbuf == NULL)
5435 return;
5436
5437 for (i = 0; i < localsyms; i++)
5438 {
5439 Elf_Internal_Sym *isym = &isymbuf[i];
5440 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5441 const char *name;
5442
5443 if (sec != NULL
5444 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5445 {
5446 name = bfd_elf_string_from_elf_section (abfd,
5447 hdr->sh_link, isym->st_name);
5448
5449 if (bfd_is_arm_special_symbol_name (name,
5450 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5451 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5452 }
5453 }
5454 }
5455
5456
5457 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5458 say what they wanted. */
5459
5460 void
5461 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5462 {
5463 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5464 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5465
5466 if (globals->fix_cortex_a8 == -1)
5467 {
5468 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5469 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5470 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5471 || out_attr[Tag_CPU_arch_profile].i == 0))
5472 globals->fix_cortex_a8 = 1;
5473 else
5474 globals->fix_cortex_a8 = 0;
5475 }
5476 }
5477
5478
5479 void
5480 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5481 {
5482 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5483 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5484
5485 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5486 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5487 {
5488 switch (globals->vfp11_fix)
5489 {
5490 case BFD_ARM_VFP11_FIX_DEFAULT:
5491 case BFD_ARM_VFP11_FIX_NONE:
5492 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5493 break;
5494
5495 default:
5496 /* Give a warning, but do as the user requests anyway. */
5497 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5498 "workaround is not necessary for target architecture"), obfd);
5499 }
5500 }
5501 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5502 /* For earlier architectures, we might need the workaround, but do not
5503 enable it by default. If users are running with broken hardware, they
5504 must enable the erratum fix explicitly. */
5505 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5506 }
5507
5508
5509 enum bfd_arm_vfp11_pipe
5510 {
5511 VFP11_FMAC,
5512 VFP11_LS,
5513 VFP11_DS,
5514 VFP11_BAD
5515 };
5516
5517 /* Return a VFP register number. This is encoded as RX:X for single-precision
5518 registers, or X:RX for double-precision registers, where RX is the group of
5519 four bits in the instruction encoding and X is the single extension bit.
5520 RX and X fields are specified using their lowest (starting) bit. The return
5521 value is:
5522
5523 0...31: single-precision registers s0...s31
5524 32...63: double-precision registers d0...d31.
5525
5526 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5527 encounter VFP3 instructions, so we allow the full range for DP registers. */
5528
5529 static unsigned int
5530 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5531 unsigned int x)
5532 {
5533 if (is_double)
5534 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5535 else
5536 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5537 }
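/* Editorial worked example: with RX starting at bit 0 and X at bit 5, an
   instruction whose bits [3:0] are 0b0101 and whose bit 5 is 1 decodes as
   (5 << 1) | 1 == 11 (s11) in single precision, or
   ((5 | (1 << 4)) + 32) == 53 (d21) in double precision, matching the
   0...31 / 32...63 numbering described above.  */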
5538
5539 /* Set bits in *WMASK according to a register number REG as encoded by
5540 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5541
5542 static void
5543 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5544 {
5545 if (reg < 32)
5546 *wmask |= 1 << reg;
5547 else if (reg < 48)
5548 *wmask |= 3 << ((reg - 32) * 2);
5549 }
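/* Editorial example: for d2 (reg == 34 as returned by bfd_arm_vfp11_regno)
   the mask gains 3 << ((34 - 32) * 2), i.e. bits 4 and 5 -- the two
   overlapping single-precision registers s4 and s5.  */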
5550
5551 /* Return TRUE if WMASK overwrites anything in REGS. */
5552
5553 static bfd_boolean
5554 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5555 {
5556 int i;
5557
5558 for (i = 0; i < numregs; i++)
5559 {
5560 unsigned int reg = regs[i];
5561
5562 if (reg < 32 && (wmask & (1 << reg)) != 0)
5563 return TRUE;
5564
5565 reg -= 32;
5566
5567 if (reg >= 16)
5568 continue;
5569
5570 if ((wmask & (3 << (reg * 2))) != 0)
5571 return TRUE;
5572 }
5573
5574 return FALSE;
5575 }
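/* Editorial example: if an earlier FMAC read s5 (REGS contains 5) and a
   later instruction writes d2 (WMASK has bits 4 and 5 set), then
   wmask & (1 << 5) is non-zero and an antidependency is reported.  */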
5576
5577 /* In this function, we're interested in two things: finding input registers
5578 for VFP data-processing instructions, and finding the set of registers which
5579 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5580 hold the written set, so FLDM etc. are easy to deal with (we're only
5581 interested in 32 SP registers or 16 DP registers, due to the VFP version
5582 implemented by the chip in question). DP registers are marked by setting
5583 both SP registers in the write mask. */
5584
5585 static enum bfd_arm_vfp11_pipe
5586 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5587 int *numregs)
5588 {
5589 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5590 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5591
5592 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5593 {
5594 unsigned int pqrs;
5595 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5596 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5597
5598 pqrs = ((insn & 0x00800000) >> 20)
5599 | ((insn & 0x00300000) >> 19)
5600 | ((insn & 0x00000040) >> 6);
5601
5602 switch (pqrs)
5603 {
5604 case 0: /* fmac[sd]. */
5605 case 1: /* fnmac[sd]. */
5606 case 2: /* fmsc[sd]. */
5607 case 3: /* fnmsc[sd]. */
5608 pipe = VFP11_FMAC;
5609 bfd_arm_vfp11_write_mask (destmask, fd);
5610 regs[0] = fd;
5611 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5612 regs[2] = fm;
5613 *numregs = 3;
5614 break;
5615
5616 case 4: /* fmul[sd]. */
5617 case 5: /* fnmul[sd]. */
5618 case 6: /* fadd[sd]. */
5619 case 7: /* fsub[sd]. */
5620 pipe = VFP11_FMAC;
5621 goto vfp_binop;
5622
5623 case 8: /* fdiv[sd]. */
5624 pipe = VFP11_DS;
5625 vfp_binop:
5626 bfd_arm_vfp11_write_mask (destmask, fd);
5627 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5628 regs[1] = fm;
5629 *numregs = 2;
5630 break;
5631
5632 case 15: /* extended opcode. */
5633 {
5634 unsigned int extn = ((insn >> 15) & 0x1e)
5635 | ((insn >> 7) & 1);
5636
5637 switch (extn)
5638 {
5639 case 0: /* fcpy[sd]. */
5640 case 1: /* fabs[sd]. */
5641 case 2: /* fneg[sd]. */
5642 case 8: /* fcmp[sd]. */
5643 case 9: /* fcmpe[sd]. */
5644 case 10: /* fcmpz[sd]. */
5645 case 11: /* fcmpez[sd]. */
5646 case 16: /* fuito[sd]. */
5647 case 17: /* fsito[sd]. */
5648 case 24: /* ftoui[sd]. */
5649 case 25: /* ftouiz[sd]. */
5650 case 26: /* ftosi[sd]. */
5651 case 27: /* ftosiz[sd]. */
5652 /* These instructions will not bounce due to underflow. */
5653 *numregs = 0;
5654 pipe = VFP11_FMAC;
5655 break;
5656
5657 case 3: /* fsqrt[sd]. */
5658 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5659 registers to cause the erratum in previous instructions. */
5660 bfd_arm_vfp11_write_mask (destmask, fd);
5661 pipe = VFP11_DS;
5662 break;
5663
5664 case 15: /* fcvt{ds,sd}. */
5665 {
5666 int rnum = 0;
5667
5668 bfd_arm_vfp11_write_mask (destmask, fd);
5669
5670 /* Only FCVTSD can underflow. */
5671 if ((insn & 0x100) != 0)
5672 regs[rnum++] = fm;
5673
5674 *numregs = rnum;
5675
5676 pipe = VFP11_FMAC;
5677 }
5678 break;
5679
5680 default:
5681 return VFP11_BAD;
5682 }
5683 }
5684 break;
5685
5686 default:
5687 return VFP11_BAD;
5688 }
5689 }
5690 /* Two-register transfer. */
5691 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5692 {
5693 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5694
5695 if ((insn & 0x100000) == 0)
5696 {
5697 if (is_double)
5698 bfd_arm_vfp11_write_mask (destmask, fm);
5699 else
5700 {
5701 bfd_arm_vfp11_write_mask (destmask, fm);
5702 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5703 }
5704 }
5705
5706 pipe = VFP11_LS;
5707 }
5708 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5709 {
5710 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5711 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5712
5713 switch (puw)
5714 {
5715 case 0: /* Two-reg transfer. We should catch these above. */
5716 abort ();
5717
5718 case 2: /* fldm[sdx]. */
5719 case 3:
5720 case 5:
5721 {
5722 unsigned int i, offset = insn & 0xff;
5723
5724 if (is_double)
5725 offset >>= 1;
5726
5727 for (i = fd; i < fd + offset; i++)
5728 bfd_arm_vfp11_write_mask (destmask, i);
5729 }
5730 break;
5731
5732 case 4: /* fld[sd]. */
5733 case 6:
5734 bfd_arm_vfp11_write_mask (destmask, fd);
5735 break;
5736
5737 default:
5738 return VFP11_BAD;
5739 }
5740
5741 pipe = VFP11_LS;
5742 }
5743 /* Single-register transfer. Note L==0. */
5744 else if ((insn & 0x0f100e10) == 0x0e000a10)
5745 {
5746 unsigned int opcode = (insn >> 21) & 7;
5747 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5748
5749 switch (opcode)
5750 {
5751 case 0: /* fmsr/fmdlr. */
5752 case 1: /* fmdhr. */
5753 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5754 destination register. I don't know if this is exactly right,
5755 but it is the conservative choice. */
5756 bfd_arm_vfp11_write_mask (destmask, fn);
5757 break;
5758
5759 case 7: /* fmxr. */
5760 break;
5761 }
5762
5763 pipe = VFP11_LS;
5764 }
5765
5766 return pipe;
5767 }
5768
5769
5770 static int elf32_arm_compare_mapping (const void * a, const void * b);
5771
5772
5773 /* Look for potentially-troublesome code sequences which might trigger the
5774 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5775 (available from ARM) for details of the erratum. A short version is
5776 described in ld.texinfo. */
5777
5778 bfd_boolean
5779 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5780 {
5781 asection *sec;
5782 bfd_byte *contents = NULL;
5783 int state = 0;
5784 int regs[3], numregs = 0;
5785 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5786 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5787
5788 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5789 The states transition as follows:
5790
5791 0 -> 1 (vector) or 0 -> 2 (scalar)
5792 A VFP FMAC-pipeline instruction has been seen. Fill
5793 regs[0]..regs[numregs-1] with its input operands. Remember this
5794 instruction in 'first_fmac'.
5795
5796 1 -> 2
5797 Any instruction, except for a VFP instruction which overwrites
5798 regs[*].
5799
5800 1 -> 3 [ -> 0 ] or
5801 2 -> 3 [ -> 0 ]
5802 A VFP instruction has been seen which overwrites any of regs[*].
5803 We must make a veneer! Reset state to 0 before examining next
5804 instruction.
5805
5806 2 -> 0
5807 If we fail to match anything in state 2, reset to state 0 and reset
5808 the instruction pointer to the instruction after 'first_fmac'.
5809
5810 If the VFP11 vector mode is in use, there must be at least two unrelated
5811 instructions between anti-dependent VFP11 instructions to properly avoid
5812 triggering the erratum, hence the use of the extra state 1. */
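/* A hypothetical back-to-back sequence illustrating the scalar case
   (states 0 -> 2 -> 3):

   fmacs s0, s1, s2    @ state 0 -> 2: FMAC pipe, inputs s0/s1/s2 recorded
   fsubs s1, s6, s7    @ overwrites s1, an input of the fmacs -> state 3

   The second instruction overwrites a source of the still-in-flight fmacs,
   which is the antidependency the erratum describes, so a veneer (and a
   branch to it at the fmacs' location) is recorded below.  */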
5813
5814 /* If we are only performing a partial link do not bother
5815 to construct any glue. */
5816 if (link_info->relocatable)
5817 return TRUE;
5818
5819 /* Skip if this bfd does not correspond to an ELF image. */
5820 if (! is_arm_elf (abfd))
5821 return TRUE;
5822
5823 /* We should have chosen a fix type by the time we get here. */
5824 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5825
5826 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5827 return TRUE;
5828
5829 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5830 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5831 return TRUE;
5832
5833 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5834 {
5835 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5836 struct _arm_elf_section_data *sec_data;
5837
5838 /* If we don't have executable progbits, we're not interested in this
5839 section. Also skip if section is to be excluded. */
5840 if (elf_section_type (sec) != SHT_PROGBITS
5841 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5842 || (sec->flags & SEC_EXCLUDE) != 0
5843 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5844 || sec->output_section == bfd_abs_section_ptr
5845 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5846 continue;
5847
5848 sec_data = elf32_arm_section_data (sec);
5849
5850 if (sec_data->mapcount == 0)
5851 continue;
5852
5853 if (elf_section_data (sec)->this_hdr.contents != NULL)
5854 contents = elf_section_data (sec)->this_hdr.contents;
5855 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5856 goto error_return;
5857
5858 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5859 elf32_arm_compare_mapping);
5860
5861 for (span = 0; span < sec_data->mapcount; span++)
5862 {
5863 unsigned int span_start = sec_data->map[span].vma;
5864 unsigned int span_end = (span == sec_data->mapcount - 1)
5865 ? sec->size : sec_data->map[span + 1].vma;
5866 char span_type = sec_data->map[span].type;
5867
5868 /* FIXME: Only ARM mode is supported at present. We may need to
5869 support Thumb-2 mode also at some point. */
5870 if (span_type != 'a')
5871 continue;
5872
5873 for (i = span_start; i < span_end;)
5874 {
5875 unsigned int next_i = i + 4;
5876 unsigned int insn = bfd_big_endian (abfd)
5877 ? (contents[i] << 24)
5878 | (contents[i + 1] << 16)
5879 | (contents[i + 2] << 8)
5880 | contents[i + 3]
5881 : (contents[i + 3] << 24)
5882 | (contents[i + 2] << 16)
5883 | (contents[i + 1] << 8)
5884 | contents[i];
5885 unsigned int writemask = 0;
5886 enum bfd_arm_vfp11_pipe pipe;
5887
5888 switch (state)
5889 {
5890 case 0:
5891 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5892 &numregs);
5893 /* I'm assuming the VFP11 erratum can trigger with denorm
5894 operands on either the FMAC or the DS pipeline. This might
5895 lead to slightly overenthusiastic veneer insertion. */
5896 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
5897 {
5898 state = use_vector ? 1 : 2;
5899 first_fmac = i;
5900 veneer_of_insn = insn;
5901 }
5902 break;
5903
5904 case 1:
5905 {
5906 int other_regs[3], other_numregs;
5907 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5908 other_regs,
5909 &other_numregs);
5910 if (pipe != VFP11_BAD
5911 && bfd_arm_vfp11_antidependency (writemask, regs,
5912 numregs))
5913 state = 3;
5914 else
5915 state = 2;
5916 }
5917 break;
5918
5919 case 2:
5920 {
5921 int other_regs[3], other_numregs;
5922 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5923 other_regs,
5924 &other_numregs);
5925 if (pipe != VFP11_BAD
5926 && bfd_arm_vfp11_antidependency (writemask, regs,
5927 numregs))
5928 state = 3;
5929 else
5930 {
5931 state = 0;
5932 next_i = first_fmac + 4;
5933 }
5934 }
5935 break;
5936
5937 case 3:
5938 abort (); /* Should be unreachable. */
5939 }
5940
5941 if (state == 3)
5942 {
5943 elf32_vfp11_erratum_list *newerr
5944 = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5945 int errcount;
5946
5947 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
5948
5949 newerr->u.b.vfp_insn = veneer_of_insn;
5950
5951 switch (span_type)
5952 {
5953 case 'a':
5954 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
5955 break;
5956
5957 default:
5958 abort ();
5959 }
5960
5961 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
5962 first_fmac);
5963
5964 newerr->vma = -1;
5965
5966 newerr->next = sec_data->erratumlist;
5967 sec_data->erratumlist = newerr;
5968
5969 state = 0;
5970 }
5971
5972 i = next_i;
5973 }
5974 }
5975
5976 if (contents != NULL
5977 && elf_section_data (sec)->this_hdr.contents != contents)
5978 free (contents);
5979 contents = NULL;
5980 }
5981
5982 return TRUE;
5983
5984 error_return:
5985 if (contents != NULL
5986 && elf_section_data (sec)->this_hdr.contents != contents)
5987 free (contents);
5988
5989 return FALSE;
5990 }
5991
5992 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
5993 after sections have been laid out, using specially-named symbols. */
5994
5995 void
5996 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
5997 struct bfd_link_info *link_info)
5998 {
5999 asection *sec;
6000 struct elf32_arm_link_hash_table *globals;
6001 char *tmp_name;
6002
6003 if (link_info->relocatable)
6004 return;
6005
6006 /* Skip if this bfd does not correspond to an ELF image. */
6007 if (! is_arm_elf (abfd))
6008 return;
6009
6010 globals = elf32_arm_hash_table (link_info);
6011
6012 tmp_name = bfd_malloc ((bfd_size_type) strlen
6013 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6014
6015 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6016 {
6017 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6018 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6019
6020 for (; errnode != NULL; errnode = errnode->next)
6021 {
6022 struct elf_link_hash_entry *myh;
6023 bfd_vma vma;
6024
6025 switch (errnode->type)
6026 {
6027 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6028 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6029 /* Find veneer symbol. */
6030 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6031 errnode->u.b.veneer->u.v.id);
6032
6033 myh = elf_link_hash_lookup
6034 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6035
6036 if (myh == NULL)
6037 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6038 "`%s'"), abfd, tmp_name);
6039
6040 vma = myh->root.u.def.section->output_section->vma
6041 + myh->root.u.def.section->output_offset
6042 + myh->root.u.def.value;
6043
6044 errnode->u.b.veneer->vma = vma;
6045 break;
6046
6047 case VFP11_ERRATUM_ARM_VENEER:
6048 case VFP11_ERRATUM_THUMB_VENEER:
6049 /* Find return location. */
6050 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6051 errnode->u.v.id);
6052
6053 myh = elf_link_hash_lookup
6054 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6055
6056 if (myh == NULL)
6057 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6058 "`%s'"), abfd, tmp_name);
6059
6060 vma = myh->root.u.def.section->output_section->vma
6061 + myh->root.u.def.section->output_offset
6062 + myh->root.u.def.value;
6063
6064 errnode->u.v.branch->vma = vma;
6065 break;
6066
6067 default:
6068 abort ();
6069 }
6070 }
6071 }
6072
6073 free (tmp_name);
6074 }
6075
6076
6077 /* Set target relocation values needed during linking. */
6078
6079 void
6080 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6081 struct bfd_link_info *link_info,
6082 int target1_is_rel,
6083 char * target2_type,
6084 int fix_v4bx,
6085 int use_blx,
6086 bfd_arm_vfp11_fix vfp11_fix,
6087 int no_enum_warn, int no_wchar_warn,
6088 int pic_veneer, int fix_cortex_a8)
6089 {
6090 struct elf32_arm_link_hash_table *globals;
6091
6092 globals = elf32_arm_hash_table (link_info);
6093
6094 globals->target1_is_rel = target1_is_rel;
6095 if (strcmp (target2_type, "rel") == 0)
6096 globals->target2_reloc = R_ARM_REL32;
6097 else if (strcmp (target2_type, "abs") == 0)
6098 globals->target2_reloc = R_ARM_ABS32;
6099 else if (strcmp (target2_type, "got-rel") == 0)
6100 globals->target2_reloc = R_ARM_GOT_PREL;
6101 else
6102 {
6103 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6104 target2_type);
6105 }
6106 globals->fix_v4bx = fix_v4bx;
6107 globals->use_blx |= use_blx;
6108 globals->vfp11_fix = vfp11_fix;
6109 globals->pic_veneer = pic_veneer;
6110 globals->fix_cortex_a8 = fix_cortex_a8;
6111
6112 BFD_ASSERT (is_arm_elf (output_bfd));
6113 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6114 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6115 }
6116
6117 /* Replace the target offset of a Thumb bl or b.w instruction. */
6118
6119 static void
6120 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6121 {
6122 bfd_vma upper;
6123 bfd_vma lower;
6124 int reloc_sign;
6125
6126 BFD_ASSERT ((offset & 1) == 0);
6127
6128 upper = bfd_get_16 (abfd, insn);
6129 lower = bfd_get_16 (abfd, insn + 2);
6130 reloc_sign = (offset < 0) ? 1 : 0;
6131 upper = (upper & ~(bfd_vma) 0x7ff)
6132 | ((offset >> 12) & 0x3ff)
6133 | (reloc_sign << 10);
6134 lower = (lower & ~(bfd_vma) 0x2fff)
6135 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6136 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6137 | ((offset >> 1) & 0x7ff);
6138 bfd_put_16 (abfd, upper, insn);
6139 bfd_put_16 (abfd, lower, insn + 2);
6140 }
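/* For reference (editorial note, per the Thumb-2 BL/B.W encoding): the
   25-bit offset is laid out as S:I1:I2:imm10:imm11:'0'.  S goes in bit 10
   of the first halfword and imm10 in its low ten bits; imm11 goes in the
   low eleven bits of the second halfword, whose J1 (bit 13) and J2 (bit 11)
   hold NOT(I1) XOR S and NOT(I2) XOR S, where I1 and I2 are bits 23 and 22
   of OFFSET.  That is what the masking above computes.  */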
6141
6142 /* Thumb code calling an ARM function. */
6143
6144 static int
6145 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6146 const char * name,
6147 bfd * input_bfd,
6148 bfd * output_bfd,
6149 asection * input_section,
6150 bfd_byte * hit_data,
6151 asection * sym_sec,
6152 bfd_vma offset,
6153 bfd_signed_vma addend,
6154 bfd_vma val,
6155 char **error_message)
6156 {
6157 asection * s = 0;
6158 bfd_vma my_offset;
6159 long int ret_offset;
6160 struct elf_link_hash_entry * myh;
6161 struct elf32_arm_link_hash_table * globals;
6162
6163 myh = find_thumb_glue (info, name, error_message);
6164 if (myh == NULL)
6165 return FALSE;
6166
6167 globals = elf32_arm_hash_table (info);
6168
6169 BFD_ASSERT (globals != NULL);
6170 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6171
6172 my_offset = myh->root.u.def.value;
6173
6174 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6175 THUMB2ARM_GLUE_SECTION_NAME);
6176
6177 BFD_ASSERT (s != NULL);
6178 BFD_ASSERT (s->contents != NULL);
6179 BFD_ASSERT (s->output_section != NULL);
6180
6181 if ((my_offset & 0x01) == 0x01)
6182 {
6183 if (sym_sec != NULL
6184 && sym_sec->owner != NULL
6185 && !INTERWORK_FLAG (sym_sec->owner))
6186 {
6187 (*_bfd_error_handler)
6188 (_("%B(%s): warning: interworking not enabled.\n"
6189 " first occurrence: %B: thumb call to arm"),
6190 sym_sec->owner, input_bfd, name);
6191
6192 return FALSE;
6193 }
6194
6195 --my_offset;
6196 myh->root.u.def.value = my_offset;
6197
6198 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6199 s->contents + my_offset);
6200
6201 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6202 s->contents + my_offset + 2);
6203
6204 ret_offset =
6205 /* Address of destination of the stub. */
6206 ((bfd_signed_vma) val)
6207 - ((bfd_signed_vma)
6208 /* Offset from the start of the current section
6209 to the start of the stubs. */
6210 (s->output_offset
6211 /* Offset of the start of this stub from the start of the stubs. */
6212 + my_offset
6213 /* Address of the start of the current section. */
6214 + s->output_section->vma)
6215 /* The branch instruction is 4 bytes into the stub. */
6216 + 4
6217 /* ARM branches work from the pc of the instruction + 8. */
6218 + 8);
6219
6220 put_arm_insn (globals, output_bfd,
6221 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6222 s->contents + my_offset + 4);
6223 }
6224
6225 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6226
6227 /* Now go back and fix up the original BL insn to point to here. */
6228 ret_offset =
6229 /* Address of where the stub is located. */
6230 (s->output_section->vma + s->output_offset + my_offset)
6231 /* Address of where the BL is located. */
6232 - (input_section->output_section->vma + input_section->output_offset
6233 + offset)
6234 /* Addend in the relocation. */
6235 - addend
6236 /* Biassing for PC-relative addressing. */
6237 - 8;
6238
6239 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6240
6241 return TRUE;
6242 }
6243
6244 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6245
6246 static struct elf_link_hash_entry *
6247 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6248 const char * name,
6249 bfd * input_bfd,
6250 bfd * output_bfd,
6251 asection * sym_sec,
6252 bfd_vma val,
6253 asection * s,
6254 char ** error_message)
6255 {
6256 bfd_vma my_offset;
6257 long int ret_offset;
6258 struct elf_link_hash_entry * myh;
6259 struct elf32_arm_link_hash_table * globals;
6260
6261 myh = find_arm_glue (info, name, error_message);
6262 if (myh == NULL)
6263 return NULL;
6264
6265 globals = elf32_arm_hash_table (info);
6266
6267 BFD_ASSERT (globals != NULL);
6268 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6269
6270 my_offset = myh->root.u.def.value;
6271
6272 if ((my_offset & 0x01) == 0x01)
6273 {
6274 if (sym_sec != NULL
6275 && sym_sec->owner != NULL
6276 && !INTERWORK_FLAG (sym_sec->owner))
6277 {
6278 (*_bfd_error_handler)
6279 (_("%B(%s): warning: interworking not enabled.\n"
6280 " first occurrence: %B: arm call to thumb"),
6281 sym_sec->owner, input_bfd, name);
6282 }
6283
6284 --my_offset;
6285 myh->root.u.def.value = my_offset;
6286
6287 if (info->shared || globals->root.is_relocatable_executable
6288 || globals->pic_veneer)
6289 {
6290 /* For relocatable objects we can't use absolute addresses,
6291 so construct the address from a relative offset. */
6292 /* TODO: If the offset is small it's probably worth
6293 constructing the address with adds. */
6294 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6295 s->contents + my_offset);
6296 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6297 s->contents + my_offset + 4);
6298 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6299 s->contents + my_offset + 8);
6300 /* Adjust the offset by 4 for the position of the add,
6301 and 8 for the pipeline offset. */
6302 ret_offset = (val - (s->output_offset
6303 + s->output_section->vma
6304 + my_offset + 12))
6305 | 1;
6306 bfd_put_32 (output_bfd, ret_offset,
6307 s->contents + my_offset + 12);
6308 }
6309 else if (globals->use_blx)
6310 {
6311 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6312 s->contents + my_offset);
6313
6314 /* It's a thumb address. Add the low order bit. */
6315 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6316 s->contents + my_offset + 4);
6317 }
6318 else
6319 {
6320 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6321 s->contents + my_offset);
6322
6323 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6324 s->contents + my_offset + 4);
6325
6326 /* It's a thumb address. Add the low order bit. */
6327 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6328 s->contents + my_offset + 8);
6329
6330 my_offset += 12;
6331 }
6332 }
6333
6334 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6335
6336 return myh;
6337 }
6338
6339 /* Arm code calling a Thumb function. */
6340
6341 static int
6342 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6343 const char * name,
6344 bfd * input_bfd,
6345 bfd * output_bfd,
6346 asection * input_section,
6347 bfd_byte * hit_data,
6348 asection * sym_sec,
6349 bfd_vma offset,
6350 bfd_signed_vma addend,
6351 bfd_vma val,
6352 char **error_message)
6353 {
6354 unsigned long int tmp;
6355 bfd_vma my_offset;
6356 asection * s;
6357 long int ret_offset;
6358 struct elf_link_hash_entry * myh;
6359 struct elf32_arm_link_hash_table * globals;
6360
6361 globals = elf32_arm_hash_table (info);
6362
6363 BFD_ASSERT (globals != NULL);
6364 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6365
6366 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6367 ARM2THUMB_GLUE_SECTION_NAME);
6368 BFD_ASSERT (s != NULL);
6369 BFD_ASSERT (s->contents != NULL);
6370 BFD_ASSERT (s->output_section != NULL);
6371
6372 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6373 sym_sec, val, s, error_message);
6374 if (!myh)
6375 return FALSE;
6376
6377 my_offset = myh->root.u.def.value;
6378 tmp = bfd_get_32 (input_bfd, hit_data);
6379 tmp = tmp & 0xFF000000;
6380
6381 /* ARM branches are relative to the pc of the instruction + 8, so subtract 8. */
6382 ret_offset = (s->output_offset
6383 + my_offset
6384 + s->output_section->vma
6385 - (input_section->output_offset
6386 + input_section->output_section->vma
6387 + offset + addend)
6388 - 8);
6389
6390 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6391
6392 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6393
6394 return TRUE;
6395 }
6396
6397 /* Populate Arm stub for an exported Thumb function. */
6398
6399 static bfd_boolean
6400 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6401 {
6402 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6403 asection * s;
6404 struct elf_link_hash_entry * myh;
6405 struct elf32_arm_link_hash_entry *eh;
6406 struct elf32_arm_link_hash_table * globals;
6407 asection *sec;
6408 bfd_vma val;
6409 char *error_message;
6410
6411 eh = elf32_arm_hash_entry (h);
6412 /* Allocate stubs for exported Thumb functions on v4t. */
6413 if (eh->export_glue == NULL)
6414 return TRUE;
6415
6416 globals = elf32_arm_hash_table (info);
6417
6418 BFD_ASSERT (globals != NULL);
6419 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6420
6421 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6422 ARM2THUMB_GLUE_SECTION_NAME);
6423 BFD_ASSERT (s != NULL);
6424 BFD_ASSERT (s->contents != NULL);
6425 BFD_ASSERT (s->output_section != NULL);
6426
6427 sec = eh->export_glue->root.u.def.section;
6428
6429 BFD_ASSERT (sec->output_section != NULL);
6430
6431 val = eh->export_glue->root.u.def.value + sec->output_offset
6432 + sec->output_section->vma;
6433
6434 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6435 h->root.u.def.section->owner,
6436 globals->obfd, sec, val, s,
6437 &error_message);
6438 BFD_ASSERT (myh);
6439 return TRUE;
6440 }
6441
6442 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6443
6444 static bfd_vma
6445 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6446 {
6447 bfd_byte *p;
6448 bfd_vma glue_addr;
6449 asection *s;
6450 struct elf32_arm_link_hash_table *globals;
6451
6452 globals = elf32_arm_hash_table (info);
6453
6454 BFD_ASSERT (globals != NULL);
6455 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6456
6457 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6458 ARM_BX_GLUE_SECTION_NAME);
6459 BFD_ASSERT (s != NULL);
6460 BFD_ASSERT (s->contents != NULL);
6461 BFD_ASSERT (s->output_section != NULL);
6462
6463 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6464
6465 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6466
6467 if ((globals->bx_glue_offset[reg] & 1) == 0)
6468 {
6469 p = s->contents + glue_addr;
6470 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6471 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6472 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6473 globals->bx_glue_offset[reg] |= 1;
6474 }
6475
6476 return glue_addr + s->output_section->vma + s->output_offset;
6477 }
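/* Editorial sketch: assuming the armbx*_insn templates defined elsewhere in
   this file encode TST, MOVEQ pc and BX as their names suggest, the veneer
   written above for register rN is approximately:

   tst   rN, #1     @ is the destination Thumb code?
   moveq pc, rN     @ no: plain ARM branch, valid even without BX
   bx    rN         @ yes: interworking branch

   so a BX rN redirected here (fix_v4bx == 2) still interworks on v4T and
   later cores while remaining executable on ARMv4 for ARM-only targets.  */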
6478
6479 /* Generate Arm stubs for exported Thumb symbols. */
6480 static void
6481 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6482 struct bfd_link_info *link_info)
6483 {
6484 struct elf32_arm_link_hash_table * globals;
6485
6486 if (link_info == NULL)
6487 /* Ignore this if we are not called by the ELF backend linker. */
6488 return;
6489
6490 globals = elf32_arm_hash_table (link_info);
6491 /* If blx is available then exported Thumb symbols are OK and there is
6492 nothing to do. */
6493 if (globals->use_blx)
6494 return;
6495
6496 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6497 link_info);
6498 }
6499
6500 /* Some relocations map to different relocations depending on the
6501 target. Return the real relocation. */
6502
6503 static int
6504 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6505 int r_type)
6506 {
6507 switch (r_type)
6508 {
6509 case R_ARM_TARGET1:
6510 if (globals->target1_is_rel)
6511 return R_ARM_REL32;
6512 else
6513 return R_ARM_ABS32;
6514
6515 case R_ARM_TARGET2:
6516 return globals->target2_reloc;
6517
6518 default:
6519 return r_type;
6520 }
6521 }
6522
6523 /* Return the base VMA address which should be subtracted from real addresses
6524 when resolving @dtpoff relocation.
6525 This is PT_TLS segment p_vaddr. */
6526
6527 static bfd_vma
6528 dtpoff_base (struct bfd_link_info *info)
6529 {
6530 /* If tls_sec is NULL, we should have signalled an error already. */
6531 if (elf_hash_table (info)->tls_sec == NULL)
6532 return 0;
6533 return elf_hash_table (info)->tls_sec->vma;
6534 }
6535
6536 /* Return the relocation value for @tpoff relocation
6537 if STT_TLS virtual address is ADDRESS. */
6538
6539 static bfd_vma
6540 tpoff (struct bfd_link_info *info, bfd_vma address)
6541 {
6542 struct elf_link_hash_table *htab = elf_hash_table (info);
6543 bfd_vma base;
6544
6545 /* If tls_sec is NULL, we should have signalled an error already. */
6546 if (htab->tls_sec == NULL)
6547 return 0;
6548 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6549 return address - htab->tls_sec->vma + base;
6550 }
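/* Editorial sketch, assuming the ARM EABI variant 1 TLS layout in which the
   thread pointer addresses a TCB of TCB_SIZE bytes (8 on ARM) followed by
   the aligned TLS block: for a symbol at tls_sec->vma + 0x10 in a section
   aligned to 8 bytes, tpoff returns 0x10 + align_power (8, 3) == 0x18.  */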
6551
6552 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6553 VALUE is the relocation value. */
6554
6555 static bfd_reloc_status_type
6556 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6557 {
6558 if (value > 0xfff)
6559 return bfd_reloc_overflow;
6560
6561 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6562 bfd_put_32 (abfd, value, data);
6563 return bfd_reloc_ok;
6564 }
6565
6566 /* For a given value of n, calculate the value of G_n as required to
6567 deal with group relocations. We return it in the form of an
6568 encoded constant-and-rotation, together with the final residual. If n is
6569 specified as less than zero, then final_residual is filled with the
6570 input value and no further action is performed. */
6571
6572 static bfd_vma
6573 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6574 {
6575 int current_n;
6576 bfd_vma g_n;
6577 bfd_vma encoded_g_n = 0;
6578 bfd_vma residual = value; /* Also known as Y_n. */
6579
6580 for (current_n = 0; current_n <= n; current_n++)
6581 {
6582 int shift;
6583
6584 /* Calculate which part of the value to mask. */
6585 if (residual == 0)
6586 shift = 0;
6587 else
6588 {
6589 int msb;
6590
6591 /* Determine the most significant bit in the residual and
6592 align the resulting value to a 2-bit boundary. */
6593 for (msb = 30; msb >= 0; msb -= 2)
6594 if (residual & (3 << msb))
6595 break;
6596
6597 /* The desired shift is now (msb - 6), or zero, whichever
6598 is the greater. */
6599 shift = msb - 6;
6600 if (shift < 0)
6601 shift = 0;
6602 }
6603
6604 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6605 g_n = residual & (0xff << shift);
6606 encoded_g_n = (g_n >> shift)
6607 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6608
6609 /* Calculate the residual for the next time around. */
6610 residual &= ~g_n;
6611 }
6612
6613 *final_residual = residual;
6614
6615 return encoded_g_n;
6616 }
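/* Editorial worked example: for VALUE == 0x1234 and N == 1 the loop yields
   G_0 == 0x1200 (encoded as imm8 0x48 with rotation field 13, i.e. 0x48
   rotated right by 26 bits), leaving residual 0x34, then
   G_1 == 0x34 (encoded as-is, no rotation), leaving residual 0;
   the encoding of G_1 is returned and *FINAL_RESIDUAL is 0.  */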
6617
6618 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6619 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6620
6621 static int
6622 identify_add_or_sub (bfd_vma insn)
6623 {
6624 int opcode = insn & 0x1e00000;
6625
6626 if (opcode == 1 << 23) /* ADD */
6627 return 1;
6628
6629 if (opcode == 1 << 22) /* SUB */
6630 return -1;
6631
6632 return 0;
6633 }
6634
6635 /* Perform a relocation as part of a final link. */
6636
6637 static bfd_reloc_status_type
6638 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6639 bfd * input_bfd,
6640 bfd * output_bfd,
6641 asection * input_section,
6642 bfd_byte * contents,
6643 Elf_Internal_Rela * rel,
6644 bfd_vma value,
6645 struct bfd_link_info * info,
6646 asection * sym_sec,
6647 const char * sym_name,
6648 int sym_flags,
6649 struct elf_link_hash_entry * h,
6650 bfd_boolean * unresolved_reloc_p,
6651 char ** error_message)
6652 {
6653 unsigned long r_type = howto->type;
6654 unsigned long r_symndx;
6655 bfd_byte * hit_data = contents + rel->r_offset;
6656 bfd * dynobj = NULL;
6657 Elf_Internal_Shdr * symtab_hdr;
6658 struct elf_link_hash_entry ** sym_hashes;
6659 bfd_vma * local_got_offsets;
6660 asection * sgot = NULL;
6661 asection * splt = NULL;
6662 asection * sreloc = NULL;
6663 bfd_vma addend;
6664 bfd_signed_vma signed_addend;
6665 struct elf32_arm_link_hash_table * globals;
6666
6667 globals = elf32_arm_hash_table (info);
6668
6669 BFD_ASSERT (is_arm_elf (input_bfd));
6670
6671 /* Some relocation types map to different relocations depending on the
6672 target. We pick the right one here. */
6673 r_type = arm_real_reloc_type (globals, r_type);
6674 if (r_type != howto->type)
6675 howto = elf32_arm_howto_from_type (r_type);
6676
6677 /* If the start address has been set, then set the EF_ARM_HASENTRY
6678 flag. Setting this more than once is redundant, but the cost is
6679 not too high, and it keeps the code simple.
6680
6681 The test is done here, rather than somewhere else, because the
6682 start address is only set just before the final link commences.
6683
6684 Note - if the user deliberately sets a start address of 0, the
6685 flag will not be set. */
6686 if (bfd_get_start_address (output_bfd) != 0)
6687 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6688
6689 dynobj = elf_hash_table (info)->dynobj;
6690 if (dynobj)
6691 {
6692 sgot = bfd_get_section_by_name (dynobj, ".got");
6693 splt = bfd_get_section_by_name (dynobj, ".plt");
6694 }
6695 symtab_hdr = & elf_symtab_hdr (input_bfd);
6696 sym_hashes = elf_sym_hashes (input_bfd);
6697 local_got_offsets = elf_local_got_offsets (input_bfd);
6698 r_symndx = ELF32_R_SYM (rel->r_info);
6699
6700 if (globals->use_rel)
6701 {
6702 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6703
6704 if (addend & ((howto->src_mask + 1) >> 1))
6705 {
6706 signed_addend = -1;
6707 signed_addend &= ~ howto->src_mask;
6708 signed_addend |= addend;
6709 }
6710 else
6711 signed_addend = addend;
6712 }
6713 else
6714 addend = signed_addend = rel->r_addend;
6715
6716 switch (r_type)
6717 {
6718 case R_ARM_NONE:
6719 /* We don't need to find a value for this symbol. It's just a
6720 marker. */
6721 *unresolved_reloc_p = FALSE;
6722 return bfd_reloc_ok;
6723
6724 case R_ARM_ABS12:
6725 if (!globals->vxworks_p)
6726 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
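/* On VxWorks, fall through: R_ARM_ABS12 may need a dynamic relocation and
   is otherwise handled by the inner switch below. */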
6727
6728 case R_ARM_PC24:
6729 case R_ARM_ABS32:
6730 case R_ARM_ABS32_NOI:
6731 case R_ARM_REL32:
6732 case R_ARM_REL32_NOI:
6733 case R_ARM_CALL:
6734 case R_ARM_JUMP24:
6735 case R_ARM_XPC25:
6736 case R_ARM_PREL31:
6737 case R_ARM_PLT32:
6738 /* Handle relocations which should use the PLT entry. ABS32/REL32
6739 will use the symbol's value, which may point to a PLT entry, but we
6740 don't need to handle that here. If we created a PLT entry, all
6741 branches in this object should go to it, except if the PLT is too
6742 far away, in which case a long branch stub should be inserted. */
6743 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6744 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6745 && r_type != R_ARM_CALL
6746 && r_type != R_ARM_JUMP24
6747 && r_type != R_ARM_PLT32)
6748 && h != NULL
6749 && splt != NULL
6750 && h->plt.offset != (bfd_vma) -1)
6751 {
6752 /* If we've created a .plt section, and assigned a PLT entry to
6753 this function, it should not be known to bind locally. If
6754 it were, we would have cleared the PLT entry. */
6755 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6756
6757 value = (splt->output_section->vma
6758 + splt->output_offset
6759 + h->plt.offset);
6760 *unresolved_reloc_p = FALSE;
6761 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6762 contents, rel->r_offset, value,
6763 rel->r_addend);
6764 }
6765
6766 /* When generating a shared object or relocatable executable, these
6767 relocations are copied into the output file to be resolved at
6768 run time. */
6769 if ((info->shared || globals->root.is_relocatable_executable)
6770 && (input_section->flags & SEC_ALLOC)
6771 && !(elf32_arm_hash_table (info)->vxworks_p
6772 && strcmp (input_section->output_section->name,
6773 ".tls_vars") == 0)
6774 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6775 || !SYMBOL_CALLS_LOCAL (info, h))
6776 && (h == NULL
6777 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6778 || h->root.type != bfd_link_hash_undefweak)
6779 && r_type != R_ARM_PC24
6780 && r_type != R_ARM_CALL
6781 && r_type != R_ARM_JUMP24
6782 && r_type != R_ARM_PREL31
6783 && r_type != R_ARM_PLT32)
6784 {
6785 Elf_Internal_Rela outrel;
6786 bfd_byte *loc;
6787 bfd_boolean skip, relocate;
6788
6789 *unresolved_reloc_p = FALSE;
6790
6791 if (sreloc == NULL)
6792 {
6793 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6794 ! globals->use_rel);
6795
6796 if (sreloc == NULL)
6797 return bfd_reloc_notsupported;
6798 }
6799
6800 skip = FALSE;
6801 relocate = FALSE;
6802
6803 outrel.r_addend = addend;
6804 outrel.r_offset =
6805 _bfd_elf_section_offset (output_bfd, info, input_section,
6806 rel->r_offset);
6807 if (outrel.r_offset == (bfd_vma) -1)
6808 skip = TRUE;
6809 else if (outrel.r_offset == (bfd_vma) -2)
6810 skip = TRUE, relocate = TRUE;
6811 outrel.r_offset += (input_section->output_section->vma
6812 + input_section->output_offset);
6813
6814 if (skip)
6815 memset (&outrel, 0, sizeof outrel);
6816 else if (h != NULL
6817 && h->dynindx != -1
6818 && (!info->shared
6819 || !info->symbolic
6820 || !h->def_regular))
6821 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6822 else
6823 {
6824 int symbol;
6825
6826 /* This symbol is local, or marked to become local. */
6827 if (sym_flags == STT_ARM_TFUNC)
6828 value |= 1;
6829 if (globals->symbian_p)
6830 {
6831 asection *osec;
6832
6833 /* On Symbian OS, the data segment and text segment
6834 can be relocated independently. Therefore, we
6835 must indicate the segment to which this
6836 relocation is relative. The BPABI allows us to
6837 use any symbol in the right segment; we just use
6838 the section symbol as it is convenient. (We
6839 cannot use the symbol given by "h" directly as it
6840 will not appear in the dynamic symbol table.)
6841
6842 Note that the dynamic linker ignores the section
6843 symbol value, so we don't subtract osec->vma
6844 from the emitted reloc addend. */
6845 if (sym_sec)
6846 osec = sym_sec->output_section;
6847 else
6848 osec = input_section->output_section;
6849 symbol = elf_section_data (osec)->dynindx;
6850 if (symbol == 0)
6851 {
6852 struct elf_link_hash_table *htab = elf_hash_table (info);
6853
6854 if ((osec->flags & SEC_READONLY) == 0
6855 && htab->data_index_section != NULL)
6856 osec = htab->data_index_section;
6857 else
6858 osec = htab->text_index_section;
6859 symbol = elf_section_data (osec)->dynindx;
6860 }
6861 BFD_ASSERT (symbol != 0);
6862 }
6863 else
6864 /* On SVR4-ish systems, the dynamic loader cannot
6865 relocate the text and data segments independently,
6866 so the symbol does not matter. */
6867 symbol = 0;
6868 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6869 if (globals->use_rel)
6870 relocate = TRUE;
6871 else
6872 outrel.r_addend += value;
6873 }
6874
6875 loc = sreloc->contents;
6876 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6877 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6878
6879 /* If this reloc is against an external symbol, we do not want to
6880 fiddle with the addend. Otherwise, we need to include the symbol
6881 value so that it becomes an addend for the dynamic reloc. */
6882 if (! relocate)
6883 return bfd_reloc_ok;
6884
6885 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6886 contents, rel->r_offset, value,
6887 (bfd_vma) 0);
6888 }
6889 else switch (r_type)
6890 {
6891 case R_ARM_ABS12:
6892 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6893
6894 case R_ARM_XPC25: /* Arm BLX instruction. */
6895 case R_ARM_CALL:
6896 case R_ARM_JUMP24:
6897 case R_ARM_PC24: /* Arm B/BL instruction. */
6898 case R_ARM_PLT32:
6899 {
6900 bfd_vma from;
6901 bfd_signed_vma branch_offset;
6902 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6903
6904 if (r_type == R_ARM_XPC25)
6905 {
6906 /* Check for Arm calling Arm function. */
6907 /* FIXME: Should we translate the instruction into a BL
6908 instruction instead ? */
6909 if (sym_flags != STT_ARM_TFUNC)
6910 (*_bfd_error_handler)
6911 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
6912 input_bfd,
6913 h ? h->root.root.string : "(local)");
6914 }
6915 else if (r_type == R_ARM_PC24)
6916 {
6917 /* Check for Arm calling Thumb function. */
6918 if (sym_flags == STT_ARM_TFUNC)
6919 {
6920 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
6921 output_bfd, input_section,
6922 hit_data, sym_sec, rel->r_offset,
6923 signed_addend, value,
6924 error_message))
6925 return bfd_reloc_ok;
6926 else
6927 return bfd_reloc_dangerous;
6928 }
6929 }
6930
6931 /* Check if a stub has to be inserted because the
6932 destination is too far or we are changing mode. */
6933 if ( r_type == R_ARM_CALL
6934 || r_type == R_ARM_JUMP24
6935 || r_type == R_ARM_PLT32)
6936 {
6937 /* If the call goes through a PLT entry, make sure to
6938 check distance to the right destination address. */
6939 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
6940 {
6941 value = (splt->output_section->vma
6942 + splt->output_offset
6943 + h->plt.offset);
6944 *unresolved_reloc_p = FALSE;
6945 }
6946
6947 from = (input_section->output_section->vma
6948 + input_section->output_offset
6949 + rel->r_offset);
6950 branch_offset = (bfd_signed_vma)(value - from);
6951
6952 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
6953 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
6954 || ((sym_flags == STT_ARM_TFUNC)
6955 && (((r_type == R_ARM_CALL) && !globals->use_blx)
6956 || (r_type == R_ARM_JUMP24)
6957 || (r_type == R_ARM_PLT32) ))
6958 )
6959 {
6960 /* The target is out of reach, so redirect the
6961 branch to the local stub for this function. */
6962
6963 stub_entry = elf32_arm_get_stub_entry (input_section,
6964 sym_sec, h,
6965 rel, globals);
6966 if (stub_entry != NULL)
6967 value = (stub_entry->stub_offset
6968 + stub_entry->stub_sec->output_offset
6969 + stub_entry->stub_sec->output_section->vma);
6970 }
6971 }
6972
6973 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
6974 where:
6975 S is the address of the symbol in the relocation.
6976 P is address of the instruction being relocated.
6977 A is the addend (extracted from the instruction) in bytes.
6978
6979 S is held in 'value'.
6980 P is the base address of the section containing the
6981 instruction plus the offset of the reloc into that
6982 section, ie:
6983 (input_section->output_section->vma +
6984 input_section->output_offset +
6985 rel->r_offset).
6986 A is the addend, converted into bytes, ie:
6987 (signed_addend * 4)
6988
6989 Note: None of these operations have knowledge of the pipeline
6990 size of the processor, thus it is up to the assembler to
6991 encode this information into the addend. */
6992 value -= (input_section->output_section->vma
6993 + input_section->output_offset);
6994 value -= rel->r_offset;
6995 if (globals->use_rel)
6996 value += (signed_addend << howto->size);
6997 else
6998 /* RELA addends do not have to be adjusted by howto->size. */
6999 value += signed_addend;
7000
7001 signed_addend = value;
7002 signed_addend >>= howto->rightshift;
7003
7004 /* A branch to an undefined weak symbol is turned into a jump to
7005 the next instruction unless a PLT entry will be created. */
7006 if (h && h->root.type == bfd_link_hash_undefweak
7007 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7008 {
7009 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
7010 | 0x0affffff;
7011 }
7012 else
7013 {
7014 /* Perform a signed range check. */
7015 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7016 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7017 return bfd_reloc_overflow;
7018
7019 addend = (value & 2);
7020
7021 value = (signed_addend & howto->dst_mask)
7022 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7023
7024 if (r_type == R_ARM_CALL)
7025 {
7026 /* Set the H bit in the BLX instruction. */
7027 if (sym_flags == STT_ARM_TFUNC)
7028 {
7029 if (addend)
7030 value |= (1 << 24);
7031 else
7032 value &= ~(bfd_vma)(1 << 24);
7033 }
7034
7035 /* Select the correct instruction (BL or BLX). */
7036 /* Only if we are not handling a BL to a stub. In this
7037 case, mode switching is performed by the stub. */
7038 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7039 value |= (1 << 28);
7040 else
7041 {
7042 value &= ~(bfd_vma)(1 << 28);
7043 value |= (1 << 24);
7044 }
7045 }
7046 }
7047 }
7048 break;
7049
7050 case R_ARM_ABS32:
7051 value += addend;
7052 if (sym_flags == STT_ARM_TFUNC)
7053 value |= 1;
7054 break;
7055
7056 case R_ARM_ABS32_NOI:
7057 value += addend;
7058 break;
7059
7060 case R_ARM_REL32:
7061 value += addend;
7062 if (sym_flags == STT_ARM_TFUNC)
7063 value |= 1;
7064 value -= (input_section->output_section->vma
7065 + input_section->output_offset + rel->r_offset);
7066 break;
7067
7068 case R_ARM_REL32_NOI:
7069 value += addend;
7070 value -= (input_section->output_section->vma
7071 + input_section->output_offset + rel->r_offset);
7072 break;
7073
7074 case R_ARM_PREL31:
7075 value -= (input_section->output_section->vma
7076 + input_section->output_offset + rel->r_offset);
7077 value += signed_addend;
7078 if (! h || h->root.type != bfd_link_hash_undefweak)
7079 {
7080 /* Check for overflow. */
7081 if ((value ^ (value >> 1)) & (1 << 30))
7082 return bfd_reloc_overflow;
7083 }
7084 value &= 0x7fffffff;
7085 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7086 if (sym_flags == STT_ARM_TFUNC)
7087 value |= 1;
7088 break;
7089 }
7090
7091 bfd_put_32 (input_bfd, value, hit_data);
7092 return bfd_reloc_ok;
7093
7094 case R_ARM_ABS8:
7095 value += addend;
7096 if ((long) value > 0x7f || (long) value < -0x80)
7097 return bfd_reloc_overflow;
7098
7099 bfd_put_8 (input_bfd, value, hit_data);
7100 return bfd_reloc_ok;
7101
7102 case R_ARM_ABS16:
7103 value += addend;
7104
7105 if ((long) value > 0x7fff || (long) value < -0x8000)
7106 return bfd_reloc_overflow;
7107
7108 bfd_put_16 (input_bfd, value, hit_data);
7109 return bfd_reloc_ok;
7110
7111 case R_ARM_THM_ABS5:
7112 /* Support ldr and str instructions for the thumb. */
7113 if (globals->use_rel)
7114 {
7115 /* Need to refetch addend. */
7116 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7117 /* ??? Need to determine shift amount from operand size. */
7118 addend >>= howto->rightshift;
7119 }
7120 value += addend;
7121
7122 /* ??? Isn't value unsigned? */
7123 if ((long) value > 0x1f || (long) value < -0x10)
7124 return bfd_reloc_overflow;
7125
7126 /* ??? Value needs to be properly shifted into place first. */
7127 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7128 bfd_put_16 (input_bfd, value, hit_data);
7129 return bfd_reloc_ok;
7130
7131 case R_ARM_THM_ALU_PREL_11_0:
7132 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7133 {
7134 bfd_vma insn;
7135 bfd_signed_vma relocation;
7136
7137 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7138 | bfd_get_16 (input_bfd, hit_data + 2);
7139
7140 if (globals->use_rel)
7141 {
7142 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7143 | ((insn & (1 << 26)) >> 15);
7144 if (insn & 0xf00000)
7145 signed_addend = -signed_addend;
7146 }
7147
7148 relocation = value + signed_addend;
7149 relocation -= (input_section->output_section->vma
7150 + input_section->output_offset
7151 + rel->r_offset);
7152
7153 value = abs (relocation);
7154
7155 if (value >= 0x1000)
7156 return bfd_reloc_overflow;
7157
7158 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7159 | ((value & 0x700) << 4)
7160 | ((value & 0x800) << 15);
7161 if (relocation < 0)
7162 insn |= 0xa00000;
7163
7164 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7165 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7166
7167 return bfd_reloc_ok;
7168 }
7169
7170 case R_ARM_THM_PC12:
7171 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7172 {
7173 bfd_vma insn;
7174 bfd_signed_vma relocation;
7175
7176 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7177 | bfd_get_16 (input_bfd, hit_data + 2);
7178
7179 if (globals->use_rel)
7180 {
7181 signed_addend = insn & 0xfff;
7182 if (!(insn & (1 << 23)))
7183 signed_addend = -signed_addend;
7184 }
7185
7186 relocation = value + signed_addend;
7187 relocation -= (input_section->output_section->vma
7188 + input_section->output_offset
7189 + rel->r_offset);
7190
7191 value = abs (relocation);
7192
7193 if (value >= 0x1000)
7194 return bfd_reloc_overflow;
7195
7196 insn = (insn & 0xff7ff000) | value;
7197 if (relocation >= 0)
7198 insn |= (1 << 23);
7199
7200 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7201 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7202
7203 return bfd_reloc_ok;
7204 }
7205
7206 case R_ARM_THM_XPC22:
7207 case R_ARM_THM_CALL:
7208 case R_ARM_THM_JUMP24:
7209 /* Thumb BL (branch long instruction). */
7210 {
7211 bfd_vma relocation;
7212 bfd_vma reloc_sign;
7213 bfd_boolean overflow = FALSE;
7214 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7215 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7216 bfd_signed_vma reloc_signed_max;
7217 bfd_signed_vma reloc_signed_min;
7218 bfd_vma check;
7219 bfd_signed_vma signed_check;
7220 int bitsize;
7221 int thumb2 = using_thumb2 (globals);
7222
7223 /* A branch to an undefined weak symbol is turned into a jump to
7224 the next instruction unless a PLT entry will be created. */
7225 if (h && h->root.type == bfd_link_hash_undefweak
7226 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7227 {
7228 bfd_put_16 (input_bfd, 0xe000, hit_data);
7229 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7230 return bfd_reloc_ok;
7231 }
7232
7233 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7234 with Thumb-1) involving the J1 and J2 bits. */
7235 if (globals->use_rel)
7236 {
7237 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7238 bfd_vma upper = upper_insn & 0x3ff;
7239 bfd_vma lower = lower_insn & 0x7ff;
7240 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7241 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7242 bfd_vma i1 = j1 ^ s ? 0 : 1;
7243 bfd_vma i2 = j2 ^ s ? 0 : 1;
7244
7245 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7246 /* Sign extend. */
7247 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7248
7249 signed_addend = addend;
7250 }
7251
7252 if (r_type == R_ARM_THM_XPC22)
7253 {
7254 /* Check for Thumb to Thumb call. */
7255 /* FIXME: Should we translate the instruction into a BL
7256 instruction instead ? */
7257 if (sym_flags == STT_ARM_TFUNC)
7258 (*_bfd_error_handler)
7259 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7260 input_bfd,
7261 h ? h->root.root.string : "(local)");
7262 }
7263 else
7264 {
7265 /* If it is not a call to Thumb, assume call to Arm.
7266 If it is a call relative to a section name, then it is not a
7267 function call at all, but rather a long jump. Calls through
7268 the PLT do not require stubs. */
7269 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7270 && (h == NULL || splt == NULL
7271 || h->plt.offset == (bfd_vma) -1))
7272 {
7273 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7274 {
7275 /* Convert BL to BLX. */
7276 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7277 }
7278 else if (( r_type != R_ARM_THM_CALL)
7279 && (r_type != R_ARM_THM_JUMP24))
7280 {
7281 if (elf32_thumb_to_arm_stub
7282 (info, sym_name, input_bfd, output_bfd, input_section,
7283 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7284 error_message))
7285 return bfd_reloc_ok;
7286 else
7287 return bfd_reloc_dangerous;
7288 }
7289 }
7290 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7291 && r_type == R_ARM_THM_CALL)
7292 {
7293 /* Make sure this is a BL. */
7294 lower_insn |= 0x1800;
7295 }
7296 }
7297
7298 /* Handle calls via the PLT. */
7299 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7300 {
7301 value = (splt->output_section->vma
7302 + splt->output_offset
7303 + h->plt.offset);
7304 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7305 {
7306 /* If the Thumb BLX instruction is available, convert the
7307 BL to a BLX instruction to call the ARM-mode PLT entry. */
7308 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7309 }
7310 else
7311 /* Target the Thumb stub before the ARM PLT entry. */
7312 value -= PLT_THUMB_STUB_SIZE;
7313 *unresolved_reloc_p = FALSE;
7314 }
7315
7316 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7317 {
7318 /* Check if a stub has to be inserted because the destination
7319 is too far. */
7320 bfd_vma from;
7321 bfd_signed_vma branch_offset;
7322 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7323
7324 from = (input_section->output_section->vma
7325 + input_section->output_offset
7326 + rel->r_offset);
7327 branch_offset = (bfd_signed_vma)(value - from);
7328
7329 if ((!thumb2
7330 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7331 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7332 ||
7333 (thumb2
7334 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7335 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7336 || ((sym_flags != STT_ARM_TFUNC)
7337 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7338 || r_type == R_ARM_THM_JUMP24)))
7339 {
7340 /* The target is out of reach or we are changing modes, so
7341 redirect the branch to the local stub for this
7342 function. */
7343 stub_entry = elf32_arm_get_stub_entry (input_section,
7344 sym_sec, h,
7345 rel, globals);
7346 if (stub_entry != NULL)
7347 value = (stub_entry->stub_offset
7348 + stub_entry->stub_sec->output_offset
7349 + stub_entry->stub_sec->output_section->vma);
7350
7351 /* If this call becomes a call to Arm, force BLX. */
7352 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7353 {
7354 if ((stub_entry
7355 && !arm_stub_is_thumb (stub_entry->stub_type))
7356 || (sym_flags != STT_ARM_TFUNC))
7357 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7358 }
7359 }
7360 }
7361
7362 relocation = value + signed_addend;
7363
7364 relocation -= (input_section->output_section->vma
7365 + input_section->output_offset
7366 + rel->r_offset);
7367
7368 check = relocation >> howto->rightshift;
7369
7370 /* If this is a signed value, the rightshift just dropped
7371 leading 1 bits (assuming twos complement). */
7372 if ((bfd_signed_vma) relocation >= 0)
7373 signed_check = check;
7374 else
7375 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7376
7377 /* Calculate the permissible maximum and minimum values for
7378 this relocation according to whether we're relocating for
7379 Thumb-2 or not. */
7380 bitsize = howto->bitsize;
7381 if (!thumb2)
7382 bitsize -= 2;
7383 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7384 reloc_signed_min = ~reloc_signed_max;
7385
7386 /* Assumes two's complement. */
7387 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7388 overflow = TRUE;
7389
7390 if ((lower_insn & 0x5000) == 0x4000)
7391 /* For a BLX instruction, make sure that the relocation is rounded up
7392 to a word boundary. This follows the semantics of the instruction
7393 which specifies that bit 1 of the target address will come from bit
7394 1 of the base address. */
7395 relocation = (relocation + 2) & ~ 3;
7396
7397 /* Put RELOCATION back into the insn. Assumes two's complement.
7398 We use the Thumb-2 encoding, which is safe even if dealing with
7399 a Thumb-1 instruction by virtue of our overflow check above. */
7400 reloc_sign = (signed_check < 0) ? 1 : 0;
7401 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7402 | ((relocation >> 12) & 0x3ff)
7403 | (reloc_sign << 10);
7404 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7405 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7406 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7407 | ((relocation >> 1) & 0x7ff);
7408
7409 /* Put the relocated value back in the object file: */
7410 bfd_put_16 (input_bfd, upper_insn, hit_data);
7411 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7412
7413 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7414 }
7415 break;
7416
7417 case R_ARM_THM_JUMP19:
7418 /* Thumb32 conditional branch instruction. */
7419 {
7420 bfd_vma relocation;
7421 bfd_boolean overflow = FALSE;
7422 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7423 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7424 bfd_signed_vma reloc_signed_max = 0xffffe;
7425 bfd_signed_vma reloc_signed_min = -0x100000;
7426 bfd_signed_vma signed_check;
7427
7428 /* Need to refetch the addend, reconstruct the top three bits,
7429 and glue the 6-bit and 11-bit immediate pieces together. */
7430 if (globals->use_rel)
7431 {
7432 bfd_vma S = (upper_insn & 0x0400) >> 10;
7433 bfd_vma upper = (upper_insn & 0x003f);
7434 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7435 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7436 bfd_vma lower = (lower_insn & 0x07ff);
7437
7438 upper |= J1 << 6;
7439 upper |= J2 << 7;
7440 upper |= (!S) << 8;
7441 upper -= 0x0100; /* Sign extend. */
7442
7443 addend = (upper << 12) | (lower << 1);
7444 signed_addend = addend;
7445 }
7446
7447 /* Handle calls via the PLT. */
7448 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7449 {
7450 value = (splt->output_section->vma
7451 + splt->output_offset
7452 + h->plt.offset);
7453 /* Target the Thumb stub before the ARM PLT entry. */
7454 value -= PLT_THUMB_STUB_SIZE;
7455 *unresolved_reloc_p = FALSE;
7456 }
7457
7458 /* ??? Should handle interworking? GCC might someday try to
7459 use this for tail calls. */
7460
7461 relocation = value + signed_addend;
7462 relocation -= (input_section->output_section->vma
7463 + input_section->output_offset
7464 + rel->r_offset);
7465 signed_check = (bfd_signed_vma) relocation;
7466
7467 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7468 overflow = TRUE;
7469
7470 /* Put RELOCATION back into the insn. */
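 /* The conditional branch (B<cond>.W) encodes its offset as
    S:J2:J1:imm6:imm11:'0'; unlike BL/B.W there is no J-bit
    inversion against S.  */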
7471 {
7472 bfd_vma S = (relocation & 0x00100000) >> 20;
7473 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7474 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7475 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7476 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7477
7478 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7479 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7480 }
7481
7482 /* Put the relocated value back in the object file: */
7483 bfd_put_16 (input_bfd, upper_insn, hit_data);
7484 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7485
7486 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7487 }
7488
7489 case R_ARM_THM_JUMP11:
7490 case R_ARM_THM_JUMP8:
7491 case R_ARM_THM_JUMP6:
7492 /* Thumb B (branch) instruction. */
7493 {
7494 bfd_signed_vma relocation;
7495 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7496 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7497 bfd_signed_vma signed_check;
7498
7499 /* CBZ cannot jump backward. */
7500 if (r_type == R_ARM_THM_JUMP6)
7501 reloc_signed_min = 0;
7502
7503 if (globals->use_rel)
7504 {
7505 /* Need to refetch addend. */
7506 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7507 if (addend & ((howto->src_mask + 1) >> 1))
7508 {
7509 signed_addend = -1;
7510 signed_addend &= ~ howto->src_mask;
7511 signed_addend |= addend;
7512 }
7513 else
7514 signed_addend = addend;
7515 /* The value in the insn has been right shifted. We need to
7516 undo this, so that we can perform the address calculation
7517 in terms of bytes. */
7518 signed_addend <<= howto->rightshift;
7519 }
7520 relocation = value + signed_addend;
7521
7522 relocation -= (input_section->output_section->vma
7523 + input_section->output_offset
7524 + rel->r_offset);
7525
7526 relocation >>= howto->rightshift;
7527 signed_check = relocation;
7528
7529 if (r_type == R_ARM_THM_JUMP6)
7530 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7531 else
7532 relocation &= howto->dst_mask;
7533 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7534
7535 bfd_put_16 (input_bfd, relocation, hit_data);
7536
7537 /* Assumes two's complement. */
7538 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7539 return bfd_reloc_overflow;
7540
7541 return bfd_reloc_ok;
7542 }
7543
7544 case R_ARM_ALU_PCREL7_0:
7545 case R_ARM_ALU_PCREL15_8:
7546 case R_ARM_ALU_PCREL23_15:
7547 {
7548 bfd_vma insn;
7549 bfd_vma relocation;
7550
7551 insn = bfd_get_32 (input_bfd, hit_data);
7552 if (globals->use_rel)
7553 {
7554 /* Extract the addend. */
7555 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7556 signed_addend = addend;
7557 }
7558 relocation = value + signed_addend;
7559
7560 relocation -= (input_section->output_section->vma
7561 + input_section->output_offset
7562 + rel->r_offset);
7563 insn = (insn & ~0xfff)
7564 | ((howto->bitpos << 7) & 0xf00)
7565 | ((relocation >> howto->bitpos) & 0xff);
7566 bfd_put_32 (input_bfd, insn, hit_data);
7567 }
7568 return bfd_reloc_ok;
7569
7570 case R_ARM_GNU_VTINHERIT:
7571 case R_ARM_GNU_VTENTRY:
7572 return bfd_reloc_ok;
7573
7574 case R_ARM_GOTOFF32:
7575 /* Relocation is relative to the start of the
7576 global offset table. */
7577
7578 BFD_ASSERT (sgot != NULL);
7579 if (sgot == NULL)
7580 return bfd_reloc_notsupported;
7581
7582 /* If we are addressing a Thumb function, we need to adjust the
7583 address by one, so that attempts to call the function pointer will
7584 correctly interpret it as Thumb code. */
7585 if (sym_flags == STT_ARM_TFUNC)
7586 value += 1;
7587
7588 /* Note that sgot->output_offset is not involved in this
7589 calculation. We always want the start of .got. If we
7590 define _GLOBAL_OFFSET_TABLE in a different way, as is
7591 permitted by the ABI, we might have to change this
7592 calculation. */
7593 value -= sgot->output_section->vma;
7594 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7595 contents, rel->r_offset, value,
7596 rel->r_addend);
7597
7598 case R_ARM_GOTPC:
7599 /* Use global offset table as symbol value. */
7600 BFD_ASSERT (sgot != NULL);
7601
7602 if (sgot == NULL)
7603 return bfd_reloc_notsupported;
7604
7605 *unresolved_reloc_p = FALSE;
7606 value = sgot->output_section->vma;
7607 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7608 contents, rel->r_offset, value,
7609 rel->r_addend);
7610
7611 case R_ARM_GOT32:
7612 case R_ARM_GOT_PREL:
7613 /* Relocation is to the entry for this symbol in the
7614 global offset table. */
7615 if (sgot == NULL)
7616 return bfd_reloc_notsupported;
7617
7618 if (h != NULL)
7619 {
7620 bfd_vma off;
7621 bfd_boolean dyn;
7622
7623 off = h->got.offset;
7624 BFD_ASSERT (off != (bfd_vma) -1);
7625 dyn = globals->root.dynamic_sections_created;
7626
7627 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7628 || (info->shared
7629 && SYMBOL_REFERENCES_LOCAL (info, h))
7630 || (ELF_ST_VISIBILITY (h->other)
7631 && h->root.type == bfd_link_hash_undefweak))
7632 {
7633 /* This is actually a static link, or it is a -Bsymbolic link
7634 and the symbol is defined locally. We must initialize this
7635 entry in the global offset table. Since the offset must
7636 always be a multiple of 4, we use the least significant bit
7637 to record whether we have initialized it already.
7638
7639 When doing a dynamic link, we create a .rel(a).got relocation
7640 entry to initialize the value. This is done in the
7641 finish_dynamic_symbol routine. */
7642 if ((off & 1) != 0)
7643 off &= ~1;
7644 else
7645 {
7646 /* If we are addressing a Thumb function, we need to
7647 adjust the address by one, so that attempts to
7648 call the function pointer will correctly
7649 interpret it as Thumb code. */
7650 if (sym_flags == STT_ARM_TFUNC)
7651 value |= 1;
7652
7653 bfd_put_32 (output_bfd, value, sgot->contents + off);
7654 h->got.offset |= 1;
7655 }
7656 }
7657 else
7658 *unresolved_reloc_p = FALSE;
7659
7660 value = sgot->output_offset + off;
7661 }
7662 else
7663 {
7664 bfd_vma off;
7665
7666 BFD_ASSERT (local_got_offsets != NULL &&
7667 local_got_offsets[r_symndx] != (bfd_vma) -1);
7668
7669 off = local_got_offsets[r_symndx];
7670
7671 /* The offset must always be a multiple of 4. We use the
7672 least significant bit to record whether we have already
7673 generated the necessary reloc. */
7674 if ((off & 1) != 0)
7675 off &= ~1;
7676 else
7677 {
7678 /* If we are addressing a Thumb function, we need to
7679 adjust the address by one, so that attempts to
7680 call the function pointer will correctly
7681 interpret it as Thumb code. */
7682 if (sym_flags == STT_ARM_TFUNC)
7683 value |= 1;
7684
7685 if (globals->use_rel)
7686 bfd_put_32 (output_bfd, value, sgot->contents + off);
7687
7688 if (info->shared)
7689 {
7690 asection * srelgot;
7691 Elf_Internal_Rela outrel;
7692 bfd_byte *loc;
7693
7694 srelgot = (bfd_get_section_by_name
7695 (dynobj, RELOC_SECTION (globals, ".got")));
7696 BFD_ASSERT (srelgot != NULL);
7697
7698 outrel.r_addend = addend + value;
7699 outrel.r_offset = (sgot->output_section->vma
7700 + sgot->output_offset
7701 + off);
7702 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7703 loc = srelgot->contents;
7704 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7705 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7706 }
7707
7708 local_got_offsets[r_symndx] |= 1;
7709 }
7710
7711 value = sgot->output_offset + off;
7712 }
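 /* R_ARM_GOT32 is resolved relative to the start of the GOT (taken
    here to be the start of the output section holding .got), while
    R_ARM_GOT_PREL needs the absolute address of the GOT entry; the
    PC-relative adjustment is applied by the howto below.  */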
7713 if (r_type != R_ARM_GOT32)
7714 value += sgot->output_section->vma;
7715
7716 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7717 contents, rel->r_offset, value,
7718 rel->r_addend);
7719
7720 case R_ARM_TLS_LDO32:
7721 value = value - dtpoff_base (info);
7722
7723 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7724 contents, rel->r_offset, value,
7725 rel->r_addend);
7726
7727 case R_ARM_TLS_LDM32:
7728 {
7729 bfd_vma off;
7730
7731 if (globals->sgot == NULL)
7732 abort ();
7733
7734 off = globals->tls_ldm_got.offset;
7735
7736 if ((off & 1) != 0)
7737 off &= ~1;
7738 else
7739 {
7740 /* If we don't know the module number, create a relocation
7741 for it. */
7742 if (info->shared)
7743 {
7744 Elf_Internal_Rela outrel;
7745 bfd_byte *loc;
7746
7747 if (globals->srelgot == NULL)
7748 abort ();
7749
7750 outrel.r_addend = 0;
7751 outrel.r_offset = (globals->sgot->output_section->vma
7752 + globals->sgot->output_offset + off);
7753 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7754
7755 if (globals->use_rel)
7756 bfd_put_32 (output_bfd, outrel.r_addend,
7757 globals->sgot->contents + off);
7758
7759 loc = globals->srelgot->contents;
7760 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7761 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7762 }
7763 else
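 /* For a static link the module index is known: 1, the executable
    itself.  */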
7764 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7765
7766 globals->tls_ldm_got.offset |= 1;
7767 }
7768
7769 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7770 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7771
7772 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7773 contents, rel->r_offset, value,
7774 rel->r_addend);
7775 }
7776
7777 case R_ARM_TLS_GD32:
7778 case R_ARM_TLS_IE32:
7779 {
7780 bfd_vma off;
7781 int indx;
7782 char tls_type;
7783
7784 if (globals->sgot == NULL)
7785 abort ();
7786
7787 indx = 0;
7788 if (h != NULL)
7789 {
7790 bfd_boolean dyn;
7791 dyn = globals->root.dynamic_sections_created;
7792 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7793 && (!info->shared
7794 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7795 {
7796 *unresolved_reloc_p = FALSE;
7797 indx = h->dynindx;
7798 }
7799 off = h->got.offset;
7800 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7801 }
7802 else
7803 {
7804 if (local_got_offsets == NULL)
7805 abort ();
7806 off = local_got_offsets[r_symndx];
7807 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7808 }
7809
7810 if (tls_type == GOT_UNKNOWN)
7811 abort ();
7812
7813 if ((off & 1) != 0)
7814 off &= ~1;
7815 else
7816 {
7817 bfd_boolean need_relocs = FALSE;
7818 Elf_Internal_Rela outrel;
7819 bfd_byte *loc = NULL;
7820 int cur_off = off;
7821
7822 /* The GOT entries have not been initialized yet. Do it
7823 now, and emit any relocations. If both an IE GOT and a
7824 GD GOT are necessary, we emit the GD first. */
7825
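 /* A GD reference occupies two GOT words (module index and offset
    within that module's TLS block); an IE reference occupies a
    single word (offset from the thread pointer).  */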
7826 if ((info->shared || indx != 0)
7827 && (h == NULL
7828 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7829 || h->root.type != bfd_link_hash_undefweak))
7830 {
7831 need_relocs = TRUE;
7832 if (globals->srelgot == NULL)
7833 abort ();
7834 loc = globals->srelgot->contents;
7835 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7836 }
7837
7838 if (tls_type & GOT_TLS_GD)
7839 {
7840 if (need_relocs)
7841 {
7842 outrel.r_addend = 0;
7843 outrel.r_offset = (globals->sgot->output_section->vma
7844 + globals->sgot->output_offset
7845 + cur_off);
7846 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
7847
7848 if (globals->use_rel)
7849 bfd_put_32 (output_bfd, outrel.r_addend,
7850 globals->sgot->contents + cur_off);
7851
7852 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7853 globals->srelgot->reloc_count++;
7854 loc += RELOC_SIZE (globals);
7855
7856 if (indx == 0)
7857 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7858 globals->sgot->contents + cur_off + 4);
7859 else
7860 {
7861 outrel.r_addend = 0;
7862 outrel.r_info = ELF32_R_INFO (indx,
7863 R_ARM_TLS_DTPOFF32);
7864 outrel.r_offset += 4;
7865
7866 if (globals->use_rel)
7867 bfd_put_32 (output_bfd, outrel.r_addend,
7868 globals->sgot->contents + cur_off + 4);
7869
7870
7871 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7872 globals->srelgot->reloc_count++;
7873 loc += RELOC_SIZE (globals);
7874 }
7875 }
7876 else
7877 {
7878 /* If we are not emitting relocations for a
7879 general dynamic reference, then we must be in a
7880 static link or an executable link with the
7881 symbol binding locally. Mark it as belonging
7882 to module 1, the executable. */
7883 bfd_put_32 (output_bfd, 1,
7884 globals->sgot->contents + cur_off);
7885 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7886 globals->sgot->contents + cur_off + 4);
7887 }
7888
7889 cur_off += 8;
7890 }
7891
7892 if (tls_type & GOT_TLS_IE)
7893 {
7894 if (need_relocs)
7895 {
7896 if (indx == 0)
7897 outrel.r_addend = value - dtpoff_base (info);
7898 else
7899 outrel.r_addend = 0;
7900 outrel.r_offset = (globals->sgot->output_section->vma
7901 + globals->sgot->output_offset
7902 + cur_off);
7903 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
7904
7905 if (globals->use_rel)
7906 bfd_put_32 (output_bfd, outrel.r_addend,
7907 globals->sgot->contents + cur_off);
7908
7909 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7910 globals->srelgot->reloc_count++;
7911 loc += RELOC_SIZE (globals);
7912 }
7913 else
7914 bfd_put_32 (output_bfd, tpoff (info, value),
7915 globals->sgot->contents + cur_off);
7916 cur_off += 4;
7917 }
7918
7919 if (h != NULL)
7920 h->got.offset |= 1;
7921 else
7922 local_got_offsets[r_symndx] |= 1;
7923 }
7924
7925 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
7926 off += 8;
7927 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7928 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7929
7930 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7931 contents, rel->r_offset, value,
7932 rel->r_addend);
7933 }
7934
7935 case R_ARM_TLS_LE32:
7936 if (info->shared)
7937 {
7938 (*_bfd_error_handler)
7939 (_("%B(%A+0x%lx): %s relocation not permitted in shared object"),
7940 input_bfd, input_section,
7941 (long) rel->r_offset, howto->name);
7942 return bfd_reloc_notsupported;
7943 }
7944 else
7945 value = tpoff (info, value);
7946
7947 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7948 contents, rel->r_offset, value,
7949 rel->r_addend);
7950
7951 case R_ARM_V4BX:
7952 if (globals->fix_v4bx)
7953 {
7954 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
7955
7956 /* Ensure that we have a BX instruction. */
7957 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
7958
7959 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
7960 {
7961 /* Branch to veneer. */
7962 bfd_vma glue_addr;
7963 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
7964 glue_addr -= input_section->output_section->vma
7965 + input_section->output_offset
7966 + rel->r_offset + 8;
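 /* The extra 8 allows for the ARM pipeline: PC reads as the address
    of the current instruction plus 8.  The condition field is kept
    and a B to the veneer is substituted below.  */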
7967 insn = (insn & 0xf0000000) | 0x0a000000
7968 | ((glue_addr >> 2) & 0x00ffffff);
7969 }
7970 else
7971 {
7972 /* Preserve Rm (lowest four bits) and the condition code
7973 (highest four bits). Other bits encode MOV PC,Rm. */
7974 insn = (insn & 0xf000000f) | 0x01a0f000;
7975 }
7976
7977 bfd_put_32 (input_bfd, insn, hit_data);
7978 }
7979 return bfd_reloc_ok;
7980
7981 case R_ARM_MOVW_ABS_NC:
7982 case R_ARM_MOVT_ABS:
7983 case R_ARM_MOVW_PREL_NC:
7984 case R_ARM_MOVT_PREL:
7985 /* Until we properly support segment-base-relative addressing then
7986 we assume the segment base to be zero, as for the group relocations.
7987 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
7988 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
7989 case R_ARM_MOVW_BREL_NC:
7990 case R_ARM_MOVW_BREL:
7991 case R_ARM_MOVT_BREL:
7992 {
7993 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
7994
7995 if (globals->use_rel)
7996 {
7997 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
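 /* (x ^ 0x8000) - 0x8000 sign-extends the 16-bit addend.  */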
7998 signed_addend = (addend ^ 0x8000) - 0x8000;
7999 }
8000
8001 value += signed_addend;
8002
8003 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8004 value -= (input_section->output_section->vma
8005 + input_section->output_offset + rel->r_offset);
8006
8007 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8008 return bfd_reloc_overflow;
8009
8010 if (sym_flags == STT_ARM_TFUNC)
8011 value |= 1;
8012
8013 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8014 || r_type == R_ARM_MOVT_BREL)
8015 value >>= 16;
8016
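 /* ARM MOVW/MOVT encode the 16-bit immediate as imm4 (bits 19-16)
    and imm12 (bits 11-0).  */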
8017 insn &= 0xfff0f000;
8018 insn |= value & 0xfff;
8019 insn |= (value & 0xf000) << 4;
8020 bfd_put_32 (input_bfd, insn, hit_data);
8021 }
8022 return bfd_reloc_ok;
8023
8024 case R_ARM_THM_MOVW_ABS_NC:
8025 case R_ARM_THM_MOVT_ABS:
8026 case R_ARM_THM_MOVW_PREL_NC:
8027 case R_ARM_THM_MOVT_PREL:
8028 /* Until we properly support segment-base-relative addressing then
8029 we assume the segment base to be zero, as for the above relocations.
8030 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8031 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8032 as R_ARM_THM_MOVT_ABS. */
8033 case R_ARM_THM_MOVW_BREL_NC:
8034 case R_ARM_THM_MOVW_BREL:
8035 case R_ARM_THM_MOVT_BREL:
8036 {
8037 bfd_vma insn;
8038
8039 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8040 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8041
8042 if (globals->use_rel)
8043 {
8044 addend = ((insn >> 4) & 0xf000)
8045 | ((insn >> 15) & 0x0800)
8046 | ((insn >> 4) & 0x0700)
8047 | (insn & 0x00ff);
8048 signed_addend = (addend ^ 0x8000) - 0x8000;
8049 }
8050
8051 value += signed_addend;
8052
8053 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8054 value -= (input_section->output_section->vma
8055 + input_section->output_offset + rel->r_offset);
8056
8057 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8058 return bfd_reloc_overflow;
8059
8060 if (sym_flags == STT_ARM_TFUNC)
8061 value |= 1;
8062
8063 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8064 || r_type == R_ARM_THM_MOVT_BREL)
8065 value >>= 16;
8066
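 /* Thumb-2 MOVW/MOVT scatter the 16-bit immediate across imm4, i,
    imm3 and imm8 fields in the two halfwords.  */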
8067 insn &= 0xfbf08f00;
8068 insn |= (value & 0xf000) << 4;
8069 insn |= (value & 0x0800) << 15;
8070 insn |= (value & 0x0700) << 4;
8071 insn |= (value & 0x00ff);
8072
8073 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8074 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8075 }
8076 return bfd_reloc_ok;
8077
8078 case R_ARM_ALU_PC_G0_NC:
8079 case R_ARM_ALU_PC_G1_NC:
8080 case R_ARM_ALU_PC_G0:
8081 case R_ARM_ALU_PC_G1:
8082 case R_ARM_ALU_PC_G2:
8083 case R_ARM_ALU_SB_G0_NC:
8084 case R_ARM_ALU_SB_G1_NC:
8085 case R_ARM_ALU_SB_G0:
8086 case R_ARM_ALU_SB_G1:
8087 case R_ARM_ALU_SB_G2:
8088 {
8089 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8090 bfd_vma pc = input_section->output_section->vma
8091 + input_section->output_offset + rel->r_offset;
8092 /* sb should be the origin of the *segment* containing the symbol.
8093 It is not clear how to obtain this OS-dependent value, so we
8094 make an arbitrary choice of zero. */
8095 bfd_vma sb = 0;
8096 bfd_vma residual;
8097 bfd_vma g_n;
8098 bfd_signed_vma signed_value;
8099 int group = 0;
8100
8101 /* Determine which group of bits to select. */
8102 switch (r_type)
8103 {
8104 case R_ARM_ALU_PC_G0_NC:
8105 case R_ARM_ALU_PC_G0:
8106 case R_ARM_ALU_SB_G0_NC:
8107 case R_ARM_ALU_SB_G0:
8108 group = 0;
8109 break;
8110
8111 case R_ARM_ALU_PC_G1_NC:
8112 case R_ARM_ALU_PC_G1:
8113 case R_ARM_ALU_SB_G1_NC:
8114 case R_ARM_ALU_SB_G1:
8115 group = 1;
8116 break;
8117
8118 case R_ARM_ALU_PC_G2:
8119 case R_ARM_ALU_SB_G2:
8120 group = 2;
8121 break;
8122
8123 default:
8124 abort ();
8125 }
8126
8127 /* If REL, extract the addend from the insn. If RELA, it will
8128 have already been fetched for us. */
8129 if (globals->use_rel)
8130 {
8131 int negative;
8132 bfd_vma constant = insn & 0xff;
8133 bfd_vma rotation = (insn & 0xf00) >> 8;
8134
8135 if (rotation == 0)
8136 signed_addend = constant;
8137 else
8138 {
8139 /* Compensate for the fact that in the instruction, the
8140 rotation is stored in multiples of 2 bits. */
8141 rotation *= 2;
8142
8143 /* Rotate "constant" right by "rotation" bits. */
8144 signed_addend = (constant >> rotation) |
8145 (constant << (8 * sizeof (bfd_vma) - rotation));
8146 }
8147
8148 /* Determine if the instruction is an ADD or a SUB.
8149 (For REL, this determines the sign of the addend.) */
8150 negative = identify_add_or_sub (insn);
8151 if (negative == 0)
8152 {
8153 (*_bfd_error_handler)
8154 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8155 input_bfd, input_section,
8156 (long) rel->r_offset, howto->name);
8157 return bfd_reloc_overflow;
8158 }
8159
8160 signed_addend *= negative;
8161 }
8162
8163 /* Compute the value (X) to go in the place. */
8164 if (r_type == R_ARM_ALU_PC_G0_NC
8165 || r_type == R_ARM_ALU_PC_G1_NC
8166 || r_type == R_ARM_ALU_PC_G0
8167 || r_type == R_ARM_ALU_PC_G1
8168 || r_type == R_ARM_ALU_PC_G2)
8169 /* PC relative. */
8170 signed_value = value - pc + signed_addend;
8171 else
8172 /* Section base relative. */
8173 signed_value = value - sb + signed_addend;
8174
8175 /* If the target symbol is a Thumb function, then set the
8176 Thumb bit in the address. */
8177 if (sym_flags == STT_ARM_TFUNC)
8178 signed_value |= 1;
8179
8180 /* Calculate the value of the relevant G_n, in encoded
8181 constant-with-rotation format. */
8182 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8183 &residual);
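 /* Each group selects a successive chunk of the value that fits an
    ARM immediate (an 8-bit constant with an even rotation); the
    residual is what remains for later groups to encode.  */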
8184
8185 /* Check for overflow if required. */
8186 if ((r_type == R_ARM_ALU_PC_G0
8187 || r_type == R_ARM_ALU_PC_G1
8188 || r_type == R_ARM_ALU_PC_G2
8189 || r_type == R_ARM_ALU_SB_G0
8190 || r_type == R_ARM_ALU_SB_G1
8191 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8192 {
8193 (*_bfd_error_handler)
8194 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8195 input_bfd, input_section,
8196 (long) rel->r_offset, abs (signed_value), howto->name);
8197 return bfd_reloc_overflow;
8198 }
8199
8200 /* Mask out the value and the ADD/SUB part of the opcode; take care
8201 not to destroy the S bit. */
8202 insn &= 0xff1ff000;
8203
8204 /* Set the opcode according to whether the value to go in the
8205 place is negative. */
8206 if (signed_value < 0)
8207 insn |= 1 << 22;
8208 else
8209 insn |= 1 << 23;
8210
8211 /* Encode the offset. */
8212 insn |= g_n;
8213
8214 bfd_put_32 (input_bfd, insn, hit_data);
8215 }
8216 return bfd_reloc_ok;
8217
8218 case R_ARM_LDR_PC_G0:
8219 case R_ARM_LDR_PC_G1:
8220 case R_ARM_LDR_PC_G2:
8221 case R_ARM_LDR_SB_G0:
8222 case R_ARM_LDR_SB_G1:
8223 case R_ARM_LDR_SB_G2:
8224 {
8225 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8226 bfd_vma pc = input_section->output_section->vma
8227 + input_section->output_offset + rel->r_offset;
8228 bfd_vma sb = 0; /* See note above. */
8229 bfd_vma residual;
8230 bfd_signed_vma signed_value;
8231 int group = 0;
8232
8233 /* Determine which groups of bits to calculate. */
8234 switch (r_type)
8235 {
8236 case R_ARM_LDR_PC_G0:
8237 case R_ARM_LDR_SB_G0:
8238 group = 0;
8239 break;
8240
8241 case R_ARM_LDR_PC_G1:
8242 case R_ARM_LDR_SB_G1:
8243 group = 1;
8244 break;
8245
8246 case R_ARM_LDR_PC_G2:
8247 case R_ARM_LDR_SB_G2:
8248 group = 2;
8249 break;
8250
8251 default:
8252 abort ();
8253 }
8254
8255 /* If REL, extract the addend from the insn. If RELA, it will
8256 have already been fetched for us. */
8257 if (globals->use_rel)
8258 {
8259 int negative = (insn & (1 << 23)) ? 1 : -1;
8260 signed_addend = negative * (insn & 0xfff);
8261 }
8262
8263 /* Compute the value (X) to go in the place. */
8264 if (r_type == R_ARM_LDR_PC_G0
8265 || r_type == R_ARM_LDR_PC_G1
8266 || r_type == R_ARM_LDR_PC_G2)
8267 /* PC relative. */
8268 signed_value = value - pc + signed_addend;
8269 else
8270 /* Section base relative. */
8271 signed_value = value - sb + signed_addend;
8272
8273 /* Calculate the value of the relevant G_{n-1} to obtain
8274 the residual at that stage. */
8275 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8276
8277 /* Check for overflow. */
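 /* (The LDR/STR word and unsigned-byte forms take a 12-bit
    immediate offset, so the residual must fit in 12 bits.)  */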
8278 if (residual >= 0x1000)
8279 {
8280 (*_bfd_error_handler)
8281 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8282 input_bfd, input_section,
8283 (long) rel->r_offset, abs (signed_value), howto->name);
8284 return bfd_reloc_overflow;
8285 }
8286
8287 /* Mask out the value and U bit. */
8288 insn &= 0xff7ff000;
8289
8290 /* Set the U bit if the value to go in the place is non-negative. */
8291 if (signed_value >= 0)
8292 insn |= 1 << 23;
8293
8294 /* Encode the offset. */
8295 insn |= residual;
8296
8297 bfd_put_32 (input_bfd, insn, hit_data);
8298 }
8299 return bfd_reloc_ok;
8300
8301 case R_ARM_LDRS_PC_G0:
8302 case R_ARM_LDRS_PC_G1:
8303 case R_ARM_LDRS_PC_G2:
8304 case R_ARM_LDRS_SB_G0:
8305 case R_ARM_LDRS_SB_G1:
8306 case R_ARM_LDRS_SB_G2:
8307 {
8308 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8309 bfd_vma pc = input_section->output_section->vma
8310 + input_section->output_offset + rel->r_offset;
8311 bfd_vma sb = 0; /* See note above. */
8312 bfd_vma residual;
8313 bfd_signed_vma signed_value;
8314 int group = 0;
8315
8316 /* Determine which groups of bits to calculate. */
8317 switch (r_type)
8318 {
8319 case R_ARM_LDRS_PC_G0:
8320 case R_ARM_LDRS_SB_G0:
8321 group = 0;
8322 break;
8323
8324 case R_ARM_LDRS_PC_G1:
8325 case R_ARM_LDRS_SB_G1:
8326 group = 1;
8327 break;
8328
8329 case R_ARM_LDRS_PC_G2:
8330 case R_ARM_LDRS_SB_G2:
8331 group = 2;
8332 break;
8333
8334 default:
8335 abort ();
8336 }
8337
8338 /* If REL, extract the addend from the insn. If RELA, it will
8339 have already been fetched for us. */
8340 if (globals->use_rel)
8341 {
8342 int negative = (insn & (1 << 23)) ? 1 : -1;
8343 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8344 }
8345
8346 /* Compute the value (X) to go in the place. */
8347 if (r_type == R_ARM_LDRS_PC_G0
8348 || r_type == R_ARM_LDRS_PC_G1
8349 || r_type == R_ARM_LDRS_PC_G2)
8350 /* PC relative. */
8351 signed_value = value - pc + signed_addend;
8352 else
8353 /* Section base relative. */
8354 signed_value = value - sb + signed_addend;
8355
8356 /* Calculate the value of the relevant G_{n-1} to obtain
8357 the residual at that stage. */
8358 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8359
8360 /* Check for overflow. */
8361 if (residual >= 0x100)
8362 {
8363 (*_bfd_error_handler)
8364 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8365 input_bfd, input_section,
8366 (long) rel->r_offset, abs (signed_value), howto->name);
8367 return bfd_reloc_overflow;
8368 }
8369
8370 /* Mask out the value and U bit. */
8371 insn &= 0xff7ff0f0;
8372
8373 /* Set the U bit if the value to go in the place is non-negative. */
8374 if (signed_value >= 0)
8375 insn |= 1 << 23;
8376
8377 /* Encode the offset. */
8378 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8379
8380 bfd_put_32 (input_bfd, insn, hit_data);
8381 }
8382 return bfd_reloc_ok;
8383
8384 case R_ARM_LDC_PC_G0:
8385 case R_ARM_LDC_PC_G1:
8386 case R_ARM_LDC_PC_G2:
8387 case R_ARM_LDC_SB_G0:
8388 case R_ARM_LDC_SB_G1:
8389 case R_ARM_LDC_SB_G2:
8390 {
8391 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8392 bfd_vma pc = input_section->output_section->vma
8393 + input_section->output_offset + rel->r_offset;
8394 bfd_vma sb = 0; /* See note above. */
8395 bfd_vma residual;
8396 bfd_signed_vma signed_value;
8397 int group = 0;
8398
8399 /* Determine which groups of bits to calculate. */
8400 switch (r_type)
8401 {
8402 case R_ARM_LDC_PC_G0:
8403 case R_ARM_LDC_SB_G0:
8404 group = 0;
8405 break;
8406
8407 case R_ARM_LDC_PC_G1:
8408 case R_ARM_LDC_SB_G1:
8409 group = 1;
8410 break;
8411
8412 case R_ARM_LDC_PC_G2:
8413 case R_ARM_LDC_SB_G2:
8414 group = 2;
8415 break;
8416
8417 default:
8418 abort ();
8419 }
8420
8421 /* If REL, extract the addend from the insn. If RELA, it will
8422 have already been fetched for us. */
8423 if (globals->use_rel)
8424 {
8425 int negative = (insn & (1 << 23)) ? 1 : -1;
8426 signed_addend = negative * ((insn & 0xff) << 2);
8427 }
8428
8429 /* Compute the value (X) to go in the place. */
8430 if (r_type == R_ARM_LDC_PC_G0
8431 || r_type == R_ARM_LDC_PC_G1
8432 || r_type == R_ARM_LDC_PC_G2)
8433 /* PC relative. */
8434 signed_value = value - pc + signed_addend;
8435 else
8436 /* Section base relative. */
8437 signed_value = value - sb + signed_addend;
8438
8439 /* Calculate the value of the relevant G_{n-1} to obtain
8440 the residual at that stage. */
8441 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8442
8443 /* Check for overflow. (The absolute value to go in the place must be
8444 divisible by four and, after having been divided by four, must
8445 fit in eight bits.) */
8446 if ((residual & 0x3) != 0 || residual >= 0x400)
8447 {
8448 (*_bfd_error_handler)
8449 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8450 input_bfd, input_section,
8451 (long) rel->r_offset, abs (signed_value), howto->name);
8452 return bfd_reloc_overflow;
8453 }
8454
8455 /* Mask out the value and U bit. */
8456 insn &= 0xff7fff00;
8457
8458 /* Set the U bit if the value to go in the place is non-negative. */
8459 if (signed_value >= 0)
8460 insn |= 1 << 23;
8461
8462 /* Encode the offset. */
8463 insn |= residual >> 2;
8464
8465 bfd_put_32 (input_bfd, insn, hit_data);
8466 }
8467 return bfd_reloc_ok;
8468
8469 default:
8470 return bfd_reloc_notsupported;
8471 }
8472 }
8473
8474 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8475 static void
8476 arm_add_to_rel (bfd * abfd,
8477 bfd_byte * address,
8478 reloc_howto_type * howto,
8479 bfd_signed_vma increment)
8480 {
8481 bfd_signed_vma addend;
8482
8483 if (howto->type == R_ARM_THM_CALL
8484 || howto->type == R_ARM_THM_JUMP24)
8485 {
8486 int upper_insn, lower_insn;
8487 int upper, lower;
8488
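 /* Reconstruct the addend from the two 11-bit immediate fields of
    the Thumb BL/B.W instruction pair.  */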
8489 upper_insn = bfd_get_16 (abfd, address);
8490 lower_insn = bfd_get_16 (abfd, address + 2);
8491 upper = upper_insn & 0x7ff;
8492 lower = lower_insn & 0x7ff;
8493
8494 addend = (upper << 12) | (lower << 1);
8495 addend += increment;
8496 addend >>= 1;
8497
8498 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8499 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8500
8501 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8502 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8503 }
8504 else
8505 {
8506 bfd_vma contents;
8507
8508 contents = bfd_get_32 (abfd, address);
8509
8510 /* Get the (signed) value from the instruction. */
8511 addend = contents & howto->src_mask;
8512 if (addend & ((howto->src_mask + 1) >> 1))
8513 {
8514 bfd_signed_vma mask;
8515
8516 mask = -1;
8517 mask &= ~ howto->src_mask;
8518 addend |= mask;
8519 }
8520
8521 /* Add in the increment, (which is a byte value). */
8522 switch (howto->type)
8523 {
8524 default:
8525 addend += increment;
8526 break;
8527
8528 case R_ARM_PC24:
8529 case R_ARM_PLT32:
8530 case R_ARM_CALL:
8531 case R_ARM_JUMP24:
8532 addend <<= howto->size;
8533 addend += increment;
8534
8535 /* Should we check for overflow here ? */
8536
8537 /* Drop any undesired bits. */
8538 addend >>= howto->rightshift;
8539 break;
8540 }
8541
8542 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8543
8544 bfd_put_32 (abfd, contents, address);
8545 }
8546 }
8547
8548 #define IS_ARM_TLS_RELOC(R_TYPE) \
8549 ((R_TYPE) == R_ARM_TLS_GD32 \
8550 || (R_TYPE) == R_ARM_TLS_LDO32 \
8551 || (R_TYPE) == R_ARM_TLS_LDM32 \
8552 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8553 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8554 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8555 || (R_TYPE) == R_ARM_TLS_LE32 \
8556 || (R_TYPE) == R_ARM_TLS_IE32)
8557
8558 /* Relocate an ARM ELF section. */
8559
8560 static bfd_boolean
8561 elf32_arm_relocate_section (bfd * output_bfd,
8562 struct bfd_link_info * info,
8563 bfd * input_bfd,
8564 asection * input_section,
8565 bfd_byte * contents,
8566 Elf_Internal_Rela * relocs,
8567 Elf_Internal_Sym * local_syms,
8568 asection ** local_sections)
8569 {
8570 Elf_Internal_Shdr *symtab_hdr;
8571 struct elf_link_hash_entry **sym_hashes;
8572 Elf_Internal_Rela *rel;
8573 Elf_Internal_Rela *relend;
8574 const char *name;
8575 struct elf32_arm_link_hash_table * globals;
8576
8577 globals = elf32_arm_hash_table (info);
8578
8579 symtab_hdr = & elf_symtab_hdr (input_bfd);
8580 sym_hashes = elf_sym_hashes (input_bfd);
8581
8582 rel = relocs;
8583 relend = relocs + input_section->reloc_count;
8584 for (; rel < relend; rel++)
8585 {
8586 int r_type;
8587 reloc_howto_type * howto;
8588 unsigned long r_symndx;
8589 Elf_Internal_Sym * sym;
8590 asection * sec;
8591 struct elf_link_hash_entry * h;
8592 bfd_vma relocation;
8593 bfd_reloc_status_type r;
8594 arelent bfd_reloc;
8595 char sym_type;
8596 bfd_boolean unresolved_reloc = FALSE;
8597 char *error_message = NULL;
8598
8599 r_symndx = ELF32_R_SYM (rel->r_info);
8600 r_type = ELF32_R_TYPE (rel->r_info);
8601 r_type = arm_real_reloc_type (globals, r_type);
8602
8603 if ( r_type == R_ARM_GNU_VTENTRY
8604 || r_type == R_ARM_GNU_VTINHERIT)
8605 continue;
8606
8607 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8608 howto = bfd_reloc.howto;
8609
8610 h = NULL;
8611 sym = NULL;
8612 sec = NULL;
8613
8614 if (r_symndx < symtab_hdr->sh_info)
8615 {
8616 sym = local_syms + r_symndx;
8617 sym_type = ELF32_ST_TYPE (sym->st_info);
8618 sec = local_sections[r_symndx];
8619 if (globals->use_rel)
8620 {
8621 relocation = (sec->output_section->vma
8622 + sec->output_offset
8623 + sym->st_value);
8624 if (!info->relocatable
8625 && (sec->flags & SEC_MERGE)
8626 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8627 {
8628 asection *msec;
8629 bfd_vma addend, value;
8630
8631 switch (r_type)
8632 {
8633 case R_ARM_MOVW_ABS_NC:
8634 case R_ARM_MOVT_ABS:
8635 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8636 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8637 addend = (addend ^ 0x8000) - 0x8000;
8638 break;
8639
8640 case R_ARM_THM_MOVW_ABS_NC:
8641 case R_ARM_THM_MOVT_ABS:
8642 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8643 << 16;
8644 value |= bfd_get_16 (input_bfd,
8645 contents + rel->r_offset + 2);
8646 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8647 | ((value & 0x04000000) >> 15);
8648 addend = (addend ^ 0x8000) - 0x8000;
8649 break;
8650
8651 default:
8652 if (howto->rightshift
8653 || (howto->src_mask & (howto->src_mask + 1)))
8654 {
8655 (*_bfd_error_handler)
8656 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8657 input_bfd, input_section,
8658 (long) rel->r_offset, howto->name);
8659 return FALSE;
8660 }
8661
8662 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8663
8664 /* Get the (signed) value from the instruction. */
8665 addend = value & howto->src_mask;
8666 if (addend & ((howto->src_mask + 1) >> 1))
8667 {
8668 bfd_signed_vma mask;
8669
8670 mask = -1;
8671 mask &= ~ howto->src_mask;
8672 addend |= mask;
8673 }
8674 break;
8675 }
8676
8677 msec = sec;
8678 addend =
8679 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8680 - relocation;
8681 addend += msec->output_section->vma + msec->output_offset;
8682
8683 /* Cases here must match those in the preceding
8684 switch statement. */
8685 switch (r_type)
8686 {
8687 case R_ARM_MOVW_ABS_NC:
8688 case R_ARM_MOVT_ABS:
8689 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8690 | (addend & 0xfff);
8691 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8692 break;
8693
8694 case R_ARM_THM_MOVW_ABS_NC:
8695 case R_ARM_THM_MOVT_ABS:
8696 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8697 | (addend & 0xff) | ((addend & 0x0800) << 15);
8698 bfd_put_16 (input_bfd, value >> 16,
8699 contents + rel->r_offset);
8700 bfd_put_16 (input_bfd, value,
8701 contents + rel->r_offset + 2);
8702 break;
8703
8704 default:
8705 value = (value & ~ howto->dst_mask)
8706 | (addend & howto->dst_mask);
8707 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8708 break;
8709 }
8710 }
8711 }
8712 else
8713 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8714 }
8715 else
8716 {
8717 bfd_boolean warned;
8718
8719 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8720 r_symndx, symtab_hdr, sym_hashes,
8721 h, sec, relocation,
8722 unresolved_reloc, warned);
8723
8724 sym_type = h->type;
8725 }
8726
8727 if (sec != NULL && elf_discarded_section (sec))
8728 {
8729 /* For relocs against symbols from removed linkonce sections,
8730 or sections discarded by a linker script, we just want the
8731 section contents zeroed. Avoid any special processing. */
8732 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8733 rel->r_info = 0;
8734 rel->r_addend = 0;
8735 continue;
8736 }
8737
8738 if (info->relocatable)
8739 {
8740 /* This is a relocatable link. We don't have to change
8741 anything, unless the reloc is against a section symbol,
8742 in which case we have to adjust according to where the
8743 section symbol winds up in the output section. */
8744 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8745 {
8746 if (globals->use_rel)
8747 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8748 howto, (bfd_signed_vma) sec->output_offset);
8749 else
8750 rel->r_addend += sec->output_offset;
8751 }
8752 continue;
8753 }
8754
8755 if (h != NULL)
8756 name = h->root.root.string;
8757 else
8758 {
8759 name = (bfd_elf_string_from_elf_section
8760 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8761 if (name == NULL || *name == '\0')
8762 name = bfd_section_name (input_bfd, sec);
8763 }
8764
8765 if (r_symndx != 0
8766 && r_type != R_ARM_NONE
8767 && (h == NULL
8768 || h->root.type == bfd_link_hash_defined
8769 || h->root.type == bfd_link_hash_defweak)
8770 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8771 {
8772 (*_bfd_error_handler)
8773 ((sym_type == STT_TLS
8774 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8775 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8776 input_bfd,
8777 input_section,
8778 (long) rel->r_offset,
8779 howto->name,
8780 name);
8781 }
8782
8783 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8784 input_section, contents, rel,
8785 relocation, info, sec, name,
8786 (h ? ELF_ST_TYPE (h->type) :
8787 ELF_ST_TYPE (sym->st_info)), h,
8788 &unresolved_reloc, &error_message);
8789
8790 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8791 because such sections are not SEC_ALLOC and thus ld.so will
8792 not process them. */
8793 if (unresolved_reloc
8794 && !((input_section->flags & SEC_DEBUGGING) != 0
8795 && h->def_dynamic))
8796 {
8797 (*_bfd_error_handler)
8798 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8799 input_bfd,
8800 input_section,
8801 (long) rel->r_offset,
8802 howto->name,
8803 h->root.root.string);
8804 return FALSE;
8805 }
8806
8807 if (r != bfd_reloc_ok)
8808 {
8809 switch (r)
8810 {
8811 case bfd_reloc_overflow:
8812 /* If the overflowing reloc was to an undefined symbol,
8813 we have already printed one error message and there
8814 is no point complaining again. */
8815 if ((! h ||
8816 h->root.type != bfd_link_hash_undefined)
8817 && (!((*info->callbacks->reloc_overflow)
8818 (info, (h ? &h->root : NULL), name, howto->name,
8819 (bfd_vma) 0, input_bfd, input_section,
8820 rel->r_offset))))
8821 return FALSE;
8822 break;
8823
8824 case bfd_reloc_undefined:
8825 if (!((*info->callbacks->undefined_symbol)
8826 (info, name, input_bfd, input_section,
8827 rel->r_offset, TRUE)))
8828 return FALSE;
8829 break;
8830
8831 case bfd_reloc_outofrange:
8832 error_message = _("out of range");
8833 goto common_error;
8834
8835 case bfd_reloc_notsupported:
8836 error_message = _("unsupported relocation");
8837 goto common_error;
8838
8839 case bfd_reloc_dangerous:
8840 /* error_message should already be set. */
8841 goto common_error;
8842
8843 default:
8844 error_message = _("unknown error");
8845 /* Fall through. */
8846
8847 common_error:
8848 BFD_ASSERT (error_message != NULL);
8849 if (!((*info->callbacks->reloc_dangerous)
8850 (info, error_message, input_bfd, input_section,
8851 rel->r_offset)))
8852 return FALSE;
8853 break;
8854 }
8855 }
8856 }
8857
8858 return TRUE;
8859 }
8860
8861 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
8862 adds the edit to the start of the list. (The list must be built in order of
8863 ascending INDEX: the function's callers are primarily responsible for
8864 maintaining that condition). */
8865
8866 static void
8867 add_unwind_table_edit (arm_unwind_table_edit **head,
8868 arm_unwind_table_edit **tail,
8869 arm_unwind_edit_type type,
8870 asection *linked_section,
8871 unsigned int index)
8872 {
8873 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
8874
8875 new_edit->type = type;
8876 new_edit->linked_section = linked_section;
8877 new_edit->index = index;
8878
8879 if (index > 0)
8880 {
8881 new_edit->next = NULL;
8882
8883 if (*tail)
8884 (*tail)->next = new_edit;
8885
8886 (*tail) = new_edit;
8887
8888 if (!*head)
8889 (*head) = new_edit;
8890 }
8891 else
8892 {
8893 new_edit->next = *head;
8894
8895 if (!*tail)
8896 *tail = new_edit;
8897
8898 *head = new_edit;
8899 }
8900 }
8901
8902 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
8903
8904 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
8905 static void
8906 adjust_exidx_size(asection *exidx_sec, int adjust)
8907 {
8908 asection *out_sec;
8909
8910 if (!exidx_sec->rawsize)
8911 exidx_sec->rawsize = exidx_sec->size;
8912
8913 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
8914 out_sec = exidx_sec->output_section;
8915 /* Adjust size of output section. */
8916 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
8917 }
8918
8919 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
8920 static void
8921 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
8922 {
8923 struct _arm_elf_section_data *exidx_arm_data;
8924
8925 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
8926 add_unwind_table_edit (
8927 &exidx_arm_data->u.exidx.unwind_edit_list,
8928 &exidx_arm_data->u.exidx.unwind_edit_tail,
8929 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
8930
8931 adjust_exidx_size(exidx_sec, 8);
8932 }
8933
8934 /* Scan .ARM.exidx tables, and create a list describing edits which should be
8935 made to those tables, such that:
8936
8937 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
8938 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
8939 codes which have been inlined into the index).
8940
8941 The edits are applied when the tables are written
8942 (in elf32_arm_write_section).
8943 */
8944
8945 bfd_boolean
8946 elf32_arm_fix_exidx_coverage (asection **text_section_order,
8947 unsigned int num_text_sections,
8948 struct bfd_link_info *info)
8949 {
8950 bfd *inp;
8951 unsigned int last_second_word = 0, i;
8952 asection *last_exidx_sec = NULL;
8953 asection *last_text_sec = NULL;
8954 int last_unwind_type = -1;
8955
8956 /* Walk over all EXIDX sections, and create backlinks from the corresponding
8957 text sections. */
8958 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
8959 {
8960 asection *sec;
8961
8962 for (sec = inp->sections; sec != NULL; sec = sec->next)
8963 {
8964 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
8965 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
8966
8967 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
8968 continue;
8969
8970 if (elf_sec->linked_to)
8971 {
8972 Elf_Internal_Shdr *linked_hdr
8973 = &elf_section_data (elf_sec->linked_to)->this_hdr;
8974 struct _arm_elf_section_data *linked_sec_arm_data
8975 = get_arm_elf_section_data (linked_hdr->bfd_section);
8976
8977 if (linked_sec_arm_data == NULL)
8978 continue;
8979
8980 /* Link this .ARM.exidx section back from the text section it
8981 describes. */
8982 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
8983 }
8984 }
8985 }
8986
8987 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
8988 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
8989 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
8990 */
8991
8992 for (i = 0; i < num_text_sections; i++)
8993 {
8994 asection *sec = text_section_order[i];
8995 asection *exidx_sec;
8996 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
8997 struct _arm_elf_section_data *exidx_arm_data;
8998 bfd_byte *contents = NULL;
8999 int deleted_exidx_bytes = 0;
9000 bfd_vma j;
9001 arm_unwind_table_edit *unwind_edit_head = NULL;
9002 arm_unwind_table_edit *unwind_edit_tail = NULL;
9003 Elf_Internal_Shdr *hdr;
9004 bfd *ibfd;
9005
9006 if (arm_data == NULL)
9007 continue;
9008
9009 exidx_sec = arm_data->u.text.arm_exidx_sec;
9010 if (exidx_sec == NULL)
9011 {
9012 /* Section has no unwind data. */
9013 if (last_unwind_type == 0 || !last_exidx_sec)
9014 continue;
9015
9016 /* Ignore zero sized sections. */
9017 if (sec->size == 0)
9018 continue;
9019
9020 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9021 last_unwind_type = 0;
9022 continue;
9023 }
9024
9025 /* Skip /DISCARD/ sections. */
9026 if (bfd_is_abs_section (exidx_sec->output_section))
9027 continue;
9028
9029 hdr = &elf_section_data (exidx_sec)->this_hdr;
9030 if (hdr->sh_type != SHT_ARM_EXIDX)
9031 continue;
9032
9033 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9034 if (exidx_arm_data == NULL)
9035 continue;
9036
9037 ibfd = exidx_sec->owner;
9038
9039 if (hdr->contents != NULL)
9040 contents = hdr->contents;
9041 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9042 /* An error? */
9043 continue;
9044
9045 for (j = 0; j < hdr->sh_size; j += 8)
9046 {
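 /* Each .ARM.exidx entry is two words: the offset of the function
    it covers, then either an inline unwind description, a table
    offset, or the EXIDX_CANTUNWIND marker (1).  */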
9047 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9048 int unwind_type;
9049 int elide = 0;
9050
9051 /* An EXIDX_CANTUNWIND entry. */
9052 if (second_word == 1)
9053 {
9054 if (last_unwind_type == 0)
9055 elide = 1;
9056 unwind_type = 0;
9057 }
9058 /* Inlined unwinding data. Merge if equal to previous. */
9059 else if ((second_word & 0x80000000) != 0)
9060 {
9061 if (last_second_word == second_word && last_unwind_type == 1)
9062 elide = 1;
9063 unwind_type = 1;
9064 last_second_word = second_word;
9065 }
9066 /* Normal table entry. In theory we could merge these too,
9067 but duplicate entries are likely to be much less common. */
9068 else
9069 unwind_type = 2;
9070
9071 if (elide)
9072 {
9073 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9074 DELETE_EXIDX_ENTRY, NULL, j / 8);
9075
9076 deleted_exidx_bytes += 8;
9077 }
9078
9079 last_unwind_type = unwind_type;
9080 }
9081
9082 /* Free contents if we allocated it ourselves. */
9083 if (contents != hdr->contents)
9084 free (contents);
9085
9086 /* Record edits to be applied later (in elf32_arm_write_section). */
9087 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9088 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9089
9090 if (deleted_exidx_bytes > 0)
9091 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9092
9093 last_exidx_sec = exidx_sec;
9094 last_text_sec = sec;
9095 }
9096
9097 /* Add terminating CANTUNWIND entry. */
9098 if (last_exidx_sec && last_unwind_type != 0)
9099 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9100
9101 return TRUE;
9102 }
9103
9104 static bfd_boolean
9105 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9106 bfd *ibfd, const char *name)
9107 {
9108 asection *sec, *osec;
9109
9110 sec = bfd_get_section_by_name (ibfd, name);
9111 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9112 return TRUE;
9113
9114 osec = sec->output_section;
9115 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9116 return TRUE;
9117
9118 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9119 sec->output_offset, sec->size))
9120 return FALSE;
9121
9122 return TRUE;
9123 }
9124
9125 static bfd_boolean
9126 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9127 {
9128 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9129
9130 /* Invoke the regular ELF backend linker to do all the work. */
9131 if (!bfd_elf_final_link (abfd, info))
9132 return FALSE;
9133
9134 /* Write out any glue sections now that we have created all the
9135 stubs. */
9136 if (globals->bfd_of_glue_owner != NULL)
9137 {
9138 if (! elf32_arm_output_glue_section (info, abfd,
9139 globals->bfd_of_glue_owner,
9140 ARM2THUMB_GLUE_SECTION_NAME))
9141 return FALSE;
9142
9143 if (! elf32_arm_output_glue_section (info, abfd,
9144 globals->bfd_of_glue_owner,
9145 THUMB2ARM_GLUE_SECTION_NAME))
9146 return FALSE;
9147
9148 if (! elf32_arm_output_glue_section (info, abfd,
9149 globals->bfd_of_glue_owner,
9150 VFP11_ERRATUM_VENEER_SECTION_NAME))
9151 return FALSE;
9152
9153 if (! elf32_arm_output_glue_section (info, abfd,
9154 globals->bfd_of_glue_owner,
9155 ARM_BX_GLUE_SECTION_NAME))
9156 return FALSE;
9157 }
9158
9159 return TRUE;
9160 }
9161
9162 /* Set the right machine number. */
9163
9164 static bfd_boolean
9165 elf32_arm_object_p (bfd *abfd)
9166 {
9167 unsigned int mach;
9168
9169 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9170
9171 if (mach != bfd_mach_arm_unknown)
9172 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9173
9174 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9175 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9176
9177 else
9178 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9179
9180 return TRUE;
9181 }
9182
9183 /* Function to keep ARM specific flags in the ELF header. */
9184
9185 static bfd_boolean
9186 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9187 {
9188 if (elf_flags_init (abfd)
9189 && elf_elfheader (abfd)->e_flags != flags)
9190 {
9191 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9192 {
9193 if (flags & EF_ARM_INTERWORK)
9194 (*_bfd_error_handler)
9195 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9196 abfd);
9197 else
9198 _bfd_error_handler
9199 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9200 abfd);
9201 }
9202 }
9203 else
9204 {
9205 elf_elfheader (abfd)->e_flags = flags;
9206 elf_flags_init (abfd) = TRUE;
9207 }
9208
9209 return TRUE;
9210 }
9211
9212 /* Copy backend specific data from one object module to another. */
9213
9214 static bfd_boolean
9215 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9216 {
9217 flagword in_flags;
9218 flagword out_flags;
9219
9220 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9221 return TRUE;
9222
9223 in_flags = elf_elfheader (ibfd)->e_flags;
9224 out_flags = elf_elfheader (obfd)->e_flags;
9225
9226 if (elf_flags_init (obfd)
9227 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9228 && in_flags != out_flags)
9229 {
9230 /* Cannot mix APCS26 and APCS32 code. */
9231 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9232 return FALSE;
9233
9234 /* Cannot mix float APCS and non-float APCS code. */
9235 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9236 return FALSE;
9237
9238 /* If the src and dest have different interworking flags
9239 then turn off the interworking bit. */
9240 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9241 {
9242 if (out_flags & EF_ARM_INTERWORK)
9243 _bfd_error_handler
9244 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9245 obfd, ibfd);
9246
9247 in_flags &= ~EF_ARM_INTERWORK;
9248 }
9249
9250 /* Likewise for PIC, though don't warn for this case. */
9251 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9252 in_flags &= ~EF_ARM_PIC;
9253 }
9254
9255 elf_elfheader (obfd)->e_flags = in_flags;
9256 elf_flags_init (obfd) = TRUE;
9257
9258 /* Also copy the EI_OSABI field. */
9259 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9260 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9261
9262 /* Copy object attributes. */
9263 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9264
9265 return TRUE;
9266 }
9267
9268 /* Values for Tag_ABI_PCS_R9_use. */
9269 enum
9270 {
9271 AEABI_R9_V6,
9272 AEABI_R9_SB,
9273 AEABI_R9_TLS,
9274 AEABI_R9_unused
9275 };
9276
9277 /* Values for Tag_ABI_PCS_RW_data. */
9278 enum
9279 {
9280 AEABI_PCS_RW_data_absolute,
9281 AEABI_PCS_RW_data_PCrel,
9282 AEABI_PCS_RW_data_SBrel,
9283 AEABI_PCS_RW_data_unused
9284 };
9285
9286 /* Values for Tag_ABI_enum_size. */
9287 enum
9288 {
9289 AEABI_enum_unused,
9290 AEABI_enum_short,
9291 AEABI_enum_wide,
9292 AEABI_enum_forced_wide
9293 };
9294
9295 /* Determine whether an object attribute tag takes an integer, a
9296 string or both. */
9297
9298 static int
9299 elf32_arm_obj_attrs_arg_type (int tag)
9300 {
9301 if (tag == Tag_compatibility)
9302 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9303 else if (tag == Tag_nodefaults)
9304 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9305 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9306 return ATTR_TYPE_FLAG_STR_VAL;
9307 else if (tag < 32)
9308 return ATTR_TYPE_FLAG_INT_VAL;
9309 else
9310 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9311 }
9312
9313 /* The ABI defines that Tag_conformance should be emitted first, and that
9314 Tag_nodefaults should be second (if either is defined). This sets those
9315 two positions, and bumps up the position of all the remaining tags to
9316 compensate. */
9317 static int
9318 elf32_arm_obj_attrs_order (int num)
9319 {
9320 if (num == 4)
9321 return Tag_conformance;
9322 if (num == 5)
9323 return Tag_nodefaults;
9324 if ((num - 2) < Tag_nodefaults)
9325 return num - 2;
9326 if ((num - 1) < Tag_conformance)
9327 return num - 1;
9328 return num;
9329 }
9330
9331 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9332 Returns -1 if no architecture could be read. */
9333
9334 static int
9335 get_secondary_compatible_arch (bfd *abfd)
9336 {
9337 obj_attribute *attr =
9338 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9339
9340 /* Note: the tag and its argument below are uleb128 values, though
9341 currently-defined values fit in one byte for each. */
9342 if (attr->s
9343 && attr->s[0] == Tag_CPU_arch
9344 && (attr->s[1] & 128) != 128
9345 && attr->s[2] == 0)
9346 return attr->s[1];
9347
9348 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9349 return -1;
9350 }
9351
9352 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9353 The tag is removed if ARCH is -1. */
9354
9355 static void
9356 set_secondary_compatible_arch (bfd *abfd, int arch)
9357 {
9358 obj_attribute *attr =
9359 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9360
9361 if (arch == -1)
9362 {
9363 attr->s = NULL;
9364 return;
9365 }
9366
9367 /* Note: the tag and its argument below are uleb128 values, though
9368 currently-defined values fit in one byte for each. */
9369 if (!attr->s)
9370 attr->s = bfd_alloc (abfd, 3);
9371 attr->s[0] = Tag_CPU_arch;
9372 attr->s[1] = arch;
9373 attr->s[2] = '\0';
9374 }
9375
9376 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9377 into account. */
9378
9379 static int
9380 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9381 int newtag, int secondary_compat)
9382 {
9383 #define T(X) TAG_CPU_ARCH_##X
9384 int tagl, tagh, result;
9385 const int v6t2[] =
9386 {
9387 T(V6T2), /* PRE_V4. */
9388 T(V6T2), /* V4. */
9389 T(V6T2), /* V4T. */
9390 T(V6T2), /* V5T. */
9391 T(V6T2), /* V5TE. */
9392 T(V6T2), /* V5TEJ. */
9393 T(V6T2), /* V6. */
9394 T(V7), /* V6KZ. */
9395 T(V6T2) /* V6T2. */
9396 };
9397 const int v6k[] =
9398 {
9399 T(V6K), /* PRE_V4. */
9400 T(V6K), /* V4. */
9401 T(V6K), /* V4T. */
9402 T(V6K), /* V5T. */
9403 T(V6K), /* V5TE. */
9404 T(V6K), /* V5TEJ. */
9405 T(V6K), /* V6. */
9406 T(V6KZ), /* V6KZ. */
9407 T(V7), /* V6T2. */
9408 T(V6K) /* V6K. */
9409 };
9410 const int v7[] =
9411 {
9412 T(V7), /* PRE_V4. */
9413 T(V7), /* V4. */
9414 T(V7), /* V4T. */
9415 T(V7), /* V5T. */
9416 T(V7), /* V5TE. */
9417 T(V7), /* V5TEJ. */
9418 T(V7), /* V6. */
9419 T(V7), /* V6KZ. */
9420 T(V7), /* V6T2. */
9421 T(V7), /* V6K. */
9422 T(V7) /* V7. */
9423 };
9424 const int v6_m[] =
9425 {
9426 -1, /* PRE_V4. */
9427 -1, /* V4. */
9428 T(V6K), /* V4T. */
9429 T(V6K), /* V5T. */
9430 T(V6K), /* V5TE. */
9431 T(V6K), /* V5TEJ. */
9432 T(V6K), /* V6. */
9433 T(V6KZ), /* V6KZ. */
9434 T(V7), /* V6T2. */
9435 T(V6K), /* V6K. */
9436 T(V7), /* V7. */
9437 T(V6_M) /* V6_M. */
9438 };
9439 const int v6s_m[] =
9440 {
9441 -1, /* PRE_V4. */
9442 -1, /* V4. */
9443 T(V6K), /* V4T. */
9444 T(V6K), /* V5T. */
9445 T(V6K), /* V5TE. */
9446 T(V6K), /* V5TEJ. */
9447 T(V6K), /* V6. */
9448 T(V6KZ), /* V6KZ. */
9449 T(V7), /* V6T2. */
9450 T(V6K), /* V6K. */
9451 T(V7), /* V7. */
9452 T(V6S_M), /* V6_M. */
9453 T(V6S_M) /* V6S_M. */
9454 };
9455 const int v4t_plus_v6_m[] =
9456 {
9457 -1, /* PRE_V4. */
9458 -1, /* V4. */
9459 T(V4T), /* V4T. */
9460 T(V5T), /* V5T. */
9461 T(V5TE), /* V5TE. */
9462 T(V5TEJ), /* V5TEJ. */
9463 T(V6), /* V6. */
9464 T(V6KZ), /* V6KZ. */
9465 T(V6T2), /* V6T2. */
9466 T(V6K), /* V6K. */
9467 T(V7), /* V7. */
9468 T(V6_M), /* V6_M. */
9469 T(V6S_M), /* V6S_M. */
9470 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9471 };
9472 const int *comb[] =
9473 {
9474 v6t2,
9475 v6k,
9476 v7,
9477 v6_m,
9478 v6s_m,
9479 /* Pseudo-architecture. */
9480 v4t_plus_v6_m
9481 };
9482
9483 /* Check we've not got a higher architecture than we know about. */
9484
9485 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9486 {
9487 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9488 return -1;
9489 }
9490
9491 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9492
9493 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9494 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9495 oldtag = T(V4T_PLUS_V6_M);
9496
9497 /* And override the new tag if we have a Tag_also_compatible_with on the
9498 input. */
9499
9500 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9501 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9502 newtag = T(V4T_PLUS_V6_M);
9503
9504 tagl = (oldtag < newtag) ? oldtag : newtag;
9505 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9506
9507 /* Architectures before V6KZ add features monotonically. */
9508 if (tagh <= TAG_CPU_ARCH_V6KZ)
9509 return result;
9510
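  /* Beyond V6KZ the combination is table-driven: the row of comb[] is
     selected by the higher of the two tags (comb[0] corresponds to
     V6T2) and the column by the lower tag.  */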
9511 result = comb[tagh - T(V6T2)][tagl];
9512
9513 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9514 as the canonical version. */
9515 if (result == T(V4T_PLUS_V6_M))
9516 {
9517 result = T(V4T);
9518 *secondary_compat_out = T(V6_M);
9519 }
9520 else
9521 *secondary_compat_out = -1;
9522
9523 if (result == -1)
9524 {
9525 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9526 ibfd, oldtag, newtag);
9527 return -1;
9528 }
9529
9530 return result;
9531 #undef T
9532 }
9533
9534 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9535 are conflicting attributes. */
9536
9537 static bfd_boolean
9538 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9539 {
9540 obj_attribute *in_attr;
9541 obj_attribute *out_attr;
9542 obj_attribute_list *in_list;
9543 obj_attribute_list *out_list;
9544 obj_attribute_list **out_listp;
9545 /* Some tags have 0 = don't care, 1 = strong requirement,
9546 2 = weak requirement. */
9547 static const int order_021[3] = {0, 2, 1};
9548 /* For use with Tag_VFP_arch. */
9549 static const int order_01243[5] = {0, 1, 2, 4, 3};
9550 int i;
9551 bfd_boolean result = TRUE;
9552
9553 /* Skip the linker stubs file. This preserves previous behavior
9554 of accepting unknown attributes in the first input file - but
9555 is that a bug? */
9556 if (ibfd->flags & BFD_LINKER_CREATED)
9557 return TRUE;
9558
9559 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9560 {
9561 /* This is the first object. Copy the attributes. */
9562 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9563
9564 /* Use the Tag_null value to indicate the attributes have been
9565 initialized. */
9566 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9567
9568 return TRUE;
9569 }
9570
9571 in_attr = elf_known_obj_attributes_proc (ibfd);
9572 out_attr = elf_known_obj_attributes_proc (obfd);
9573 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9574 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9575 {
9576 /* Ignore mismatches if the object doesn't use floating point. */
9577 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9578 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9579 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9580 {
9581 _bfd_error_handler
9582 (_("error: %B uses VFP register arguments, %B does not"),
9583 ibfd, obfd);
9584 result = FALSE;
9585 }
9586 }
9587
9588 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9589 {
9590 /* Merge this attribute with existing attributes. */
9591 switch (i)
9592 {
9593 case Tag_CPU_raw_name:
9594 case Tag_CPU_name:
9595 /* These are merged after Tag_CPU_arch. */
9596 break;
9597
9598 case Tag_ABI_optimization_goals:
9599 case Tag_ABI_FP_optimization_goals:
9600 /* Use the first value seen. */
9601 break;
9602
9603 case Tag_CPU_arch:
9604 {
9605 int secondary_compat = -1, secondary_compat_out = -1;
9606 unsigned int saved_out_attr = out_attr[i].i;
9607 static const char *name_table[] = {
9608 /* These aren't real CPU names, but we can't guess
9609 that from the architecture version alone. */
9610 "Pre v4",
9611 "ARM v4",
9612 "ARM v4T",
9613 "ARM v5T",
9614 "ARM v5TE",
9615 "ARM v5TEJ",
9616 "ARM v6",
9617 "ARM v6KZ",
9618 "ARM v6T2",
9619 "ARM v6K",
9620 "ARM v7",
9621 "ARM v6-M",
9622 "ARM v6S-M"
9623 };
9624
9625 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9626 secondary_compat = get_secondary_compatible_arch (ibfd);
9627 secondary_compat_out = get_secondary_compatible_arch (obfd);
9628 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9629 &secondary_compat_out,
9630 in_attr[i].i,
9631 secondary_compat);
9632 set_secondary_compatible_arch (obfd, secondary_compat_out);
9633
9634 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9635 if (out_attr[i].i == saved_out_attr)
9636 ; /* Leave the names alone. */
9637 else if (out_attr[i].i == in_attr[i].i)
9638 {
9639 /* The output architecture has been changed to match the
9640 input architecture. Use the input names. */
9641 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9642 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9643 : NULL;
9644 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9645 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9646 : NULL;
9647 }
9648 else
9649 {
9650 out_attr[Tag_CPU_name].s = NULL;
9651 out_attr[Tag_CPU_raw_name].s = NULL;
9652 }
9653
9654 /* If we still don't have a value for Tag_CPU_name,
9655 make one up now. Tag_CPU_raw_name remains blank. */
9656 if (out_attr[Tag_CPU_name].s == NULL
9657 && out_attr[i].i < ARRAY_SIZE (name_table))
9658 out_attr[Tag_CPU_name].s =
9659 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9660 }
9661 break;
9662
9663 case Tag_ARM_ISA_use:
9664 case Tag_THUMB_ISA_use:
9665 case Tag_WMMX_arch:
9666 case Tag_Advanced_SIMD_arch:
9667 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9668 case Tag_ABI_FP_rounding:
9669 case Tag_ABI_FP_exceptions:
9670 case Tag_ABI_FP_user_exceptions:
9671 case Tag_ABI_FP_number_model:
9672 case Tag_VFP_HP_extension:
9673 case Tag_CPU_unaligned_access:
9674 case Tag_T2EE_use:
9675 case Tag_Virtualization_use:
9676 case Tag_MPextension_use:
9677 /* Use the largest value specified. */
9678 if (in_attr[i].i > out_attr[i].i)
9679 out_attr[i].i = in_attr[i].i;
9680 break;
9681
9682 case Tag_ABI_align8_preserved:
9683 case Tag_ABI_PCS_RO_data:
9684 /* Use the smallest value specified. */
9685 if (in_attr[i].i < out_attr[i].i)
9686 out_attr[i].i = in_attr[i].i;
9687 break;
9688
9689 case Tag_ABI_align8_needed:
9690 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9691 && (in_attr[Tag_ABI_align8_preserved].i == 0
9692 || out_attr[Tag_ABI_align8_preserved].i == 0))
9693 {
9694 /* This error message should be enabled once all non-conformant
9695 binaries in the toolchain have had the attributes set
9696 properly.
9697 _bfd_error_handler
9698 (_("error: %B: 8-byte data alignment conflicts with %B"),
9699 obfd, ibfd);
9700 result = FALSE; */
9701 }
9702 /* Fall through. */
9703 case Tag_ABI_FP_denormal:
9704 case Tag_ABI_PCS_GOT_use:
9705 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9706 value if greater than 2 (for future-proofing). */
9707 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9708 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9709 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9710 out_attr[i].i = in_attr[i].i;
9711 break;
9712
9713
9714 case Tag_CPU_arch_profile:
9715 if (out_attr[i].i != in_attr[i].i)
9716 {
9717 /* 0 will merge with anything.
9718 'A' and 'S' merge to 'A'.
9719 'R' and 'S' merge to 'R'.
9720 'M' and 'A|R|S' is an error. */
9721 if (out_attr[i].i == 0
9722 || (out_attr[i].i == 'S'
9723 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9724 out_attr[i].i = in_attr[i].i;
9725 else if (in_attr[i].i == 0
9726 || (in_attr[i].i == 'S'
9727 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9728 ; /* Do nothing. */
9729 else
9730 {
9731 _bfd_error_handler
9732 (_("error: %B: Conflicting architecture profiles %c/%c"),
9733 ibfd,
9734 in_attr[i].i ? in_attr[i].i : '0',
9735 out_attr[i].i ? out_attr[i].i : '0');
9736 result = FALSE;
9737 }
9738 }
9739 break;
9740 case Tag_VFP_arch:
9741 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9742 largest value if greater than 4 (for future-proofing). */
9743 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9744 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9745 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9746 out_attr[i].i = in_attr[i].i;
9747 break;
9748 case Tag_PCS_config:
9749 if (out_attr[i].i == 0)
9750 out_attr[i].i = in_attr[i].i;
9751 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9752 {
9753 /* It's sometimes ok to mix different configs, so this is only
9754 a warning. */
9755 _bfd_error_handler
9756 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9757 }
9758 break;
9759 case Tag_ABI_PCS_R9_use:
9760 if (in_attr[i].i != out_attr[i].i
9761 && out_attr[i].i != AEABI_R9_unused
9762 && in_attr[i].i != AEABI_R9_unused)
9763 {
9764 _bfd_error_handler
9765 (_("error: %B: Conflicting use of R9"), ibfd);
9766 result = FALSE;
9767 }
9768 if (out_attr[i].i == AEABI_R9_unused)
9769 out_attr[i].i = in_attr[i].i;
9770 break;
9771 case Tag_ABI_PCS_RW_data:
9772 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9773 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9774 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9775 {
9776 _bfd_error_handler
9777 (_("error: %B: SB relative addressing conflicts with use of R9"),
9778 ibfd);
9779 result = FALSE;
9780 }
9781 /* Use the smallest value specified. */
9782 if (in_attr[i].i < out_attr[i].i)
9783 out_attr[i].i = in_attr[i].i;
9784 break;
9785 case Tag_ABI_PCS_wchar_t:
9786 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9787 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9788 {
9789 _bfd_error_handler
9790 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9791 ibfd, in_attr[i].i, out_attr[i].i);
9792 }
9793 else if (in_attr[i].i && !out_attr[i].i)
9794 out_attr[i].i = in_attr[i].i;
9795 break;
9796 case Tag_ABI_enum_size:
9797 if (in_attr[i].i != AEABI_enum_unused)
9798 {
9799 if (out_attr[i].i == AEABI_enum_unused
9800 || out_attr[i].i == AEABI_enum_forced_wide)
9801 {
9802 /* The existing object is compatible with anything.
9803 Use whatever requirements the new object has. */
9804 out_attr[i].i = in_attr[i].i;
9805 }
9806 else if (in_attr[i].i != AEABI_enum_forced_wide
9807 && out_attr[i].i != in_attr[i].i
9808 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9809 {
9810 static const char *aeabi_enum_names[] =
9811 { "", "variable-size", "32-bit", "" };
9812 const char *in_name =
9813 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9814 ? aeabi_enum_names[in_attr[i].i]
9815 : "<unknown>";
9816 const char *out_name =
9817 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9818 ? aeabi_enum_names[out_attr[i].i]
9819 : "<unknown>";
9820 _bfd_error_handler
9821 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9822 ibfd, in_name, out_name);
9823 }
9824 }
9825 break;
9826 case Tag_ABI_VFP_args:
9827 /* Already done. */
9828 break;
9829 case Tag_ABI_WMMX_args:
9830 if (in_attr[i].i != out_attr[i].i)
9831 {
9832 _bfd_error_handler
9833 (_("error: %B uses iWMMXt register arguments, %B does not"),
9834 ibfd, obfd);
9835 result = FALSE;
9836 }
9837 break;
9838 case Tag_compatibility:
9839 /* Merged in target-independent code. */
9840 break;
9841 case Tag_ABI_HardFP_use:
9842 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
9843 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
9844 || (in_attr[i].i == 2 && out_attr[i].i == 1))
9845 out_attr[i].i = 3;
9846 else if (in_attr[i].i > out_attr[i].i)
9847 out_attr[i].i = in_attr[i].i;
9848 break;
9849 case Tag_ABI_FP_16bit_format:
9850 if (in_attr[i].i != 0 && out_attr[i].i != 0)
9851 {
9852 if (in_attr[i].i != out_attr[i].i)
9853 {
9854 _bfd_error_handler
9855 (_("error: fp16 format mismatch between %B and %B"),
9856 ibfd, obfd);
9857 result = FALSE;
9858 }
9859 }
9860 if (in_attr[i].i != 0)
9861 out_attr[i].i = in_attr[i].i;
9862 break;
9863
9864 case Tag_nodefaults:
9865 /* This tag is set if it exists, but the value is unused (and is
9866 typically zero). We don't actually need to do anything here -
9867 the merge happens automatically when the type flags are merged
9868 below. */
9869 break;
9870 case Tag_also_compatible_with:
9871 /* Already done in Tag_CPU_arch. */
9872 break;
9873 case Tag_conformance:
9874 /* Keep the attribute if it matches. Throw it away otherwise.
9875 No attribute means no claim to conform. */
9876 if (!in_attr[i].s || !out_attr[i].s
9877 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
9878 out_attr[i].s = NULL;
9879 break;
9880
9881 default:
9882 {
9883 bfd *err_bfd = NULL;
9884
9885 /* The "known_obj_attributes" table does contain some undefined
9886 attributes. Ensure that they are unused. */
9887 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
9888 err_bfd = obfd;
9889 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
9890 err_bfd = ibfd;
9891
9892 if (err_bfd != NULL)
9893 {
9894 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9895 if ((i & 127) < 64)
9896 {
9897 _bfd_error_handler
9898 (_("%B: Unknown mandatory EABI object attribute %d"),
9899 err_bfd, i);
9900 bfd_set_error (bfd_error_bad_value);
9901 result = FALSE;
9902 }
9903 else
9904 {
9905 _bfd_error_handler
9906 (_("Warning: %B: Unknown EABI object attribute %d"),
9907 err_bfd, i);
9908 }
9909 }
9910
9911 /* Only pass on attributes that match in both inputs. */
9912 if (in_attr[i].i != out_attr[i].i
9913 || in_attr[i].s != out_attr[i].s
9914 || (in_attr[i].s != NULL && out_attr[i].s != NULL
9915 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
9916 {
9917 out_attr[i].i = 0;
9918 out_attr[i].s = NULL;
9919 }
9920 }
9921 }
9922
9923 /* If out_attr was copied from in_attr then it won't have a type yet. */
9924 if (in_attr[i].type && !out_attr[i].type)
9925 out_attr[i].type = in_attr[i].type;
9926 }
9927
9928 /* Merge Tag_compatibility attributes and any common GNU ones. */
9929 _bfd_elf_merge_object_attributes (ibfd, obfd);
9930
9931 /* Check for any attributes not known on ARM. */
9932 in_list = elf_other_obj_attributes_proc (ibfd);
9933 out_listp = &elf_other_obj_attributes_proc (obfd);
9934 out_list = *out_listp;
9935
9936 for (; in_list || out_list; )
9937 {
9938 bfd *err_bfd = NULL;
9939 int err_tag = 0;
9940
9941 /* The tags for each list are in numerical order. */
9942 /* If the tags are equal, then merge. */
9943 if (out_list && (!in_list || in_list->tag > out_list->tag))
9944 {
9945 /* This attribute only exists in obfd. We can't merge, and we don't
9946 know what the tag means, so delete it. */
9947 err_bfd = obfd;
9948 err_tag = out_list->tag;
9949 *out_listp = out_list->next;
9950 out_list = *out_listp;
9951 }
9952 else if (in_list && (!out_list || in_list->tag < out_list->tag))
9953 {
9954 /* This attribute only exists in ibfd. We can't merge, and we don't
9955 know what the tag means, so ignore it. */
9956 err_bfd = ibfd;
9957 err_tag = in_list->tag;
9958 in_list = in_list->next;
9959 }
9960 else /* The tags are equal. */
9961 {
9962 /* At present, all attributes in the list are unknown, and
9963 therefore can't be merged meaningfully. */
9964 err_bfd = obfd;
9965 err_tag = out_list->tag;
9966
9967 /* Only pass on attributes that match in both inputs. */
9968 if (in_list->attr.i != out_list->attr.i
9969 || in_list->attr.s != out_list->attr.s
9970 || (in_list->attr.s && out_list->attr.s
9971 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
9972 {
9973 /* No match. Delete the attribute. */
9974 *out_listp = out_list->next;
9975 out_list = *out_listp;
9976 }
9977 else
9978 {
9979 /* Matched. Keep the attribute and move to the next. */
9980 out_list = out_list->next;
9981 in_list = in_list->next;
9982 }
9983 }
9984
9985 if (err_bfd)
9986 {
9987 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9988 if ((err_tag & 127) < 64)
9989 {
9990 _bfd_error_handler
9991 (_("%B: Unknown mandatory EABI object attribute %d"),
9992 err_bfd, err_tag);
9993 bfd_set_error (bfd_error_bad_value);
9994 result = FALSE;
9995 }
9996 else
9997 {
9998 _bfd_error_handler
9999 (_("Warning: %B: Unknown EABI object attribute %d"),
10000 err_bfd, err_tag);
10001 }
10002 }
10003 }
10004 return result;
10005 }
10006
10007
10008 /* Return TRUE if the two EABI versions are incompatible. */
10009
10010 static bfd_boolean
10011 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10012 {
10013 /* v4 and v5 are the same spec before and after it was released,
10014 so allow mixing them. */
10015 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10016 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10017 return TRUE;
10018
10019 return (iver == over);
10020 }
10021
10022 /* Merge backend specific data from an object file to the output
10023 object file when linking. */
10024
10025 static bfd_boolean
10026 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
10027 {
10028 flagword out_flags;
10029 flagword in_flags;
10030 bfd_boolean flags_compatible = TRUE;
10031 asection *sec;
10032
10033 /* Check if we have the same endianness. */
10034 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
10035 return FALSE;
10036
10037 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10038 return TRUE;
10039
10040 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
10041 return FALSE;
10042
10043 /* The input BFD must have had its flags initialised. */
10044 /* The following seems bogus to me -- The flags are initialized in
10045 the assembler but I don't think an elf_flags_init field is
10046 written into the object. */
10047 /* BFD_ASSERT (elf_flags_init (ibfd)); */
10048
10049 in_flags = elf_elfheader (ibfd)->e_flags;
10050 out_flags = elf_elfheader (obfd)->e_flags;
10051
10052 /* In theory there is no reason why we couldn't handle this. However
10053 in practice it isn't even close to working and there is no real
10054 reason to want it. */
10055 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
10056 && !(ibfd->flags & DYNAMIC)
10057 && (in_flags & EF_ARM_BE8))
10058 {
10059 _bfd_error_handler (_("error: %B is already in final BE8 format"),
10060 ibfd);
10061 return FALSE;
10062 }
10063
10064 if (!elf_flags_init (obfd))
10065 {
10066 /* If the input is the default architecture and had the default
10067 flags then do not bother setting the flags for the output
10068 architecture; instead allow future merges to do this. If no
10069 future merges ever set these flags then they will retain their
10070 uninitialised values, which, surprise surprise, correspond
10071 to the default values. */
10072 if (bfd_get_arch_info (ibfd)->the_default
10073 && elf_elfheader (ibfd)->e_flags == 0)
10074 return TRUE;
10075
10076 elf_flags_init (obfd) = TRUE;
10077 elf_elfheader (obfd)->e_flags = in_flags;
10078
10079 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
10080 && bfd_get_arch_info (obfd)->the_default)
10081 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
10082
10083 return TRUE;
10084 }
10085
10086 /* Determine what should happen if the input ARM architecture
10087 does not match the output ARM architecture. */
10088 if (! bfd_arm_merge_machines (ibfd, obfd))
10089 return FALSE;
10090
10091 /* Identical flags must be compatible. */
10092 if (in_flags == out_flags)
10093 return TRUE;
10094
10095 /* Check to see if the input BFD actually contains any sections. If
10096 not, its flags may not have been initialised either, but it
10097 cannot actually cause any incompatibility. Do not short-circuit
10098 dynamic objects; their section list may be emptied by
10099 elf_link_add_object_symbols.
10100
10101 Also check to see if there are no code sections in the input.
10102 In this case there is no need to check for code specific flags.
10103 XXX - do we need to worry about floating-point format compatibility
10104 in data sections ? */
10105 if (!(ibfd->flags & DYNAMIC))
10106 {
10107 bfd_boolean null_input_bfd = TRUE;
10108 bfd_boolean only_data_sections = TRUE;
10109
10110 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
10111 {
10112 /* Ignore synthetic glue sections. */
10113 if (strcmp (sec->name, ".glue_7")
10114 && strcmp (sec->name, ".glue_7t"))
10115 {
10116 if ((bfd_get_section_flags (ibfd, sec)
10117 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10118 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10119 only_data_sections = FALSE;
10120
10121 null_input_bfd = FALSE;
10122 break;
10123 }
10124 }
10125
10126 if (null_input_bfd || only_data_sections)
10127 return TRUE;
10128 }
10129
10130 /* Complain about various flag mismatches. */
10131 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
10132 EF_ARM_EABI_VERSION (out_flags)))
10133 {
10134 _bfd_error_handler
10135 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
10136 ibfd, obfd,
10137 (in_flags & EF_ARM_EABIMASK) >> 24,
10138 (out_flags & EF_ARM_EABIMASK) >> 24);
10139 return FALSE;
10140 }
10141
10142 /* Not sure what needs to be checked for EABI versions >= 1. */
10143 /* VxWorks libraries do not use these flags. */
10144 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
10145 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
10146 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
10147 {
10148 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10149 {
10150 _bfd_error_handler
10151 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
10152 ibfd, obfd,
10153 in_flags & EF_ARM_APCS_26 ? 26 : 32,
10154 out_flags & EF_ARM_APCS_26 ? 26 : 32);
10155 flags_compatible = FALSE;
10156 }
10157
10158 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10159 {
10160 if (in_flags & EF_ARM_APCS_FLOAT)
10161 _bfd_error_handler
10162 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
10163 ibfd, obfd);
10164 else
10165 _bfd_error_handler
10166 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
10167 ibfd, obfd);
10168
10169 flags_compatible = FALSE;
10170 }
10171
10172 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
10173 {
10174 if (in_flags & EF_ARM_VFP_FLOAT)
10175 _bfd_error_handler
10176 (_("error: %B uses VFP instructions, whereas %B does not"),
10177 ibfd, obfd);
10178 else
10179 _bfd_error_handler
10180 (_("error: %B uses FPA instructions, whereas %B does not"),
10181 ibfd, obfd);
10182
10183 flags_compatible = FALSE;
10184 }
10185
10186 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
10187 {
10188 if (in_flags & EF_ARM_MAVERICK_FLOAT)
10189 _bfd_error_handler
10190 (_("error: %B uses Maverick instructions, whereas %B does not"),
10191 ibfd, obfd);
10192 else
10193 _bfd_error_handler
10194 (_("error: %B does not use Maverick instructions, whereas %B does"),
10195 ibfd, obfd);
10196
10197 flags_compatible = FALSE;
10198 }
10199
10200 #ifdef EF_ARM_SOFT_FLOAT
10201 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
10202 {
10203 /* We can allow interworking between code that is VFP format
10204 layout, and uses either soft float or integer regs for
10205 passing floating point arguments and results. We already
10206 know that the APCS_FLOAT flags match; similarly for VFP
10207 flags. */
10208 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
10209 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
10210 {
10211 if (in_flags & EF_ARM_SOFT_FLOAT)
10212 _bfd_error_handler
10213 (_("error: %B uses software FP, whereas %B uses hardware FP"),
10214 ibfd, obfd);
10215 else
10216 _bfd_error_handler
10217 (_("error: %B uses hardware FP, whereas %B uses software FP"),
10218 ibfd, obfd);
10219
10220 flags_compatible = FALSE;
10221 }
10222 }
10223 #endif
10224
10225 /* Interworking mismatch is only a warning. */
10226 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10227 {
10228 if (in_flags & EF_ARM_INTERWORK)
10229 {
10230 _bfd_error_handler
10231 (_("Warning: %B supports interworking, whereas %B does not"),
10232 ibfd, obfd);
10233 }
10234 else
10235 {
10236 _bfd_error_handler
10237 (_("Warning: %B does not support interworking, whereas %B does"),
10238 ibfd, obfd);
10239 }
10240 }
10241 }
10242
10243 return flags_compatible;
10244 }
10245
10246 /* Display the flags field. */
10247
10248 static bfd_boolean
10249 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10250 {
10251 FILE * file = (FILE *) ptr;
10252 unsigned long flags;
10253
10254 BFD_ASSERT (abfd != NULL && ptr != NULL);
10255
10256 /* Print normal ELF private data. */
10257 _bfd_elf_print_private_bfd_data (abfd, ptr);
10258
10259 flags = elf_elfheader (abfd)->e_flags;
10260 /* Ignore init flag - it may not be set, despite the flags field
10261 containing valid data. */
10262
10263 /* xgettext:c-format */
10264 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10265
10266 switch (EF_ARM_EABI_VERSION (flags))
10267 {
10268 case EF_ARM_EABI_UNKNOWN:
10269 /* The following flag bits are GNU extensions and not part of the
10270 official ARM ELF extended ABI. Hence they are only decoded if
10271 the EABI version is not set. */
10272 if (flags & EF_ARM_INTERWORK)
10273 fprintf (file, _(" [interworking enabled]"));
10274
10275 if (flags & EF_ARM_APCS_26)
10276 fprintf (file, " [APCS-26]");
10277 else
10278 fprintf (file, " [APCS-32]");
10279
10280 if (flags & EF_ARM_VFP_FLOAT)
10281 fprintf (file, _(" [VFP float format]"));
10282 else if (flags & EF_ARM_MAVERICK_FLOAT)
10283 fprintf (file, _(" [Maverick float format]"));
10284 else
10285 fprintf (file, _(" [FPA float format]"));
10286
10287 if (flags & EF_ARM_APCS_FLOAT)
10288 fprintf (file, _(" [floats passed in float registers]"));
10289
10290 if (flags & EF_ARM_PIC)
10291 fprintf (file, _(" [position independent]"));
10292
10293 if (flags & EF_ARM_NEW_ABI)
10294 fprintf (file, _(" [new ABI]"));
10295
10296 if (flags & EF_ARM_OLD_ABI)
10297 fprintf (file, _(" [old ABI]"));
10298
10299 if (flags & EF_ARM_SOFT_FLOAT)
10300 fprintf (file, _(" [software FP]"));
10301
10302 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10303 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10304 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10305 | EF_ARM_MAVERICK_FLOAT);
10306 break;
10307
10308 case EF_ARM_EABI_VER1:
10309 fprintf (file, _(" [Version1 EABI]"));
10310
10311 if (flags & EF_ARM_SYMSARESORTED)
10312 fprintf (file, _(" [sorted symbol table]"));
10313 else
10314 fprintf (file, _(" [unsorted symbol table]"));
10315
10316 flags &= ~ EF_ARM_SYMSARESORTED;
10317 break;
10318
10319 case EF_ARM_EABI_VER2:
10320 fprintf (file, _(" [Version2 EABI]"));
10321
10322 if (flags & EF_ARM_SYMSARESORTED)
10323 fprintf (file, _(" [sorted symbol table]"));
10324 else
10325 fprintf (file, _(" [unsorted symbol table]"));
10326
10327 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10328 fprintf (file, _(" [dynamic symbols use segment index]"));
10329
10330 if (flags & EF_ARM_MAPSYMSFIRST)
10331 fprintf (file, _(" [mapping symbols precede others]"));
10332
10333 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10334 | EF_ARM_MAPSYMSFIRST);
10335 break;
10336
10337 case EF_ARM_EABI_VER3:
10338 fprintf (file, _(" [Version3 EABI]"));
10339 break;
10340
10341 case EF_ARM_EABI_VER4:
10342 fprintf (file, _(" [Version4 EABI]"));
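      /* The BE8/LE8 flag bits are common to EABI v4 and v5, so share
         the decoding below.  */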
10343 goto eabi;
10344
10345 case EF_ARM_EABI_VER5:
10346 fprintf (file, _(" [Version5 EABI]"));
10347 eabi:
10348 if (flags & EF_ARM_BE8)
10349 fprintf (file, _(" [BE8]"));
10350
10351 if (flags & EF_ARM_LE8)
10352 fprintf (file, _(" [LE8]"));
10353
10354 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10355 break;
10356
10357 default:
10358 fprintf (file, _(" <EABI version unrecognised>"));
10359 break;
10360 }
10361
10362 flags &= ~ EF_ARM_EABIMASK;
10363
10364 if (flags & EF_ARM_RELEXEC)
10365 fprintf (file, _(" [relocatable executable]"));
10366
10367 if (flags & EF_ARM_HASENTRY)
10368 fprintf (file, _(" [has entry point]"));
10369
10370 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10371
10372 if (flags)
10373 fprintf (file, _("<Unrecognised flag bits set>"));
10374
10375 fputc ('\n', file);
10376
10377 return TRUE;
10378 }
10379
10380 static int
10381 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10382 {
10383 switch (ELF_ST_TYPE (elf_sym->st_info))
10384 {
10385 case STT_ARM_TFUNC:
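      /* Thumb function symbols keep their ARM-specific type so that
         callers can tell Thumb entry points apart from ARM code.  */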
10386 return ELF_ST_TYPE (elf_sym->st_info);
10387
10388 case STT_ARM_16BIT:
10389 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10390 This allows us to distinguish between data used by Thumb instructions
10391 and non-data (which is probably code) inside Thumb regions of an
10392 executable. */
10393 if (type != STT_OBJECT && type != STT_TLS)
10394 return ELF_ST_TYPE (elf_sym->st_info);
10395 break;
10396
10397 default:
10398 break;
10399 }
10400
10401 return type;
10402 }
10403
10404 static asection *
10405 elf32_arm_gc_mark_hook (asection *sec,
10406 struct bfd_link_info *info,
10407 Elf_Internal_Rela *rel,
10408 struct elf_link_hash_entry *h,
10409 Elf_Internal_Sym *sym)
10410 {
10411 if (h != NULL)
10412 switch (ELF32_R_TYPE (rel->r_info))
10413 {
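      /* Vtable inheritance and entry relocations are bookkeeping for
         garbage collection; they must not mark the referenced section
         as used.  */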
10414 case R_ARM_GNU_VTINHERIT:
10415 case R_ARM_GNU_VTENTRY:
10416 return NULL;
10417 }
10418
10419 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10420 }
10421
10422 /* Update the got entry reference counts for the section being removed. */
10423
10424 static bfd_boolean
10425 elf32_arm_gc_sweep_hook (bfd * abfd,
10426 struct bfd_link_info * info,
10427 asection * sec,
10428 const Elf_Internal_Rela * relocs)
10429 {
10430 Elf_Internal_Shdr *symtab_hdr;
10431 struct elf_link_hash_entry **sym_hashes;
10432 bfd_signed_vma *local_got_refcounts;
10433 const Elf_Internal_Rela *rel, *relend;
10434 struct elf32_arm_link_hash_table * globals;
10435
10436 if (info->relocatable)
10437 return TRUE;
10438
10439 globals = elf32_arm_hash_table (info);
10440
10441 elf_section_data (sec)->local_dynrel = NULL;
10442
10443 symtab_hdr = & elf_symtab_hdr (abfd);
10444 sym_hashes = elf_sym_hashes (abfd);
10445 local_got_refcounts = elf_local_got_refcounts (abfd);
10446
10447 check_use_blx (globals);
10448
10449 relend = relocs + sec->reloc_count;
10450 for (rel = relocs; rel < relend; rel++)
10451 {
10452 unsigned long r_symndx;
10453 struct elf_link_hash_entry *h = NULL;
10454 int r_type;
10455
10456 r_symndx = ELF32_R_SYM (rel->r_info);
10457 if (r_symndx >= symtab_hdr->sh_info)
10458 {
10459 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10460 while (h->root.type == bfd_link_hash_indirect
10461 || h->root.type == bfd_link_hash_warning)
10462 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10463 }
10464
10465 r_type = ELF32_R_TYPE (rel->r_info);
10466 r_type = arm_real_reloc_type (globals, r_type);
10467 switch (r_type)
10468 {
10469 case R_ARM_GOT32:
10470 case R_ARM_GOT_PREL:
10471 case R_ARM_TLS_GD32:
10472 case R_ARM_TLS_IE32:
10473 if (h != NULL)
10474 {
10475 if (h->got.refcount > 0)
10476 h->got.refcount -= 1;
10477 }
10478 else if (local_got_refcounts != NULL)
10479 {
10480 if (local_got_refcounts[r_symndx] > 0)
10481 local_got_refcounts[r_symndx] -= 1;
10482 }
10483 break;
10484
10485 case R_ARM_TLS_LDM32:
10486 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10487 break;
10488
10489 case R_ARM_ABS32:
10490 case R_ARM_ABS32_NOI:
10491 case R_ARM_REL32:
10492 case R_ARM_REL32_NOI:
10493 case R_ARM_PC24:
10494 case R_ARM_PLT32:
10495 case R_ARM_CALL:
10496 case R_ARM_JUMP24:
10497 case R_ARM_PREL31:
10498 case R_ARM_THM_CALL:
10499 case R_ARM_THM_JUMP24:
10500 case R_ARM_THM_JUMP19:
10501 case R_ARM_MOVW_ABS_NC:
10502 case R_ARM_MOVT_ABS:
10503 case R_ARM_MOVW_PREL_NC:
10504 case R_ARM_MOVT_PREL:
10505 case R_ARM_THM_MOVW_ABS_NC:
10506 case R_ARM_THM_MOVT_ABS:
10507 case R_ARM_THM_MOVW_PREL_NC:
10508 case R_ARM_THM_MOVT_PREL:
10509 /* Should the interworking branches be here also? */
10510
10511 if (h != NULL)
10512 {
10513 struct elf32_arm_link_hash_entry *eh;
10514 struct elf32_arm_relocs_copied **pp;
10515 struct elf32_arm_relocs_copied *p;
10516
10517 eh = (struct elf32_arm_link_hash_entry *) h;
10518
10519 if (h->plt.refcount > 0)
10520 {
10521 h->plt.refcount -= 1;
10522 if (r_type == R_ARM_THM_CALL)
10523 eh->plt_maybe_thumb_refcount--;
10524
10525 if (r_type == R_ARM_THM_JUMP24
10526 || r_type == R_ARM_THM_JUMP19)
10527 eh->plt_thumb_refcount--;
10528 }
10529
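      /* Undo the dynamic-reloc accounting done in check_relocs for
         this section: drop one copied-reloc count, and the PC-relative
         count as well for the REL32 variants.  */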
10530 if (r_type == R_ARM_ABS32
10531 || r_type == R_ARM_REL32
10532 || r_type == R_ARM_ABS32_NOI
10533 || r_type == R_ARM_REL32_NOI)
10534 {
10535 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10536 pp = &p->next)
10537 if (p->section == sec)
10538 {
10539 p->count -= 1;
10540 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10541 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10542 p->pc_count -= 1;
10543 if (p->count == 0)
10544 *pp = p->next;
10545 break;
10546 }
10547 }
10548 }
10549 break;
10550
10551 default:
10552 break;
10553 }
10554 }
10555
10556 return TRUE;
10557 }
10558
10559 /* Look through the relocs for a section during the first phase. */
10560
10561 static bfd_boolean
10562 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10563 asection *sec, const Elf_Internal_Rela *relocs)
10564 {
10565 Elf_Internal_Shdr *symtab_hdr;
10566 struct elf_link_hash_entry **sym_hashes;
10567 const Elf_Internal_Rela *rel;
10568 const Elf_Internal_Rela *rel_end;
10569 bfd *dynobj;
10570 asection *sreloc;
10571 bfd_vma *local_got_offsets;
10572 struct elf32_arm_link_hash_table *htab;
10573 bfd_boolean needs_plt;
10574 unsigned long nsyms;
10575
10576 if (info->relocatable)
10577 return TRUE;
10578
10579 BFD_ASSERT (is_arm_elf (abfd));
10580
10581 htab = elf32_arm_hash_table (info);
10582 sreloc = NULL;
10583
10584 /* Create dynamic sections for relocatable executables so that we can
10585 copy relocations. */
10586 if (htab->root.is_relocatable_executable
10587 && ! htab->root.dynamic_sections_created)
10588 {
10589 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10590 return FALSE;
10591 }
10592
10593 dynobj = elf_hash_table (info)->dynobj;
10594 local_got_offsets = elf_local_got_offsets (abfd);
10595
10596 symtab_hdr = & elf_symtab_hdr (abfd);
10597 sym_hashes = elf_sym_hashes (abfd);
10598 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10599
10600 rel_end = relocs + sec->reloc_count;
10601 for (rel = relocs; rel < rel_end; rel++)
10602 {
10603 struct elf_link_hash_entry *h;
10604 struct elf32_arm_link_hash_entry *eh;
10605 unsigned long r_symndx;
10606 int r_type;
10607
10608 r_symndx = ELF32_R_SYM (rel->r_info);
10609 r_type = ELF32_R_TYPE (rel->r_info);
10610 r_type = arm_real_reloc_type (htab, r_type);
10611
10612 if (r_symndx >= nsyms
10613 /* PR 9934: It is possible to have relocations that do not
10614 refer to symbols, thus it is also possible to have an
10615 object file containing relocations but no symbol table. */
10616 && (r_symndx > 0 || nsyms > 0))
10617 {
10618 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10619 r_symndx);
10620 return FALSE;
10621 }
10622
10623 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10624 h = NULL;
10625 else
10626 {
10627 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10628 while (h->root.type == bfd_link_hash_indirect
10629 || h->root.type == bfd_link_hash_warning)
10630 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10631 }
10632
10633 eh = (struct elf32_arm_link_hash_entry *) h;
10634
10635 switch (r_type)
10636 {
10637 case R_ARM_GOT32:
10638 case R_ARM_GOT_PREL:
10639 case R_ARM_TLS_GD32:
10640 case R_ARM_TLS_IE32:
10641 /* This symbol requires a global offset table entry. */
10642 {
10643 int tls_type, old_tls_type;
10644
10645 switch (r_type)
10646 {
10647 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10648 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10649 default: tls_type = GOT_NORMAL; break;
10650 }
10651
10652 if (h != NULL)
10653 {
10654 h->got.refcount++;
10655 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10656 }
10657 else
10658 {
10659 bfd_signed_vma *local_got_refcounts;
10660
10661 /* This is a global offset table entry for a local symbol. */
10662 local_got_refcounts = elf_local_got_refcounts (abfd);
10663 if (local_got_refcounts == NULL)
10664 {
10665 bfd_size_type size;
10666
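      /* Allocate the local GOT reference counts and the per-symbol TLS
         type bytes in a single block; the tls_type array lives directly
         after the refcount array.  */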
10667 size = symtab_hdr->sh_info;
10668 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10669 local_got_refcounts = bfd_zalloc (abfd, size);
10670 if (local_got_refcounts == NULL)
10671 return FALSE;
10672 elf_local_got_refcounts (abfd) = local_got_refcounts;
10673 elf32_arm_local_got_tls_type (abfd)
10674 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10675 }
10676 local_got_refcounts[r_symndx] += 1;
10677 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10678 }
10679
10680 /* We will already have issued an error message if there is a
10681 TLS / non-TLS mismatch, based on the symbol type. We don't
10682 support any linker relaxations. So just combine any TLS
10683 types needed. */
10684 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10685 && tls_type != GOT_NORMAL)
10686 tls_type |= old_tls_type;
10687
10688 if (old_tls_type != tls_type)
10689 {
10690 if (h != NULL)
10691 elf32_arm_hash_entry (h)->tls_type = tls_type;
10692 else
10693 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10694 }
10695 }
10696 /* Fall through. */
10697
10698 case R_ARM_TLS_LDM32:
10699 if (r_type == R_ARM_TLS_LDM32)
10700 htab->tls_ldm_got.refcount++;
10701 /* Fall through. */
10702
10703 case R_ARM_GOTOFF32:
10704 case R_ARM_GOTPC:
10705 if (htab->sgot == NULL)
10706 {
10707 if (htab->root.dynobj == NULL)
10708 htab->root.dynobj = abfd;
10709 if (!create_got_section (htab->root.dynobj, info))
10710 return FALSE;
10711 }
10712 break;
10713
10714 case R_ARM_ABS12:
10715 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10716 ldr __GOTT_INDEX__ offsets. */
10717 if (!htab->vxworks_p)
10718 break;
10719 /* Fall through. */
10720
10721 case R_ARM_PC24:
10722 case R_ARM_PLT32:
10723 case R_ARM_CALL:
10724 case R_ARM_JUMP24:
10725 case R_ARM_PREL31:
10726 case R_ARM_THM_CALL:
10727 case R_ARM_THM_JUMP24:
10728 case R_ARM_THM_JUMP19:
10729 needs_plt = 1;
10730 goto normal_reloc;
10731
10732 case R_ARM_MOVW_ABS_NC:
10733 case R_ARM_MOVT_ABS:
10734 case R_ARM_THM_MOVW_ABS_NC:
10735 case R_ARM_THM_MOVT_ABS:
10736 if (info->shared)
10737 {
10738 (*_bfd_error_handler)
10739 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10740 abfd, elf32_arm_howto_table_1[r_type].name,
10741 (h) ? h->root.root.string : "a local symbol");
10742 bfd_set_error (bfd_error_bad_value);
10743 return FALSE;
10744 }
10745
10746 /* Fall through. */
10747 case R_ARM_ABS32:
10748 case R_ARM_ABS32_NOI:
10749 case R_ARM_REL32:
10750 case R_ARM_REL32_NOI:
10751 case R_ARM_MOVW_PREL_NC:
10752 case R_ARM_MOVT_PREL:
10753 case R_ARM_THM_MOVW_PREL_NC:
10754 case R_ARM_THM_MOVT_PREL:
10755 needs_plt = 0;
10756 normal_reloc:
10757
10758 /* Should the interworking branches be listed here? */
10759 if (h != NULL)
10760 {
10761 /* If this reloc is in a read-only section, we might
10762 need a copy reloc. We can't check reliably at this
10763 stage whether the section is read-only, as input
10764 sections have not yet been mapped to output sections.
10765 Tentatively set the flag for now, and correct in
10766 adjust_dynamic_symbol. */
10767 if (!info->shared)
10768 h->non_got_ref = 1;
10769
10770 /* We may need a .plt entry if the function this reloc
10771 refers to is in a different object. We can't tell for
10772 sure yet, because something later might force the
10773 symbol local. */
10774 if (needs_plt)
10775 h->needs_plt = 1;
10776
10777 /* If we create a PLT entry, this relocation will reference
10778 it, even if it's an ABS32 relocation. */
10779 h->plt.refcount += 1;
10780
10781 /* It's too early to use htab->use_blx here, so we have to
10782 record possible blx references separately from
10783 relocs that definitely need a thumb stub. */
10784
10785 if (r_type == R_ARM_THM_CALL)
10786 eh->plt_maybe_thumb_refcount += 1;
10787
10788 if (r_type == R_ARM_THM_JUMP24
10789 || r_type == R_ARM_THM_JUMP19)
10790 eh->plt_thumb_refcount += 1;
10791 }
10792
10793 /* If we are creating a shared library or relocatable executable,
10794 and this is a reloc against a global symbol, or a non PC
10795 relative reloc against a local symbol, then we need to copy
10796 the reloc into the shared library. However, if we are linking
10797 with -Bsymbolic, we do not need to copy a reloc against a
10798 global symbol which is defined in an object we are
10799 including in the link (i.e., DEF_REGULAR is set). At
10800 this point we have not seen all the input files, so it is
10801 possible that DEF_REGULAR is not set now but will be set
10802 later (it is never cleared). We account for that
10803 possibility below by storing information in the
10804 relocs_copied field of the hash table entry. */
10805 if ((info->shared || htab->root.is_relocatable_executable)
10806 && (sec->flags & SEC_ALLOC) != 0
10807 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10808 || (h != NULL && ! h->needs_plt
10809 && (! info->symbolic || ! h->def_regular))))
10810 {
10811 struct elf32_arm_relocs_copied *p, **head;
10812
10813 /* When creating a shared object, we must copy these
10814 reloc types into the output file. We create a reloc
10815 section in dynobj and make room for this reloc. */
10816 if (sreloc == NULL)
10817 {
10818 sreloc = _bfd_elf_make_dynamic_reloc_section
10819 (sec, dynobj, 2, abfd, ! htab->use_rel);
10820
10821 if (sreloc == NULL)
10822 return FALSE;
10823
10824 /* BPABI objects never have dynamic relocations mapped. */
10825 if (htab->symbian_p)
10826 {
10827 flagword flags;
10828
10829 flags = bfd_get_section_flags (dynobj, sreloc);
10830 flags &= ~(SEC_LOAD | SEC_ALLOC);
10831 bfd_set_section_flags (dynobj, sreloc, flags);
10832 }
10833 }
10834
10835 /* If this is a global symbol, we count the number of
10836 relocations we need for this symbol. */
10837 if (h != NULL)
10838 {
10839 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10840 }
10841 else
10842 {
10843 /* Track dynamic relocs needed for local syms too.
10844 We really need local syms available to do this
10845 easily. Oh well. */
10846
10847 asection *s;
10848 void *vpp;
10849
10850 s = bfd_section_from_r_symndx (abfd, &htab->sym_sec,
10851 sec, r_symndx);
10852 if (s == NULL)
10853 return FALSE;
10854
10855 vpp = &elf_section_data (s)->local_dynrel;
10856 head = (struct elf32_arm_relocs_copied **) vpp;
10857 }
10858
10859 p = *head;
10860 if (p == NULL || p->section != sec)
10861 {
10862 bfd_size_type amt = sizeof *p;
10863
10864 p = bfd_alloc (htab->root.dynobj, amt);
10865 if (p == NULL)
10866 return FALSE;
10867 p->next = *head;
10868 *head = p;
10869 p->section = sec;
10870 p->count = 0;
10871 p->pc_count = 0;
10872 }
10873
10874 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10875 p->pc_count += 1;
10876 p->count += 1;
10877 }
10878 break;
10879
10880 /* This relocation describes the C++ object vtable hierarchy.
10881 Reconstruct it for later use during GC. */
10882 case R_ARM_GNU_VTINHERIT:
10883 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10884 return FALSE;
10885 break;
10886
10887 /* This relocation describes which C++ vtable entries are actually
10888 used. Record for later use during GC. */
10889 case R_ARM_GNU_VTENTRY:
10890 BFD_ASSERT (h != NULL);
10891 if (h != NULL
10892 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10893 return FALSE;
10894 break;
10895 }
10896 }
10897
10898 return TRUE;
10899 }
10900
10901 /* Unwinding tables are not referenced directly. This pass marks them as
10902 required if the corresponding code section is marked. */
10903
10904 static bfd_boolean
10905 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10906 elf_gc_mark_hook_fn gc_mark_hook)
10907 {
10908 bfd *sub;
10909 Elf_Internal_Shdr **elf_shdrp;
10910 bfd_boolean again;
10911
10912 /* Marking EH data may cause additional code sections to be marked,
10913 requiring multiple passes. */
10914 again = TRUE;
10915 while (again)
10916 {
10917 again = FALSE;
10918 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10919 {
10920 asection *o;
10921
10922 if (! is_arm_elf (sub))
10923 continue;
10924
10925 elf_shdrp = elf_elfsections (sub);
10926 for (o = sub->sections; o != NULL; o = o->next)
10927 {
10928 Elf_Internal_Shdr *hdr;
10929
10930 hdr = &elf_section_data (o)->this_hdr;
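      /* An .ARM.exidx section's sh_link points at the text section it
         describes; mark the exidx section once that text section has
         been marked.  */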
10931 if (hdr->sh_type == SHT_ARM_EXIDX
10932 && hdr->sh_link
10933 && hdr->sh_link < elf_numsections (sub)
10934 && !o->gc_mark
10935 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10936 {
10937 again = TRUE;
10938 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
10939 return FALSE;
10940 }
10941 }
10942 }
10943 }
10944
10945 return TRUE;
10946 }
10947
10948 /* Treat mapping symbols as special target symbols. */
10949
10950 static bfd_boolean
10951 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
10952 {
10953 return bfd_is_arm_special_symbol_name (sym->name,
10954 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
10955 }
10956
10957 /* This is a copy of elf_find_function() from elf.c except that
10958 ARM mapping symbols are ignored when looking for function names
10959 and STT_ARM_TFUNC is considered to be a function type. */
10960
10961 static bfd_boolean
10962 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
10963 asection * section,
10964 asymbol ** symbols,
10965 bfd_vma offset,
10966 const char ** filename_ptr,
10967 const char ** functionname_ptr)
10968 {
10969 const char * filename = NULL;
10970 asymbol * func = NULL;
10971 bfd_vma low_func = 0;
10972 asymbol ** p;
10973
10974 for (p = symbols; *p != NULL; p++)
10975 {
10976 elf_symbol_type *q;
10977
10978 q = (elf_symbol_type *) *p;
10979
10980 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
10981 {
10982 default:
10983 break;
10984 case STT_FILE:
10985 filename = bfd_asymbol_name (&q->symbol);
10986 break;
10987 case STT_FUNC:
10988 case STT_ARM_TFUNC:
10989 case STT_NOTYPE:
10990 /* Skip mapping symbols. */
10991 if ((q->symbol.flags & BSF_LOCAL)
10992 && bfd_is_arm_special_symbol_name (q->symbol.name,
10993 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
10994 continue;
10995 /* Fall through. */
10996 if (bfd_get_section (&q->symbol) == section
10997 && q->symbol.value >= low_func
10998 && q->symbol.value <= offset)
10999 {
11000 func = (asymbol *) q;
11001 low_func = q->symbol.value;
11002 }
11003 break;
11004 }
11005 }
11006
11007 if (func == NULL)
11008 return FALSE;
11009
11010 if (filename_ptr)
11011 *filename_ptr = filename;
11012 if (functionname_ptr)
11013 *functionname_ptr = bfd_asymbol_name (func);
11014
11015 return TRUE;
11016 }
11017
11018
11019 /* Find the nearest line to a particular section and offset, for error
11020 reporting. This code is a duplicate of the code in elf.c, except
11021 that it uses arm_elf_find_function. */
11022
11023 static bfd_boolean
11024 elf32_arm_find_nearest_line (bfd * abfd,
11025 asection * section,
11026 asymbol ** symbols,
11027 bfd_vma offset,
11028 const char ** filename_ptr,
11029 const char ** functionname_ptr,
11030 unsigned int * line_ptr)
11031 {
11032 bfd_boolean found = FALSE;
11033
11034 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11035
11036 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11037 filename_ptr, functionname_ptr,
11038 line_ptr, 0,
11039 & elf_tdata (abfd)->dwarf2_find_line_info))
11040 {
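      /* If DWARF found the location but not the enclosing function
         name, fall back to the symbol table, and only let it supply a
         file name if DWARF did not already provide one.  */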
11041 if (!*functionname_ptr)
11042 arm_elf_find_function (abfd, section, symbols, offset,
11043 *filename_ptr ? NULL : filename_ptr,
11044 functionname_ptr);
11045
11046 return TRUE;
11047 }
11048
11049 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11050 & found, filename_ptr,
11051 functionname_ptr, line_ptr,
11052 & elf_tdata (abfd)->line_info))
11053 return FALSE;
11054
11055 if (found && (*functionname_ptr || *line_ptr))
11056 return TRUE;
11057
11058 if (symbols == NULL)
11059 return FALSE;
11060
11061 if (! arm_elf_find_function (abfd, section, symbols, offset,
11062 filename_ptr, functionname_ptr))
11063 return FALSE;
11064
11065 *line_ptr = 0;
11066 return TRUE;
11067 }
11068
11069 static bfd_boolean
11070 elf32_arm_find_inliner_info (bfd * abfd,
11071 const char ** filename_ptr,
11072 const char ** functionname_ptr,
11073 unsigned int * line_ptr)
11074 {
11075 bfd_boolean found;
11076 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11077 functionname_ptr, line_ptr,
11078 & elf_tdata (abfd)->dwarf2_find_line_info);
11079 return found;
11080 }
11081
11082 /* Adjust a symbol defined by a dynamic object and referenced by a
11083 regular object. The current definition is in some section of the
11084 dynamic object, but we're not including those sections. We have to
11085 change the definition to something the rest of the link can
11086 understand. */
11087
11088 static bfd_boolean
11089 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11090 struct elf_link_hash_entry * h)
11091 {
11092 bfd * dynobj;
11093 asection * s;
11094 struct elf32_arm_link_hash_entry * eh;
11095 struct elf32_arm_link_hash_table *globals;
11096
11097 globals = elf32_arm_hash_table (info);
11098 dynobj = elf_hash_table (info)->dynobj;
11099
11100 /* Make sure we know what is going on here. */
11101 BFD_ASSERT (dynobj != NULL
11102 && (h->needs_plt
11103 || h->u.weakdef != NULL
11104 || (h->def_dynamic
11105 && h->ref_regular
11106 && !h->def_regular)));
11107
11108 eh = (struct elf32_arm_link_hash_entry *) h;
11109
11110 /* If this is a function, put it in the procedure linkage table. We
11111 will fill in the contents of the procedure linkage table later,
11112 when we know the address of the .got section. */
11113 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11114 || h->needs_plt)
11115 {
11116 if (h->plt.refcount <= 0
11117 || SYMBOL_CALLS_LOCAL (info, h)
11118 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11119 && h->root.type == bfd_link_hash_undefweak))
11120 {
11121 /* This case can occur if we saw a PLT32 reloc in an input
11122 file, but the symbol was never referred to by a dynamic
11123 object, or if all references were garbage collected. In
11124 such a case, we don't actually need to build a procedure
11125 linkage table, and we can just do a PC24 reloc instead. */
11126 h->plt.offset = (bfd_vma) -1;
11127 eh->plt_thumb_refcount = 0;
11128 eh->plt_maybe_thumb_refcount = 0;
11129 h->needs_plt = 0;
11130 }
11131
11132 return TRUE;
11133 }
11134 else
11135 {
11136 /* It's possible that we incorrectly decided a .plt reloc was
11137 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11138 in check_relocs. We can't decide accurately between function
11139 and non-function syms in check_relocs; objects loaded later in
11140 the link may change h->type. So fix it now. */
11141 h->plt.offset = (bfd_vma) -1;
11142 eh->plt_thumb_refcount = 0;
11143 eh->plt_maybe_thumb_refcount = 0;
11144 }
11145
11146 /* If this is a weak symbol, and there is a real definition, the
11147 processor independent code will have arranged for us to see the
11148 real definition first, and we can just use the same value. */
11149 if (h->u.weakdef != NULL)
11150 {
11151 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11152 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11153 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11154 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11155 return TRUE;
11156 }
11157
11158 /* If there are no non-GOT references, we do not need a copy
11159 relocation. */
11160 if (!h->non_got_ref)
11161 return TRUE;
11162
11163 /* This is a reference to a symbol defined by a dynamic object which
11164 is not a function. */
11165
11166 /* If we are creating a shared library, we must presume that the
11167 only references to the symbol are via the global offset table.
11168 For such cases we need not do anything here; the relocations will
11169 be handled correctly by relocate_section. Relocatable executables
11170 can reference data in shared objects directly, so we don't need to
11171 do anything here. */
11172 if (info->shared || globals->root.is_relocatable_executable)
11173 return TRUE;
11174
11175 if (h->size == 0)
11176 {
11177 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11178 h->root.root.string);
11179 return TRUE;
11180 }
11181
11182 /* We must allocate the symbol in our .dynbss section, which will
11183 become part of the .bss section of the executable. There will be
11184 an entry for this symbol in the .dynsym section. The dynamic
11185 object will contain position independent code, so all references
11186 from the dynamic object to this symbol will go through the global
11187 offset table. The dynamic linker will use the .dynsym entry to
11188 determine the address it must put in the global offset table, so
11189 both the dynamic object and the regular object will refer to the
11190 same memory location for the variable. */
11191 s = bfd_get_section_by_name (dynobj, ".dynbss");
11192 BFD_ASSERT (s != NULL);
11193
11194 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11195 copy the initial value out of the dynamic object and into the
11196 runtime process image. We need to remember the offset into the
11197 .rel(a).bss section we are going to use. */
11198 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11199 {
11200 asection *srel;
11201
11202 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11203 BFD_ASSERT (srel != NULL);
11204 srel->size += RELOC_SIZE (globals);
11205 h->needs_copy = 1;
11206 }
11207
11208 return _bfd_elf_adjust_dynamic_copy (h, s);
11209 }
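/* A hypothetical example of the copy-reloc case handled above (the
   names shared_counter and libfoo.so are invented for illustration):

       extern int shared_counter;   (defined only in libfoo.so)
       int get (void) { return shared_counter; }

   Linked into a non-PIC executable, shared_counter has non-GOT
   references from a regular object, so space is reserved for it in
   .dynbss and an R_ARM_COPY relocation is added to .rel(a).bss; the
   dynamic linker then copies the initial value out of the shared
   object at load time.  */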
11210
11211 /* Allocate space in .plt, .got and associated reloc sections for
11212 dynamic relocs. */
11213
11214 static bfd_boolean
11215 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11216 {
11217 struct bfd_link_info *info;
11218 struct elf32_arm_link_hash_table *htab;
11219 struct elf32_arm_link_hash_entry *eh;
11220 struct elf32_arm_relocs_copied *p;
11221 bfd_signed_vma thumb_refs;
11222
11223 eh = (struct elf32_arm_link_hash_entry *) h;
11224
11225 if (h->root.type == bfd_link_hash_indirect)
11226 return TRUE;
11227
11228 if (h->root.type == bfd_link_hash_warning)
11229 /* When warning symbols are created, they **replace** the "real"
11230 entry in the hash table, thus we never get to see the real
11231 symbol in a hash traversal. So look at it now. */
11232 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11233
11234 info = (struct bfd_link_info *) inf;
11235 htab = elf32_arm_hash_table (info);
11236
11237 if (htab->root.dynamic_sections_created
11238 && h->plt.refcount > 0)
11239 {
11240 /* Make sure this symbol is output as a dynamic symbol.
11241 Undefined weak syms won't yet be marked as dynamic. */
11242 if (h->dynindx == -1
11243 && !h->forced_local)
11244 {
11245 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11246 return FALSE;
11247 }
11248
11249 if (info->shared
11250 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11251 {
11252 asection *s = htab->splt;
11253
11254 /* If this is the first .plt entry, make room for the special
11255 first entry. */
11256 if (s->size == 0)
11257 s->size += htab->plt_header_size;
11258
11259 h->plt.offset = s->size;
11260
11261 /* If we will insert a Thumb trampoline before this PLT, leave room
11262 for it. */
11263 thumb_refs = eh->plt_thumb_refcount;
11264 if (!htab->use_blx)
11265 thumb_refs += eh->plt_maybe_thumb_refcount;
11266
11267 if (thumb_refs > 0)
11268 {
11269 h->plt.offset += PLT_THUMB_STUB_SIZE;
11270 s->size += PLT_THUMB_STUB_SIZE;
11271 }
11272
11273 /* If this symbol is not defined in a regular file, and we are
11274 not generating a shared library, then set the symbol to this
11275 location in the .plt. This is required to make function
11276 pointers compare as equal between the normal executable and
11277 the shared library. */
11278 if (! info->shared
11279 && !h->def_regular)
11280 {
11281 h->root.u.def.section = s;
11282 h->root.u.def.value = h->plt.offset;
11283
11284 /* Make sure the function is not marked as Thumb, in case
11285 it is the target of an ABS32 relocation, which will
11286 point to the PLT entry. */
11287 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11288 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11289 }
11290
11291 /* Make room for this entry. */
11292 s->size += htab->plt_entry_size;
11293
11294 if (!htab->symbian_p)
11295 {
11296 /* We also need to make an entry in the .got.plt section, which
11297 will be placed in the .got section by the linker script. */
11298 eh->plt_got_offset = htab->sgotplt->size;
11299 htab->sgotplt->size += 4;
11300 }
11301
11302 /* We also need to make an entry in the .rel(a).plt section. */
11303 htab->srelplt->size += RELOC_SIZE (htab);
11304
11305 /* VxWorks executables have a second set of relocations for
11306 each PLT entry. They go in a separate relocation section,
11307 which is processed by the kernel loader. */
11308 if (htab->vxworks_p && !info->shared)
11309 {
11310 /* There is a relocation for the initial PLT entry:
11311 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11312 if (h->plt.offset == htab->plt_header_size)
11313 htab->srelplt2->size += RELOC_SIZE (htab);
11314
11315 /* There are two extra relocations for each subsequent
11316 PLT entry: an R_ARM_32 relocation for the GOT entry,
11317 and an R_ARM_32 relocation for the PLT entry. */
11318 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11319 }
11320 }
11321 else
11322 {
11323 h->plt.offset = (bfd_vma) -1;
11324 h->needs_plt = 0;
11325 }
11326 }
11327 else
11328 {
11329 h->plt.offset = (bfd_vma) -1;
11330 h->needs_plt = 0;
11331 }
11332
11333 if (h->got.refcount > 0)
11334 {
11335 asection *s;
11336 bfd_boolean dyn;
11337 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11338 int indx;
11339
11340 /* Make sure this symbol is output as a dynamic symbol.
11341 Undefined weak syms won't yet be marked as dynamic. */
11342 if (h->dynindx == -1
11343 && !h->forced_local)
11344 {
11345 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11346 return FALSE;
11347 }
11348
11349 if (!htab->symbian_p)
11350 {
11351 s = htab->sgot;
11352 h->got.offset = s->size;
11353
11354 if (tls_type == GOT_UNKNOWN)
11355 abort ();
11356
11357 if (tls_type == GOT_NORMAL)
11358 /* Non-TLS symbols need one GOT slot. */
11359 s->size += 4;
11360 else
11361 {
11362 if (tls_type & GOT_TLS_GD)
11363 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11364 s->size += 8;
11365 if (tls_type & GOT_TLS_IE)
11366 /* R_ARM_TLS_IE32 needs one GOT slot. */
11367 s->size += 4;
11368 }
11369
11370 dyn = htab->root.dynamic_sections_created;
11371
11372 indx = 0;
11373 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11374 && (!info->shared
11375 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11376 indx = h->dynindx;
11377
11378 if (tls_type != GOT_NORMAL
11379 && (info->shared || indx != 0)
11380 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11381 || h->root.type != bfd_link_hash_undefweak))
11382 {
11383 if (tls_type & GOT_TLS_IE)
11384 htab->srelgot->size += RELOC_SIZE (htab);
11385
11386 if (tls_type & GOT_TLS_GD)
11387 htab->srelgot->size += RELOC_SIZE (htab);
11388
11389 if ((tls_type & GOT_TLS_GD) && indx != 0)
11390 htab->srelgot->size += RELOC_SIZE (htab);
11391 }
11392 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11393 || h->root.type != bfd_link_hash_undefweak)
11394 && (info->shared
11395 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11396 htab->srelgot->size += RELOC_SIZE (htab);
11397 }
11398 }
11399 else
11400 h->got.offset = (bfd_vma) -1;
11401
11402 /* Allocate stubs for exported Thumb functions on v4t. */
11403 if (!htab->use_blx && h->dynindx != -1
11404 && h->def_regular
11405 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11406 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11407 {
11408 struct elf_link_hash_entry * th;
11409 struct bfd_link_hash_entry * bh;
11410 struct elf_link_hash_entry * myh;
11411 char name[1024];
11412 asection *s;
11413 bh = NULL;
11414 /* Create a new symbol to register the real location of the function. */
11415 s = h->root.u.def.section;
11416 sprintf (name, "__real_%s", h->root.root.string);
11417 _bfd_generic_link_add_one_symbol (info, s->owner,
11418 name, BSF_GLOBAL, s,
11419 h->root.u.def.value,
11420 NULL, TRUE, FALSE, &bh);
11421
11422 myh = (struct elf_link_hash_entry *) bh;
11423 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11424 myh->forced_local = 1;
11425 eh->export_glue = myh;
11426 th = record_arm_to_thumb_glue (info, h);
11427 /* Point the symbol at the stub. */
11428 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11429 h->root.u.def.section = th->root.u.def.section;
11430 h->root.u.def.value = th->root.u.def.value & ~1;
11431 }
11432
11433 if (eh->relocs_copied == NULL)
11434 return TRUE;
11435
11436 /* In the shared -Bsymbolic case, discard space allocated for
11437 dynamic pc-relative relocs against symbols which turn out to be
11438 defined in regular objects. For the normal shared case, discard
11439 space for pc-relative relocs that have become local due to symbol
11440 visibility changes. */
11441
11442 if (info->shared || htab->root.is_relocatable_executable)
11443 {
11444 /* The only relocs that use pc_count are R_ARM_REL32 and
11445 R_ARM_REL32_NOI, which will appear on something like
11446 ".long foo - .". We want calls to protected symbols to resolve
11447 directly to the function rather than going via the plt. If people
11448 want function pointer comparisons to work as expected then they
11449 should avoid writing assembly like ".long foo - .". */
11450 if (SYMBOL_CALLS_LOCAL (info, h))
11451 {
11452 struct elf32_arm_relocs_copied **pp;
11453
11454 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11455 {
11456 p->count -= p->pc_count;
11457 p->pc_count = 0;
11458 if (p->count == 0)
11459 *pp = p->next;
11460 else
11461 pp = &p->next;
11462 }
11463 }
11464
11465 if (elf32_arm_hash_table (info)->vxworks_p)
11466 {
11467 struct elf32_arm_relocs_copied **pp;
11468
11469 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11470 {
11471 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11472 *pp = p->next;
11473 else
11474 pp = &p->next;
11475 }
11476 }
11477
11478 /* Also discard relocs on undefined weak syms with non-default
11479 visibility. */
11480 if (eh->relocs_copied != NULL
11481 && h->root.type == bfd_link_hash_undefweak)
11482 {
11483 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11484 eh->relocs_copied = NULL;
11485
11486 /* Make sure undefined weak symbols are output as a dynamic
11487 symbol in PIEs. */
11488 else if (h->dynindx == -1
11489 && !h->forced_local)
11490 {
11491 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11492 return FALSE;
11493 }
11494 }
11495
11496 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11497 && h->root.type == bfd_link_hash_new)
11498 {
11499 /* Output absolute symbols so that we can create relocations
11500 against them. For normal symbols we output a relocation
11501 against the section that contains them. */
11502 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11503 return FALSE;
11504 }
11505
11506 }
11507 else
11508 {
11509 /* For the non-shared case, discard space for relocs against
11510 symbols which turn out to need copy relocs or are not
11511 dynamic. */
11512
11513 if (!h->non_got_ref
11514 && ((h->def_dynamic
11515 && !h->def_regular)
11516 || (htab->root.dynamic_sections_created
11517 && (h->root.type == bfd_link_hash_undefweak
11518 || h->root.type == bfd_link_hash_undefined))))
11519 {
11520 /* Make sure this symbol is output as a dynamic symbol.
11521 Undefined weak syms won't yet be marked as dynamic. */
11522 if (h->dynindx == -1
11523 && !h->forced_local)
11524 {
11525 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11526 return FALSE;
11527 }
11528
11529 /* If that succeeded, we know we'll be keeping all the
11530 relocs. */
11531 if (h->dynindx != -1)
11532 goto keep;
11533 }
11534
11535 eh->relocs_copied = NULL;
11536
11537 keep: ;
11538 }
11539
11540 /* Finally, allocate space. */
11541 for (p = eh->relocs_copied; p != NULL; p = p->next)
11542 {
11543 asection *sreloc = elf_section_data (p->section)->sreloc;
11544 sreloc->size += p->count * RELOC_SIZE (htab);
11545 }
11546
11547 return TRUE;
11548 }
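/* Rough sketch of the space reserved above for one dynamic symbol;
   the exact sizes depend on the target variant, so treat this as
   illustrative rather than definitive:

     .plt          plt_header_size once, then per symbol an optional
                   4-byte Thumb trampoline (PLT_THUMB_STUB_SIZE)
                   followed by plt_entry_size bytes;
     .got.plt      one 4-byte slot per PLT entry (non-Symbian targets);
     .rel(a).plt   one relocation per PLT entry (VxWorks adds more in
                   .rela.plt.unloaded);
     .got          4 bytes for a normal slot, 8 for R_ARM_TLS_GD32,
                   4 for R_ARM_TLS_IE32, with matching .rel(a).got
                   relocations where a dynamic reloc is needed.  */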
11549
11550 /* Find any dynamic relocs that apply to read-only sections. */
11551
11552 static bfd_boolean
11553 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11554 {
11555 struct elf32_arm_link_hash_entry * eh;
11556 struct elf32_arm_relocs_copied * p;
11557
11558 if (h->root.type == bfd_link_hash_warning)
11559 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11560
11561 eh = (struct elf32_arm_link_hash_entry *) h;
11562 for (p = eh->relocs_copied; p != NULL; p = p->next)
11563 {
11564 asection *s = p->section;
11565
11566 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11567 {
11568 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11569
11570 info->flags |= DF_TEXTREL;
11571
11572 /* Not an error, just cut short the traversal. */
11573 return FALSE;
11574 }
11575 }
11576 return TRUE;
11577 }
11578
11579 void
11580 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11581 int byteswap_code)
11582 {
11583 struct elf32_arm_link_hash_table *globals;
11584
11585 globals = elf32_arm_hash_table (info);
11586 globals->byteswap_code = byteswap_code;
11587 }
11588
11589 /* Set the sizes of the dynamic sections. */
11590
11591 static bfd_boolean
11592 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11593 struct bfd_link_info * info)
11594 {
11595 bfd * dynobj;
11596 asection * s;
11597 bfd_boolean plt;
11598 bfd_boolean relocs;
11599 bfd *ibfd;
11600 struct elf32_arm_link_hash_table *htab;
11601
11602 htab = elf32_arm_hash_table (info);
11603 dynobj = elf_hash_table (info)->dynobj;
11604 BFD_ASSERT (dynobj != NULL);
11605 check_use_blx (htab);
11606
11607 if (elf_hash_table (info)->dynamic_sections_created)
11608 {
11609 /* Set the contents of the .interp section to the interpreter. */
11610 if (info->executable)
11611 {
11612 s = bfd_get_section_by_name (dynobj, ".interp");
11613 BFD_ASSERT (s != NULL);
11614 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11615 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11616 }
11617 }
11618
11619 /* Set up .got offsets for local syms, and space for local dynamic
11620 relocs. */
11621 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11622 {
11623 bfd_signed_vma *local_got;
11624 bfd_signed_vma *end_local_got;
11625 char *local_tls_type;
11626 bfd_size_type locsymcount;
11627 Elf_Internal_Shdr *symtab_hdr;
11628 asection *srel;
11629 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11630
11631 if (! is_arm_elf (ibfd))
11632 continue;
11633
11634 for (s = ibfd->sections; s != NULL; s = s->next)
11635 {
11636 struct elf32_arm_relocs_copied *p;
11637
11638 for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11639 {
11640 if (!bfd_is_abs_section (p->section)
11641 && bfd_is_abs_section (p->section->output_section))
11642 {
11643 /* Input section has been discarded, either because
11644 it is a copy of a linkonce section or due to
11645 linker script /DISCARD/, so we'll be discarding
11646 the relocs too. */
11647 }
11648 else if (is_vxworks
11649 && strcmp (p->section->output_section->name,
11650 ".tls_vars") == 0)
11651 {
11652 /* Relocations in vxworks .tls_vars sections are
11653 handled specially by the loader. */
11654 }
11655 else if (p->count != 0)
11656 {
11657 srel = elf_section_data (p->section)->sreloc;
11658 srel->size += p->count * RELOC_SIZE (htab);
11659 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11660 info->flags |= DF_TEXTREL;
11661 }
11662 }
11663 }
11664
11665 local_got = elf_local_got_refcounts (ibfd);
11666 if (!local_got)
11667 continue;
11668
11669 symtab_hdr = & elf_symtab_hdr (ibfd);
11670 locsymcount = symtab_hdr->sh_info;
11671 end_local_got = local_got + locsymcount;
11672 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11673 s = htab->sgot;
11674 srel = htab->srelgot;
11675 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11676 {
11677 if (*local_got > 0)
11678 {
11679 *local_got = s->size;
11680 if (*local_tls_type & GOT_TLS_GD)
11681 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11682 s->size += 8;
11683 if (*local_tls_type & GOT_TLS_IE)
11684 s->size += 4;
11685 if (*local_tls_type == GOT_NORMAL)
11686 s->size += 4;
11687
11688 if (info->shared || *local_tls_type == GOT_TLS_GD)
11689 srel->size += RELOC_SIZE (htab);
11690 }
11691 else
11692 *local_got = (bfd_vma) -1;
11693 }
11694 }
11695
11696 if (htab->tls_ldm_got.refcount > 0)
11697 {
11698 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11699 for R_ARM_TLS_LDM32 relocations. */
11700 htab->tls_ldm_got.offset = htab->sgot->size;
11701 htab->sgot->size += 8;
11702 if (info->shared)
11703 htab->srelgot->size += RELOC_SIZE (htab);
11704 }
11705 else
11706 htab->tls_ldm_got.offset = -1;
11707
11708 /* Allocate global sym .plt and .got entries, and space for global
11709 sym dynamic relocs. */
11710 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11711
11712 /* Here we rummage through the found bfds to collect glue information. */
11713 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11714 {
11715 if (! is_arm_elf (ibfd))
11716 continue;
11717
11718 /* Initialise mapping tables for code/data. */
11719 bfd_elf32_arm_init_maps (ibfd);
11720
11721 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11722 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11723 /* xgettext:c-format */
11724 _bfd_error_handler (_("Errors encountered processing file %s"),
11725 ibfd->filename);
11726 }
11727
11728 /* Allocate space for the glue sections now that we've sized them. */
11729 bfd_elf32_arm_allocate_interworking_sections (info);
11730
11731 /* The check_relocs and adjust_dynamic_symbol entry points have
11732 determined the sizes of the various dynamic sections. Allocate
11733 memory for them. */
11734 plt = FALSE;
11735 relocs = FALSE;
11736 for (s = dynobj->sections; s != NULL; s = s->next)
11737 {
11738 const char * name;
11739
11740 if ((s->flags & SEC_LINKER_CREATED) == 0)
11741 continue;
11742
11743 /* It's OK to base decisions on the section name, because none
11744 of the dynobj section names depend upon the input files. */
11745 name = bfd_get_section_name (dynobj, s);
11746
11747 if (strcmp (name, ".plt") == 0)
11748 {
11749 /* Remember whether there is a PLT. */
11750 plt = s->size != 0;
11751 }
11752 else if (CONST_STRNEQ (name, ".rel"))
11753 {
11754 if (s->size != 0)
11755 {
11756 /* Remember whether there are any reloc sections other
11757 than .rel(a).plt and .rela.plt.unloaded. */
11758 if (s != htab->srelplt && s != htab->srelplt2)
11759 relocs = TRUE;
11760
11761 /* We use the reloc_count field as a counter if we need
11762 to copy relocs into the output file. */
11763 s->reloc_count = 0;
11764 }
11765 }
11766 else if (! CONST_STRNEQ (name, ".got")
11767 && strcmp (name, ".dynbss") != 0)
11768 {
11769 /* It's not one of our sections, so don't allocate space. */
11770 continue;
11771 }
11772
11773 if (s->size == 0)
11774 {
11775 /* If we don't need this section, strip it from the
11776 output file. This is mostly to handle .rel(a).bss and
11777 .rel(a).plt. We must create both sections in
11778 create_dynamic_sections, because they must be created
11779 before the linker maps input sections to output
11780 sections. The linker does that before
11781 adjust_dynamic_symbol is called, and it is that
11782 function which decides whether anything needs to go
11783 into these sections. */
11784 s->flags |= SEC_EXCLUDE;
11785 continue;
11786 }
11787
11788 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11789 continue;
11790
11791 /* Allocate memory for the section contents. */
11792 s->contents = bfd_zalloc (dynobj, s->size);
11793 if (s->contents == NULL)
11794 return FALSE;
11795 }
11796
11797 if (elf_hash_table (info)->dynamic_sections_created)
11798 {
11799 /* Add some entries to the .dynamic section. We fill in the
11800 values later, in elf32_arm_finish_dynamic_sections, but we
11801 must add the entries now so that we get the correct size for
11802 the .dynamic section. The DT_DEBUG entry is filled in by the
11803 dynamic linker and used by the debugger. */
11804 #define add_dynamic_entry(TAG, VAL) \
11805 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11806
11807 if (info->executable)
11808 {
11809 if (!add_dynamic_entry (DT_DEBUG, 0))
11810 return FALSE;
11811 }
11812
11813 if (plt)
11814 {
11815 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11816 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11817 || !add_dynamic_entry (DT_PLTREL,
11818 htab->use_rel ? DT_REL : DT_RELA)
11819 || !add_dynamic_entry (DT_JMPREL, 0))
11820 return FALSE;
11821 }
11822
11823 if (relocs)
11824 {
11825 if (htab->use_rel)
11826 {
11827 if (!add_dynamic_entry (DT_REL, 0)
11828 || !add_dynamic_entry (DT_RELSZ, 0)
11829 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11830 return FALSE;
11831 }
11832 else
11833 {
11834 if (!add_dynamic_entry (DT_RELA, 0)
11835 || !add_dynamic_entry (DT_RELASZ, 0)
11836 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11837 return FALSE;
11838 }
11839 }
11840
11841 /* If any dynamic relocs apply to a read-only section,
11842 then we need a DT_TEXTREL entry. */
11843 if ((info->flags & DF_TEXTREL) == 0)
11844 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11845 info);
11846
11847 if ((info->flags & DF_TEXTREL) != 0)
11848 {
11849 if (!add_dynamic_entry (DT_TEXTREL, 0))
11850 return FALSE;
11851 }
11852 if (htab->vxworks_p
11853 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11854 return FALSE;
11855 }
11856 #undef add_dynamic_entry
11857
11858 return TRUE;
11859 }
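/* For reference, the dynamic tags added above are the usual ELF set:
   DT_DEBUG for executables; DT_PLTGOT, DT_PLTRELSZ, DT_PLTREL and
   DT_JMPREL when a PLT exists; DT_REL/DT_RELSZ/DT_RELENT (or their
   RELA equivalents) when other dynamic relocations exist; DT_TEXTREL
   when any such relocation applies to a read-only section; plus the
   VxWorks-specific entries on that target.  */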
11860
11861 /* Finish up dynamic symbol handling. We set the contents of various
11862 dynamic sections here. */
11863
11864 static bfd_boolean
11865 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11866 struct bfd_link_info * info,
11867 struct elf_link_hash_entry * h,
11868 Elf_Internal_Sym * sym)
11869 {
11870 bfd * dynobj;
11871 struct elf32_arm_link_hash_table *htab;
11872 struct elf32_arm_link_hash_entry *eh;
11873
11874 dynobj = elf_hash_table (info)->dynobj;
11875 htab = elf32_arm_hash_table (info);
11876 eh = (struct elf32_arm_link_hash_entry *) h;
11877
11878 if (h->plt.offset != (bfd_vma) -1)
11879 {
11880 asection * splt;
11881 asection * srel;
11882 bfd_byte *loc;
11883 bfd_vma plt_index;
11884 Elf_Internal_Rela rel;
11885
11886 /* This symbol has an entry in the procedure linkage table. Set
11887 it up. */
11888
11889 BFD_ASSERT (h->dynindx != -1);
11890
11891 splt = bfd_get_section_by_name (dynobj, ".plt");
11892 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11893 BFD_ASSERT (splt != NULL && srel != NULL);
11894
11895 /* Fill in the entry in the procedure linkage table. */
11896 if (htab->symbian_p)
11897 {
11898 put_arm_insn (htab, output_bfd,
11899 elf32_arm_symbian_plt_entry[0],
11900 splt->contents + h->plt.offset);
11901 bfd_put_32 (output_bfd,
11902 elf32_arm_symbian_plt_entry[1],
11903 splt->contents + h->plt.offset + 4);
11904
11905 /* Fill in the entry in the .rel.plt section. */
11906 rel.r_offset = (splt->output_section->vma
11907 + splt->output_offset
11908 + h->plt.offset + 4);
11909 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11910
11911 /* Get the index in the procedure linkage table which
11912 corresponds to this symbol. This is the index of this symbol
11913 in all the symbols for which we are making plt entries. The
11914 first entry in the procedure linkage table is reserved. */
11915 plt_index = ((h->plt.offset - htab->plt_header_size)
11916 / htab->plt_entry_size);
11917 }
11918 else
11919 {
11920 bfd_vma got_offset, got_address, plt_address;
11921 bfd_vma got_displacement;
11922 asection * sgot;
11923 bfd_byte * ptr;
11924
11925 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
11926 BFD_ASSERT (sgot != NULL);
11927
11928 /* Get the offset into the .got.plt table of the entry that
11929 corresponds to this function. */
11930 got_offset = eh->plt_got_offset;
11931
11932 /* Get the index in the procedure linkage table which
11933 corresponds to this symbol. This is the index of this symbol
11934 in all the symbols for which we are making plt entries. The
11935 first three entries in .got.plt are reserved; after that
11936 symbols appear in the same order as in .plt. */
11937 plt_index = (got_offset - 12) / 4;
11938
11939 /* Calculate the address of the GOT entry. */
11940 got_address = (sgot->output_section->vma
11941 + sgot->output_offset
11942 + got_offset);
11943
11944 /* ...and the address of the PLT entry. */
11945 plt_address = (splt->output_section->vma
11946 + splt->output_offset
11947 + h->plt.offset);
11948
11949 ptr = htab->splt->contents + h->plt.offset;
11950 if (htab->vxworks_p && info->shared)
11951 {
11952 unsigned int i;
11953 bfd_vma val;
11954
11955 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11956 {
11957 val = elf32_arm_vxworks_shared_plt_entry[i];
11958 if (i == 2)
11959 val |= got_address - sgot->output_section->vma;
11960 if (i == 5)
11961 val |= plt_index * RELOC_SIZE (htab);
11962 if (i == 2 || i == 5)
11963 bfd_put_32 (output_bfd, val, ptr);
11964 else
11965 put_arm_insn (htab, output_bfd, val, ptr);
11966 }
11967 }
11968 else if (htab->vxworks_p)
11969 {
11970 unsigned int i;
11971 bfd_vma val;
11972
11973 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11974 {
11975 val = elf32_arm_vxworks_exec_plt_entry[i];
11976 if (i == 2)
11977 val |= got_address;
11978 if (i == 4)
11979 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
11980 if (i == 5)
11981 val |= plt_index * RELOC_SIZE (htab);
11982 if (i == 2 || i == 5)
11983 bfd_put_32 (output_bfd, val, ptr);
11984 else
11985 put_arm_insn (htab, output_bfd, val, ptr);
11986 }
11987
11988 loc = (htab->srelplt2->contents
11989 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
11990
11991 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
11992 referencing the GOT for this PLT entry. */
11993 rel.r_offset = plt_address + 8;
11994 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
11995 rel.r_addend = got_offset;
11996 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
11997 loc += RELOC_SIZE (htab);
11998
11999 /* Create the R_ARM_ABS32 relocation referencing the
12000 beginning of the PLT for this GOT entry. */
12001 rel.r_offset = got_address;
12002 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12003 rel.r_addend = 0;
12004 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12005 }
12006 else
12007 {
12008 bfd_signed_vma thumb_refs;
12009 /* Calculate the displacement between the PLT slot and the
12010 entry in the GOT. The eight-byte offset accounts for the
12011 value produced by adding to pc in the first instruction
12012 of the PLT stub. */
12013 got_displacement = got_address - (plt_address + 8);
12014
12015 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
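/* The standard (non-VxWorks, non-Symbian) PLT entry built below is,
   roughly, the sequence

       add  ip, pc, #(GOT_DISP & 0x0ff00000)
       add  ip, ip, #(GOT_DISP & 0x000ff000)
       ldr  pc, [ip, #(GOT_DISP & 0x00000fff)]!

   with the GOT displacement split into 8-, 8- and 12-bit chunks, which
   is why it must fit in 28 bits (the assertion above).  The optional
   Thumb stub placed in front of the entry is just "bx pc; nop" to
   switch into ARM state.  */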
12016
12017 thumb_refs = eh->plt_thumb_refcount;
12018 if (!htab->use_blx)
12019 thumb_refs += eh->plt_maybe_thumb_refcount;
12020
12021 if (thumb_refs > 0)
12022 {
12023 put_thumb_insn (htab, output_bfd,
12024 elf32_arm_plt_thumb_stub[0], ptr - 4);
12025 put_thumb_insn (htab, output_bfd,
12026 elf32_arm_plt_thumb_stub[1], ptr - 2);
12027 }
12028
12029 put_arm_insn (htab, output_bfd,
12030 elf32_arm_plt_entry[0]
12031 | ((got_displacement & 0x0ff00000) >> 20),
12032 ptr + 0);
12033 put_arm_insn (htab, output_bfd,
12034 elf32_arm_plt_entry[1]
12035 | ((got_displacement & 0x000ff000) >> 12),
12036 ptr+ 4);
12037 put_arm_insn (htab, output_bfd,
12038 elf32_arm_plt_entry[2]
12039 | (got_displacement & 0x00000fff),
12040 ptr + 8);
12041 #ifdef FOUR_WORD_PLT
12042 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12043 #endif
12044 }
12045
12046 /* Fill in the entry in the global offset table. */
12047 bfd_put_32 (output_bfd,
12048 (splt->output_section->vma
12049 + splt->output_offset),
12050 sgot->contents + got_offset);
12051
12052 /* Fill in the entry in the .rel(a).plt section. */
12053 rel.r_addend = 0;
12054 rel.r_offset = got_address;
12055 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12056 }
12057
12058 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12059 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12060
12061 if (!h->def_regular)
12062 {
12063 /* Mark the symbol as undefined, rather than as defined in
12064 the .plt section. Leave the value alone. */
12065 sym->st_shndx = SHN_UNDEF;
12066 /* If the symbol is weak, we do need to clear the value.
12067 Otherwise, the PLT entry would provide a definition for
12068 the symbol even if the symbol wasn't defined anywhere,
12069 and so the symbol would never be NULL. */
12070 if (!h->ref_regular_nonweak)
12071 sym->st_value = 0;
12072 }
12073 }
12074
12075 if (h->got.offset != (bfd_vma) -1
12076 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12077 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12078 {
12079 asection * sgot;
12080 asection * srel;
12081 Elf_Internal_Rela rel;
12082 bfd_byte *loc;
12083 bfd_vma offset;
12084
12085 /* This symbol has an entry in the global offset table. Set it
12086 up. */
12087 sgot = bfd_get_section_by_name (dynobj, ".got");
12088 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12089 BFD_ASSERT (sgot != NULL && srel != NULL);
12090
12091 offset = (h->got.offset & ~(bfd_vma) 1);
12092 rel.r_addend = 0;
12093 rel.r_offset = (sgot->output_section->vma
12094 + sgot->output_offset
12095 + offset);
12096
12097 /* If this is a static link, or it is a -Bsymbolic link and the
12098 symbol is defined locally or was forced to be local because
12099 of a version file, we just want to emit a RELATIVE reloc.
12100 The entry in the global offset table will already have been
12101 initialized in the relocate_section function. */
12102 if (info->shared
12103 && SYMBOL_REFERENCES_LOCAL (info, h))
12104 {
12105 BFD_ASSERT ((h->got.offset & 1) != 0);
12106 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12107 if (!htab->use_rel)
12108 {
12109 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12110 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12111 }
12112 }
12113 else
12114 {
12115 BFD_ASSERT ((h->got.offset & 1) == 0);
12116 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12117 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12118 }
12119
12120 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12121 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12122 }
12123
12124 if (h->needs_copy)
12125 {
12126 asection * s;
12127 Elf_Internal_Rela rel;
12128 bfd_byte *loc;
12129
12130 /* This symbol needs a copy reloc. Set it up. */
12131 BFD_ASSERT (h->dynindx != -1
12132 && (h->root.type == bfd_link_hash_defined
12133 || h->root.type == bfd_link_hash_defweak));
12134
12135 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12136 RELOC_SECTION (htab, ".bss"));
12137 BFD_ASSERT (s != NULL);
12138
12139 rel.r_addend = 0;
12140 rel.r_offset = (h->root.u.def.value
12141 + h->root.u.def.section->output_section->vma
12142 + h->root.u.def.section->output_offset);
12143 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12144 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12145 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12146 }
12147
12148 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12149 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12150 to the ".got" section. */
12151 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12152 || (!htab->vxworks_p && h == htab->root.hgot))
12153 sym->st_shndx = SHN_ABS;
12154
12155 return TRUE;
12156 }
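/* Note on the GOT bookkeeping above: the low bit of h->got.offset is
   used as a flag rather than as part of the offset.  It appears to be
   set when relocate_section has already initialised the slot and an
   R_ARM_RELATIVE reloc is wanted, and clear when the slot is zeroed
   here and an R_ARM_GLOB_DAT reloc is emitted instead; the real offset
   is recovered with "h->got.offset & ~(bfd_vma) 1".  */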
12157
12158 /* Finish up the dynamic sections. */
12159
12160 static bfd_boolean
12161 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12162 {
12163 bfd * dynobj;
12164 asection * sgot;
12165 asection * sdyn;
12166
12167 dynobj = elf_hash_table (info)->dynobj;
12168
12169 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12170 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12171 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12172
12173 if (elf_hash_table (info)->dynamic_sections_created)
12174 {
12175 asection *splt;
12176 Elf32_External_Dyn *dyncon, *dynconend;
12177 struct elf32_arm_link_hash_table *htab;
12178
12179 htab = elf32_arm_hash_table (info);
12180 splt = bfd_get_section_by_name (dynobj, ".plt");
12181 BFD_ASSERT (splt != NULL && sdyn != NULL);
12182
12183 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12184 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12185
12186 for (; dyncon < dynconend; dyncon++)
12187 {
12188 Elf_Internal_Dyn dyn;
12189 const char * name;
12190 asection * s;
12191
12192 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12193
12194 switch (dyn.d_tag)
12195 {
12196 unsigned int type;
12197
12198 default:
12199 if (htab->vxworks_p
12200 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12201 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12202 break;
12203
12204 case DT_HASH:
12205 name = ".hash";
12206 goto get_vma_if_bpabi;
12207 case DT_STRTAB:
12208 name = ".dynstr";
12209 goto get_vma_if_bpabi;
12210 case DT_SYMTAB:
12211 name = ".dynsym";
12212 goto get_vma_if_bpabi;
12213 case DT_VERSYM:
12214 name = ".gnu.version";
12215 goto get_vma_if_bpabi;
12216 case DT_VERDEF:
12217 name = ".gnu.version_d";
12218 goto get_vma_if_bpabi;
12219 case DT_VERNEED:
12220 name = ".gnu.version_r";
12221 goto get_vma_if_bpabi;
12222
12223 case DT_PLTGOT:
12224 name = ".got";
12225 goto get_vma;
12226 case DT_JMPREL:
12227 name = RELOC_SECTION (htab, ".plt");
12228 get_vma:
12229 s = bfd_get_section_by_name (output_bfd, name);
12230 BFD_ASSERT (s != NULL);
12231 if (!htab->symbian_p)
12232 dyn.d_un.d_ptr = s->vma;
12233 else
12234 /* In the BPABI, tags in the PT_DYNAMIC section point
12235 at the file offset, not the memory address, for the
12236 convenience of the post-linker. */
12237 dyn.d_un.d_ptr = s->filepos;
12238 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12239 break;
12240
12241 get_vma_if_bpabi:
12242 if (htab->symbian_p)
12243 goto get_vma;
12244 break;
12245
12246 case DT_PLTRELSZ:
12247 s = bfd_get_section_by_name (output_bfd,
12248 RELOC_SECTION (htab, ".plt"));
12249 BFD_ASSERT (s != NULL);
12250 dyn.d_un.d_val = s->size;
12251 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12252 break;
12253
12254 case DT_RELSZ:
12255 case DT_RELASZ:
12256 if (!htab->symbian_p)
12257 {
12258 /* My reading of the SVR4 ABI indicates that the
12259 procedure linkage table relocs (DT_JMPREL) should be
12260 included in the overall relocs (DT_REL). This is
12261 what Solaris does. However, UnixWare can not handle
12262 that case. Therefore, we override the DT_RELSZ entry
12263 here to make it not include the JMPREL relocs. Since
12264 the linker script arranges for .rel(a).plt to follow all
12265 other relocation sections, we don't have to worry
12266 about changing the DT_REL entry. */
12267 s = bfd_get_section_by_name (output_bfd,
12268 RELOC_SECTION (htab, ".plt"));
12269 if (s != NULL)
12270 dyn.d_un.d_val -= s->size;
12271 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12272 break;
12273 }
12274 /* Fall through. */
12275
12276 case DT_REL:
12277 case DT_RELA:
12278 /* In the BPABI, the DT_REL tag must point at the file
12279 offset, not the VMA, of the first relocation
12280 section. So, we use code similar to that in
12281 elflink.c, but do not check for SHF_ALLOC on the
12282 relocation section, since relocation sections are
12283 never allocated under the BPABI. The comments above
12284 about UnixWare notwithstanding, we include all of the
12285 relocations here. */
12286 if (htab->symbian_p)
12287 {
12288 unsigned int i;
12289 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12290 ? SHT_REL : SHT_RELA);
12291 dyn.d_un.d_val = 0;
12292 for (i = 1; i < elf_numsections (output_bfd); i++)
12293 {
12294 Elf_Internal_Shdr *hdr
12295 = elf_elfsections (output_bfd)[i];
12296 if (hdr->sh_type == type)
12297 {
12298 if (dyn.d_tag == DT_RELSZ
12299 || dyn.d_tag == DT_RELASZ)
12300 dyn.d_un.d_val += hdr->sh_size;
12301 else if ((ufile_ptr) hdr->sh_offset
12302 <= dyn.d_un.d_val - 1)
12303 dyn.d_un.d_val = hdr->sh_offset;
12304 }
12305 }
12306 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12307 }
12308 break;
12309
12310 /* Set the bottom bit of DT_INIT/FINI if the
12311 corresponding function is Thumb. */
12312 case DT_INIT:
12313 name = info->init_function;
12314 goto get_sym;
12315 case DT_FINI:
12316 name = info->fini_function;
12317 get_sym:
12318 /* If it wasn't set by elf_bfd_final_link
12319 then there is nothing to adjust. */
12320 if (dyn.d_un.d_val != 0)
12321 {
12322 struct elf_link_hash_entry * eh;
12323
12324 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12325 FALSE, FALSE, TRUE);
12326 if (eh != NULL
12327 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12328 {
12329 dyn.d_un.d_val |= 1;
12330 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12331 }
12332 }
12333 break;
12334 }
12335 }
12336
12337 /* Fill in the first entry in the procedure linkage table. */
12338 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12339 {
12340 const bfd_vma *plt0_entry;
12341 bfd_vma got_address, plt_address, got_displacement;
12342
12343 /* Calculate the addresses of the GOT and PLT. */
12344 got_address = sgot->output_section->vma + sgot->output_offset;
12345 plt_address = splt->output_section->vma + splt->output_offset;
12346
12347 if (htab->vxworks_p)
12348 {
12349 /* The VxWorks GOT is relocated by the dynamic linker.
12350 Therefore, we must emit relocations rather than simply
12351 computing the values now. */
12352 Elf_Internal_Rela rel;
12353
12354 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12355 put_arm_insn (htab, output_bfd, plt0_entry[0],
12356 splt->contents + 0);
12357 put_arm_insn (htab, output_bfd, plt0_entry[1],
12358 splt->contents + 4);
12359 put_arm_insn (htab, output_bfd, plt0_entry[2],
12360 splt->contents + 8);
12361 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12362
12363 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12364 rel.r_offset = plt_address + 12;
12365 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12366 rel.r_addend = 0;
12367 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12368 htab->srelplt2->contents);
12369 }
12370 else
12371 {
12372 got_displacement = got_address - (plt_address + 16);
12373
12374 plt0_entry = elf32_arm_plt0_entry;
12375 put_arm_insn (htab, output_bfd, plt0_entry[0],
12376 splt->contents + 0);
12377 put_arm_insn (htab, output_bfd, plt0_entry[1],
12378 splt->contents + 4);
12379 put_arm_insn (htab, output_bfd, plt0_entry[2],
12380 splt->contents + 8);
12381 put_arm_insn (htab, output_bfd, plt0_entry[3],
12382 splt->contents + 12);
12383
12384 #ifdef FOUR_WORD_PLT
12385 /* The displacement value goes in the otherwise-unused
12386 last word of the second entry. */
12387 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12388 #else
12389 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12390 #endif
12391 }
12392 }
12393
12394 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12395 really seem like the right value. */
12396 if (splt->output_section->owner == output_bfd)
12397 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12398
12399 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12400 {
12401 /* Correct the .rel(a).plt.unloaded relocations. They will have
12402 incorrect symbol indexes. */
12403 int num_plts;
12404 unsigned char *p;
12405
12406 num_plts = ((htab->splt->size - htab->plt_header_size)
12407 / htab->plt_entry_size);
12408 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12409
12410 for (; num_plts; num_plts--)
12411 {
12412 Elf_Internal_Rela rel;
12413
12414 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12415 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12416 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12417 p += RELOC_SIZE (htab);
12418
12419 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12420 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12421 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12422 p += RELOC_SIZE (htab);
12423 }
12424 }
12425 }
12426
12427 /* Fill in the first three entries in the global offset table. */
12428 if (sgot)
12429 {
12430 if (sgot->size > 0)
12431 {
12432 if (sdyn == NULL)
12433 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12434 else
12435 bfd_put_32 (output_bfd,
12436 sdyn->output_section->vma + sdyn->output_offset,
12437 sgot->contents);
12438 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12439 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12440 }
12441
12442 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12443 }
12444
12445 return TRUE;
12446 }
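/* The first three words of .got.plt written above follow the usual
   SVR4/ARM convention: got[0] holds the address of the .dynamic
   section (or zero for a static link), while got[1] and got[2] are
   left as zero here and are filled in at run time by the dynamic
   linker, typically with its link-map pointer and the address of the
   lazy resolution routine.  */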
12447
12448 static void
12449 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12450 {
12451 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12452 struct elf32_arm_link_hash_table *globals;
12453
12454 i_ehdrp = elf_elfheader (abfd);
12455
12456 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12457 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12458 else
12459 i_ehdrp->e_ident[EI_OSABI] = 0;
12460 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12461
12462 if (link_info)
12463 {
12464 globals = elf32_arm_hash_table (link_info);
12465 if (globals->byteswap_code)
12466 i_ehdrp->e_flags |= EF_ARM_BE8;
12467 }
12468 }
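/* Setting EF_ARM_BE8 above marks a BE-8 image: data is big-endian but
   instructions are stored little-endian.  The byteswap_code flag is
   enabled via bfd_elf32_arm_set_byteswap_code; the actual swapping of
   code bytes is applied later, when the sections are written out.  */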
12469
12470 static enum elf_reloc_type_class
12471 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12472 {
12473 switch ((int) ELF32_R_TYPE (rela->r_info))
12474 {
12475 case R_ARM_RELATIVE:
12476 return reloc_class_relative;
12477 case R_ARM_JUMP_SLOT:
12478 return reloc_class_plt;
12479 case R_ARM_COPY:
12480 return reloc_class_copy;
12481 default:
12482 return reloc_class_normal;
12483 }
12484 }
12485
12486 /* Set extra flags for a section of an ARM ELF file: note sections are link-once. */
12487
12488 static bfd_boolean
12489 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12490 {
12491 if (hdr->sh_type == SHT_NOTE)
12492 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12493
12494 return TRUE;
12495 }
12496
12497 static void
12498 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12499 {
12500 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12501 }
12502
12503 /* Return TRUE if this is an unwinding table entry. */
12504
12505 static bfd_boolean
12506 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12507 {
12508 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12509 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12510 }
12511
12512
12513 /* Set the type and flags for an ARM section. We do this based on
12514 the section name, which is a hack, but ought to work. */
12515
12516 static bfd_boolean
12517 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12518 {
12519 const char * name;
12520
12521 name = bfd_get_section_name (abfd, sec);
12522
12523 if (is_arm_elf_unwind_section_name (abfd, name))
12524 {
12525 hdr->sh_type = SHT_ARM_EXIDX;
12526 hdr->sh_flags |= SHF_LINK_ORDER;
12527 }
12528 return TRUE;
12529 }
12530
12531 /* Handle an ARM specific section when reading an object file. This is
12532 called when bfd_section_from_shdr finds a section with an unknown
12533 type. */
12534
12535 static bfd_boolean
12536 elf32_arm_section_from_shdr (bfd *abfd,
12537 Elf_Internal_Shdr * hdr,
12538 const char *name,
12539 int shindex)
12540 {
12541 /* There ought to be a place to keep ELF backend specific flags, but
12542 at the moment there isn't one. We just keep track of the
12543 sections by their name, instead. Fortunately, the ABI gives
12544 names for all the ARM specific sections, so we will probably get
12545 away with this. */
12546 switch (hdr->sh_type)
12547 {
12548 case SHT_ARM_EXIDX:
12549 case SHT_ARM_PREEMPTMAP:
12550 case SHT_ARM_ATTRIBUTES:
12551 break;
12552
12553 default:
12554 return FALSE;
12555 }
12556
12557 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12558 return FALSE;
12559
12560 return TRUE;
12561 }
12562
12563 /* A structure used to record a list of sections, independently
12564 of the next and prev fields in the asection structure. */
12565 typedef struct section_list
12566 {
12567 asection * sec;
12568 struct section_list * next;
12569 struct section_list * prev;
12570 }
12571 section_list;
12572
12573 /* Unfortunately we need to keep a list of sections for which
12574 an _arm_elf_section_data structure has been allocated. This
12575 is because it is possible for functions like elf32_arm_write_section
12576 to be called on a section which has had an elf_section_data structure
12577 allocated for it (and so the used_by_bfd field is valid) but
12578 for which the ARM extended version of this structure - the
12579 _arm_elf_section_data structure - has not been allocated. */
12580 static section_list * sections_with_arm_elf_section_data = NULL;
12581
12582 static void
12583 record_section_with_arm_elf_section_data (asection * sec)
12584 {
12585 struct section_list * entry;
12586
12587 entry = bfd_malloc (sizeof (* entry));
12588 if (entry == NULL)
12589 return;
12590 entry->sec = sec;
12591 entry->next = sections_with_arm_elf_section_data;
12592 entry->prev = NULL;
12593 if (entry->next != NULL)
12594 entry->next->prev = entry;
12595 sections_with_arm_elf_section_data = entry;
12596 }
12597
12598 static struct section_list *
12599 find_arm_elf_section_entry (asection * sec)
12600 {
12601 struct section_list * entry;
12602 static struct section_list * last_entry = NULL;
12603
12604 /* This is a short cut for the typical case where the sections are added
12605 to the sections_with_arm_elf_section_data list in forward order and
12606 then looked up here in backwards order. This makes a real difference
12607 to the ld-srec/sec64k.exp linker test. */
12608 entry = sections_with_arm_elf_section_data;
12609 if (last_entry != NULL)
12610 {
12611 if (last_entry->sec == sec)
12612 entry = last_entry;
12613 else if (last_entry->next != NULL
12614 && last_entry->next->sec == sec)
12615 entry = last_entry->next;
12616 }
12617
12618 for (; entry; entry = entry->next)
12619 if (entry->sec == sec)
12620 break;
12621
12622 if (entry)
12623 /* Record the entry prior to this one - it is the entry we are most
12624 likely to want to locate next time. Also this way if we have been
12625 called from unrecord_section_with_arm_elf_section_data() we will not
12626 be caching a pointer that is about to be freed. */
12627 last_entry = entry->prev;
12628
12629 return entry;
12630 }
12631
12632 static _arm_elf_section_data *
12633 get_arm_elf_section_data (asection * sec)
12634 {
12635 struct section_list * entry;
12636
12637 entry = find_arm_elf_section_entry (sec);
12638
12639 if (entry)
12640 return elf32_arm_section_data (entry->sec);
12641 else
12642 return NULL;
12643 }
12644
12645 static void
12646 unrecord_section_with_arm_elf_section_data (asection * sec)
12647 {
12648 struct section_list * entry;
12649
12650 entry = find_arm_elf_section_entry (sec);
12651
12652 if (entry)
12653 {
12654 if (entry->prev != NULL)
12655 entry->prev->next = entry->next;
12656 if (entry->next != NULL)
12657 entry->next->prev = entry->prev;
12658 if (entry == sections_with_arm_elf_section_data)
12659 sections_with_arm_elf_section_data = entry->next;
12660 free (entry);
12661 }
12662 }
12663
12664
12665 typedef struct
12666 {
12667 void *finfo;
12668 struct bfd_link_info *info;
12669 asection *sec;
12670 int sec_shndx;
12671 int (*func) (void *, const char *, Elf_Internal_Sym *,
12672 asection *, struct elf_link_hash_entry *);
12673 } output_arch_syminfo;
12674
12675 enum map_symbol_type
12676 {
12677 ARM_MAP_ARM,
12678 ARM_MAP_THUMB,
12679 ARM_MAP_DATA
12680 };
12681
12682
12683 /* Output a single mapping symbol. */
12684
12685 static bfd_boolean
12686 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12687 enum map_symbol_type type,
12688 bfd_vma offset)
12689 {
12690 static const char *names[3] = {"$a", "$t", "$d"};
12691 struct elf32_arm_link_hash_table *htab;
12692 Elf_Internal_Sym sym;
12693
12694 htab = elf32_arm_hash_table (osi->info);
12695 sym.st_value = osi->sec->output_section->vma
12696 + osi->sec->output_offset
12697 + offset;
12698 sym.st_size = 0;
12699 sym.st_other = 0;
12700 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12701 sym.st_shndx = osi->sec_shndx;
12702 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12703 }
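/* The names used above follow the ARM ELF mapping symbol convention:
   "$a" marks the start of a run of ARM instructions, "$t" a run of
   Thumb instructions, and "$d" a run of literal data.  For example, a
   PLT entry with a Thumb trampoline would get a "$t" at the bx pc/nop
   stub, an "$a" at its first ARM instruction, and a "$d" in front of
   any literal word that follows.  */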
12704
12705
12706 /* Output mapping symbols for PLT entries associated with H. */
12707
12708 static bfd_boolean
12709 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12710 {
12711 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12712 struct elf32_arm_link_hash_table *htab;
12713 struct elf32_arm_link_hash_entry *eh;
12714 bfd_vma addr;
12715
12716 htab = elf32_arm_hash_table (osi->info);
12717
12718 if (h->root.type == bfd_link_hash_indirect)
12719 return TRUE;
12720
12721 if (h->root.type == bfd_link_hash_warning)
12722 /* When warning symbols are created, they **replace** the "real"
12723 entry in the hash table, thus we never get to see the real
12724 symbol in a hash traversal. So look at it now. */
12725 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12726
12727 if (h->plt.offset == (bfd_vma) -1)
12728 return TRUE;
12729
12730 eh = (struct elf32_arm_link_hash_entry *) h;
12731 addr = h->plt.offset;
12732 if (htab->symbian_p)
12733 {
12734 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12735 return FALSE;
12736 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12737 return FALSE;
12738 }
12739 else if (htab->vxworks_p)
12740 {
12741 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12742 return FALSE;
12743 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12744 return FALSE;
12745 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12746 return FALSE;
12747 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12748 return FALSE;
12749 }
12750 else
12751 {
12752 bfd_signed_vma thumb_refs;
12753
12754 thumb_refs = eh->plt_thumb_refcount;
12755 if (!htab->use_blx)
12756 thumb_refs += eh->plt_maybe_thumb_refcount;
12757
12758 if (thumb_refs > 0)
12759 {
12760 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12761 return FALSE;
12762 }
12763 #ifdef FOUR_WORD_PLT
12764 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12765 return FALSE;
12766 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12767 return FALSE;
12768 #else
12769 /* A three-word PLT with no Thumb thunk contains only ARM code,
12770 so we only need to output a mapping symbol for the first PLT entry
12771 and for entries with Thumb thunks. */
12772 if (thumb_refs > 0 || addr == 20)
12773 {
12774 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12775 return FALSE;
12776 }
12777 #endif
12778 }
12779
12780 return TRUE;
12781 }
12782
12783 /* Output a single local symbol for a generated stub. */
12784
12785 static bfd_boolean
12786 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12787 bfd_vma offset, bfd_vma size)
12788 {
12789 struct elf32_arm_link_hash_table *htab;
12790 Elf_Internal_Sym sym;
12791
12792 htab = elf32_arm_hash_table (osi->info);
12793 sym.st_value = osi->sec->output_section->vma
12794 + osi->sec->output_offset
12795 + offset;
12796 sym.st_size = size;
12797 sym.st_other = 0;
12798 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12799 sym.st_shndx = osi->sec_shndx;
12800 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12801 }
12802
12803 static bfd_boolean
12804 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12805 void * in_arg)
12806 {
12807 struct elf32_arm_stub_hash_entry *stub_entry;
12808 struct bfd_link_info *info;
12809 struct elf32_arm_link_hash_table *htab;
12810 asection *stub_sec;
12811 bfd_vma addr;
12812 char *stub_name;
12813 output_arch_syminfo *osi;
12814 const insn_sequence *template;
12815 enum stub_insn_type prev_type;
12816 int size;
12817 int i;
12818 enum map_symbol_type sym_type;
12819
12820 /* Massage our args to the form they really have. */
12821 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12822 osi = (output_arch_syminfo *) in_arg;
12823
12824 info = osi->info;
12825
12826 htab = elf32_arm_hash_table (info);
12827 stub_sec = stub_entry->stub_sec;
12828
12829 /* Ensure this stub is attached to the current section being
12830 processed. */
12831 if (stub_sec != osi->sec)
12832 return TRUE;
12833
12834 addr = (bfd_vma) stub_entry->stub_offset;
12835 stub_name = stub_entry->output_name;
12836
12837 template = stub_entry->stub_template;
12838 switch (template[0].type)
12839 {
12840 case ARM_TYPE:
12841 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12842 return FALSE;
12843 break;
12844 case THUMB16_TYPE:
12845 case THUMB32_TYPE:
12846 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12847 stub_entry->stub_size))
12848 return FALSE;
12849 break;
12850 default:
12851 BFD_FAIL ();
12852 return 0;
12853 }
12854
12855 prev_type = DATA_TYPE;
12856 size = 0;
12857 for (i = 0; i < stub_entry->stub_template_size; i++)
12858 {
12859 switch (template[i].type)
12860 {
12861 case ARM_TYPE:
12862 sym_type = ARM_MAP_ARM;
12863 break;
12864
12865 case THUMB16_TYPE:
12866 case THUMB32_TYPE:
12867 sym_type = ARM_MAP_THUMB;
12868 break;
12869
12870 case DATA_TYPE:
12871 sym_type = ARM_MAP_DATA;
12872 break;
12873
12874 default:
12875 BFD_FAIL ();
12876 return FALSE;
12877 }
12878
12879 if (template[i].type != prev_type)
12880 {
12881 prev_type = template[i].type;
12882 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12883 return FALSE;
12884 }
12885
12886 switch (template[i].type)
12887 {
12888 case ARM_TYPE:
12889 case THUMB32_TYPE:
12890 size += 4;
12891 break;
12892
12893 case THUMB16_TYPE:
12894 size += 2;
12895 break;
12896
12897 case DATA_TYPE:
12898 size += 4;
12899 break;
12900
12901 default:
12902 BFD_FAIL ();
12903 return FALSE;
12904 }
12905 }
12906
12907 return TRUE;
12908 }
12909
12910 /* Output mapping symbols for linker generated sections. */
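/* ARM_MAP_ARM, ARM_MAP_THUMB and ARM_MAP_DATA correspond to the "$a",
"$t" and "$d" mapping symbols of the ARM ELF specification, marking the
start of ARM code, Thumb code and data respectively. */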
12911
12912 static bfd_boolean
12913 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12914 struct bfd_link_info *info,
12915 void *finfo,
12916 int (*func) (void *, const char *,
12917 Elf_Internal_Sym *,
12918 asection *,
12919 struct elf_link_hash_entry *))
12920 {
12921 output_arch_syminfo osi;
12922 struct elf32_arm_link_hash_table *htab;
12923 bfd_vma offset;
12924 bfd_size_type size;
12925
12926 htab = elf32_arm_hash_table (info);
12927 check_use_blx (htab);
12928
12929 osi.finfo = finfo;
12930 osi.info = info;
12931 osi.func = func;
12932
12933 /* ARM->Thumb glue. */
12934 if (htab->arm_glue_size > 0)
12935 {
12936 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12937 ARM2THUMB_GLUE_SECTION_NAME);
12938
12939 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12940 (output_bfd, osi.sec->output_section);
12941 if (info->shared || htab->root.is_relocatable_executable
12942 || htab->pic_veneer)
12943 size = ARM2THUMB_PIC_GLUE_SIZE;
12944 else if (htab->use_blx)
12945 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
12946 else
12947 size = ARM2THUMB_STATIC_GLUE_SIZE;
12948
12949 for (offset = 0; offset < htab->arm_glue_size; offset += size)
12950 {
12951 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
12952 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
12953 }
12954 }
12955
12956 /* Thumb->ARM glue. */
12957 if (htab->thumb_glue_size > 0)
12958 {
12959 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12960 THUMB2ARM_GLUE_SECTION_NAME);
12961
12962 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12963 (output_bfd, osi.sec->output_section);
12964 size = THUMB2ARM_GLUE_SIZE;
12965
12966 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
12967 {
12968 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
12969 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
12970 }
12971 }
12972
12973 /* ARMv4 BX veneers. */
12974 if (htab->bx_glue_size > 0)
12975 {
12976 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12977 ARM_BX_GLUE_SECTION_NAME);
12978
12979 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12980 (output_bfd, osi.sec->output_section);
12981
12982 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
12983 }
12984
12985 /* Long call stubs. */
12986 if (htab->stub_bfd && htab->stub_bfd->sections)
12987 {
12988 asection* stub_sec;
12989
12990 for (stub_sec = htab->stub_bfd->sections;
12991 stub_sec != NULL;
12992 stub_sec = stub_sec->next)
12993 {
12994 /* Ignore non-stub sections. */
12995 if (!strstr (stub_sec->name, STUB_SUFFIX))
12996 continue;
12997
12998 osi.sec = stub_sec;
12999
13000 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13001 (output_bfd, osi.sec->output_section);
13002
13003 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13004 }
13005 }
13006
13007 /* Finally, output mapping symbols for the PLT. */
13008 if (!htab->splt || htab->splt->size == 0)
13009 return TRUE;
13010
13011 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13012 htab->splt->output_section);
13013 osi.sec = htab->splt;
13014 /* Output mapping symbols for the PLT header. SymbianOS does not have a
13015 PLT header. */
13016 if (htab->vxworks_p)
13017 {
13018 /* VxWorks shared libraries have no PLT header. */
13019 if (!info->shared)
13020 {
13021 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13022 return FALSE;
13023 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13024 return FALSE;
13025 }
13026 }
13027 else if (!htab->symbian_p)
13028 {
13029 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13030 return FALSE;
13031 #ifndef FOUR_WORD_PLT
13032 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13033 return FALSE;
13034 #endif
13035 }
13036
13037 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13038 return TRUE;
13039 }
13040
13041 /* Allocate target specific section data. */
13042
13043 static bfd_boolean
13044 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13045 {
13046 if (!sec->used_by_bfd)
13047 {
13048 _arm_elf_section_data *sdata;
13049 bfd_size_type amt = sizeof (*sdata);
13050
13051 sdata = bfd_zalloc (abfd, amt);
13052 if (sdata == NULL)
13053 return FALSE;
13054 sec->used_by_bfd = sdata;
13055 }
13056
13057 record_section_with_arm_elf_section_data (sec);
13058
13059 return _bfd_elf_new_section_hook (abfd, sec);
13060 }
13061
13062
13063 /* Used to order a list of mapping symbols by address. */
13064
13065 static int
13066 elf32_arm_compare_mapping (const void * a, const void * b)
13067 {
13068 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13069 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13070
13071 if (amap->vma > bmap->vma)
13072 return 1;
13073 else if (amap->vma < bmap->vma)
13074 return -1;
13075 else if (amap->type > bmap->type)
13076 /* Ensure results do not depend on the host qsort for objects with
13077 multiple mapping symbols at the same address by sorting on type
13078 after vma. */
13079 return 1;
13080 else if (amap->type < bmap->type)
13081 return -1;
13082 else
13083 return 0;
13084 }
13085
13086 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13087
13088 static unsigned long
13089 offset_prel31 (unsigned long addr, bfd_vma offset)
13090 {
13091 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13092 }
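/* For instance (illustrative values only): offset_prel31 (0x7ffffffc, 8)
yields 0x00000004 -- the low 31 bits wrap while bit 31 is taken unchanged
from ADDR, so with bit 31 set the result would be 0x80000004 instead. */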
13093
13094 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13095 relocations. */
13096
13097 static void
13098 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13099 {
13100 unsigned long first_word = bfd_get_32 (output_bfd, from);
13101 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13102
13103 /* High bit of first word is supposed to be zero. */
13104 if ((first_word & 0x80000000ul) == 0)
13105 first_word = offset_prel31 (first_word, offset);
13106
13107 /* If the high bit of the second word is clear, and the word is not 0x1
13108 (EXIDX_CANTUNWIND), it is a prel31 offset to an .ARM.extab entry. */
13109 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13110 second_word = offset_prel31 (second_word, offset);
13111
13112 bfd_put_32 (output_bfd, first_word, to);
13113 bfd_put_32 (output_bfd, second_word, to + 4);
13114 }
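/* Each .ARM.exidx entry handled above is two words: the first is a prel31
offset to the function it covers, and the second is either EXIDX_CANTUNWIND
(0x1), an inline unwind description (high bit set, left untouched here), or
a prel31 offset into .ARM.extab. */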
13115
13116 /* Data for make_branch_to_a8_stub(). */
13117
13118 struct a8_branch_to_stub_data {
13119 asection *writing_section;
13120 bfd_byte *contents;
13121 };
13122
13123
13124 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13125 places for a particular section. */
13126
13127 static bfd_boolean
13128 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13129 void *in_arg)
13130 {
13131 struct elf32_arm_stub_hash_entry *stub_entry;
13132 struct a8_branch_to_stub_data *data;
13133 bfd_byte *contents;
13134 unsigned long branch_insn;
13135 bfd_vma veneered_insn_loc, veneer_entry_loc;
13136 bfd_signed_vma branch_offset;
13137 bfd *abfd;
13138 unsigned int index;
13139
13140 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13141 data = (struct a8_branch_to_stub_data *) in_arg;
13142
13143 if (stub_entry->target_section != data->writing_section
13144 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13145 return TRUE;
13146
13147 contents = data->contents;
13148
13149 veneered_insn_loc = stub_entry->target_section->output_section->vma
13150 + stub_entry->target_section->output_offset
13151 + stub_entry->target_value;
13152
13153 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13154 + stub_entry->stub_sec->output_offset
13155 + stub_entry->stub_offset;
13156
13157 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13158 veneered_insn_loc &= ~3u;
13159
13160 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13161
13162 abfd = stub_entry->target_section->owner;
13163 index = stub_entry->target_value;
13164
13165 /* We attempt to avoid this condition by setting stubs_always_after_branch
13166 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13167 This check is just to be on the safe side... */
13168 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13169 {
13170 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13171 "allocated in unsafe location"), abfd);
13172 return FALSE;
13173 }
13174
13175 switch (stub_entry->stub_type)
13176 {
13177 case arm_stub_a8_veneer_b:
13178 case arm_stub_a8_veneer_b_cond:
13179 branch_insn = 0xf0009000;
13180 goto jump24;
13181
13182 case arm_stub_a8_veneer_blx:
13183 branch_insn = 0xf000e800;
13184 goto jump24;
13185
13186 case arm_stub_a8_veneer_bl:
13187 {
13188 unsigned int i1, j1, i2, j2, s;
13189
13190 branch_insn = 0xf000d000;
13191
13192 jump24:
13193 if (branch_offset < -16777216 || branch_offset > 16777214)
13194 {
13195 /* There's not much we can do apart from complain if this
13196 happens. */
13197 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13198 "of range (input file too large)"), abfd);
13199 return FALSE;
13200 }
13201
13202 /* i1 = not(j1 eor s), so:
13203 not i1 = j1 eor s
13204 j1 = (not i1) eor s. */
13205
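/* For reference, the fields assembled below land in the 32-bit Thumb-2
branch encoding (first halfword in the upper 16 bits, as it is written
out first) as follows:
bit 26 : S = branch_offset bit 24
bits 25-16 : imm10 = branch_offset bits 21-12
bit 13 : J1 = (~I1) ^ S, with I1 = branch_offset bit 23
bit 11 : J2 = (~I2) ^ S, with I2 = branch_offset bit 22
bits 10-0 : imm11 = branch_offset bits 11-1. */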
13206 branch_insn |= (branch_offset >> 1) & 0x7ff;
13207 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13208 i2 = (branch_offset >> 22) & 1;
13209 i1 = (branch_offset >> 23) & 1;
13210 s = (branch_offset >> 24) & 1;
13211 j1 = (!i1) ^ s;
13212 j2 = (!i2) ^ s;
13213 branch_insn |= j2 << 11;
13214 branch_insn |= j1 << 13;
13215 branch_insn |= s << 26;
13216 }
13217 break;
13218
13219 default:
13220 BFD_FAIL ();
13221 return FALSE;
13222 }
13223
13224 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13225 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13226
13227 return TRUE;
13228 }
13229
13230 /* Do code byteswapping. Return FALSE afterwards so that the section is
13231 written out as normal. */
13232
13233 static bfd_boolean
13234 elf32_arm_write_section (bfd *output_bfd,
13235 struct bfd_link_info *link_info,
13236 asection *sec,
13237 bfd_byte *contents)
13238 {
13239 unsigned int mapcount, errcount;
13240 _arm_elf_section_data *arm_data;
13241 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13242 elf32_arm_section_map *map;
13243 elf32_vfp11_erratum_list *errnode;
13244 bfd_vma ptr;
13245 bfd_vma end;
13246 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13247 bfd_byte tmp;
13248 unsigned int i;
13249
13250 /* If this section has not been allocated an _arm_elf_section_data
13251 structure then we cannot record anything. */
13252 arm_data = get_arm_elf_section_data (sec);
13253 if (arm_data == NULL)
13254 return FALSE;
13255
13256 mapcount = arm_data->mapcount;
13257 map = arm_data->map;
13258 errcount = arm_data->erratumcount;
13259
13260 if (errcount != 0)
13261 {
13262 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13263
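/* Two kinds of fixup are applied below: BRANCH_TO_ARM_VENEER replaces the
original VFP instruction with a conditional ARM branch (original condition
code | 0x0a000000) to the veneer, while ARM_VENEER fills the veneer itself
with the displaced VFP instruction followed by an unconditional ARM branch
back to the original code. */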
13264 for (errnode = arm_data->erratumlist; errnode != 0;
13265 errnode = errnode->next)
13266 {
13267 bfd_vma index = errnode->vma - offset;
13268
13269 switch (errnode->type)
13270 {
13271 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13272 {
13273 bfd_vma branch_to_veneer;
13274 /* Original condition code of instruction, plus bit mask for
13275 ARM B instruction. */
13276 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13277 | 0x0a000000;
13278
13279 /* The instruction is before the label. */
13280 index -= 4;
13281
13282 /* The branch is 4 bytes before the label and ARM branches are relative to PC+8, hence the net -4 below. */
13283 branch_to_veneer = errnode->u.b.veneer->vma
13284 - errnode->vma - 4;
13285
13286 if ((signed) branch_to_veneer < -(1 << 25)
13287 || (signed) branch_to_veneer >= (1 << 25))
13288 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13289 "range"), output_bfd);
13290
13291 insn |= (branch_to_veneer >> 2) & 0xffffff;
13292 contents[endianflip ^ index] = insn & 0xff;
13293 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13294 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13295 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13296 }
13297 break;
13298
13299 case VFP11_ERRATUM_ARM_VENEER:
13300 {
13301 bfd_vma branch_from_veneer;
13302 unsigned int insn;
13303
13304 /* Take size of veneer into account. */
13305 branch_from_veneer = errnode->u.v.branch->vma
13306 - errnode->vma - 12;
13307
13308 if ((signed) branch_from_veneer < -(1 << 25)
13309 || (signed) branch_from_veneer >= (1 << 25))
13310 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13311 "range"), output_bfd);
13312
13313 /* Original instruction. */
13314 insn = errnode->u.v.branch->u.b.vfp_insn;
13315 contents[endianflip ^ index] = insn & 0xff;
13316 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13317 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13318 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13319
13320 /* Branch back to insn after original insn. */
13321 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13322 contents[endianflip ^ (index + 4)] = insn & 0xff;
13323 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13324 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13325 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13326 }
13327 break;
13328
13329 default:
13330 abort ();
13331 }
13332 }
13333 }
13334
13335 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13336 {
13337 arm_unwind_table_edit *edit_node
13338 = arm_data->u.exidx.unwind_edit_list;
13339 /* Now, sec->size is the size of the section we will write. The original
13340 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13341 markers) was sec->rawsize. (If no edits were performed, rawsize will
13342 be zero and we should use size instead.) */
13343 bfd_byte *edited_contents = bfd_malloc (sec->size);
13344 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13345 unsigned int in_index, out_index;
13346 bfd_vma add_to_offsets = 0;
13347
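/* add_to_offsets tracks how far subsequent input entries have moved:
deleting an entry pulls later entries up by 8 bytes, so their applied
prel31 offsets must grow by 8; inserting a synthetic EXIDX_CANTUNWIND
entry pushes them down by 8, so the offsets shrink accordingly. */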
13348 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13349 {
13350 if (edit_node)
13351 {
13352 unsigned int edit_index = edit_node->index;
13353
13354 if (in_index < edit_index && in_index * 8 < input_size)
13355 {
13356 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13357 contents + in_index * 8, add_to_offsets);
13358 out_index++;
13359 in_index++;
13360 }
13361 else if (in_index == edit_index
13362 || (in_index * 8 >= input_size
13363 && edit_index == UINT_MAX))
13364 {
13365 switch (edit_node->type)
13366 {
13367 case DELETE_EXIDX_ENTRY:
13368 in_index++;
13369 add_to_offsets += 8;
13370 break;
13371
13372 case INSERT_EXIDX_CANTUNWIND_AT_END:
13373 {
13374 asection *text_sec = edit_node->linked_section;
13375 bfd_vma text_offset = text_sec->output_section->vma
13376 + text_sec->output_offset
13377 + text_sec->size;
13378 bfd_vma exidx_offset = offset + out_index * 8;
13379 unsigned long prel31_offset;
13380
13381 /* Note: this is meant to be equivalent to an
13382 R_ARM_PREL31 relocation. These synthetic
13383 EXIDX_CANTUNWIND markers are not relocated by the
13384 usual BFD method. */
13385 prel31_offset = (text_offset - exidx_offset)
13386 & 0x7ffffffful;
13387
13388 /* First address we can't unwind. */
13389 bfd_put_32 (output_bfd, prel31_offset,
13390 &edited_contents[out_index * 8]);
13391
13392 /* Code for EXIDX_CANTUNWIND. */
13393 bfd_put_32 (output_bfd, 0x1,
13394 &edited_contents[out_index * 8 + 4]);
13395
13396 out_index++;
13397 add_to_offsets -= 8;
13398 }
13399 break;
13400 }
13401
13402 edit_node = edit_node->next;
13403 }
13404 }
13405 else
13406 {
13407 /* No more edits, copy remaining entries verbatim. */
13408 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13409 contents + in_index * 8, add_to_offsets);
13410 out_index++;
13411 in_index++;
13412 }
13413 }
13414
13415 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13416 bfd_set_section_contents (output_bfd, sec->output_section,
13417 edited_contents,
13418 (file_ptr) sec->output_offset, sec->size);
13419
13420 return TRUE;
13421 }
13422
13423 /* Fix code to point to Cortex-A8 erratum stubs. */
13424 if (globals->fix_cortex_a8)
13425 {
13426 struct a8_branch_to_stub_data data;
13427
13428 data.writing_section = sec;
13429 data.contents = contents;
13430
13431 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13432 &data);
13433 }
13434
13435 if (mapcount == 0)
13436 return FALSE;
13437
13438 if (globals->byteswap_code)
13439 {
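/* When code byte-swapping has been requested (for instance when building
BE8 images), walk the mapping symbols in address order: regions marked
"$a" (ARM) are swapped as 32-bit words, "$t" (Thumb) regions as 16-bit
halfwords, and "$d" (data) regions are left alone. */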
13440 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13441
13442 ptr = map[0].vma;
13443 for (i = 0; i < mapcount; i++)
13444 {
13445 if (i == mapcount - 1)
13446 end = sec->size;
13447 else
13448 end = map[i + 1].vma;
13449
13450 switch (map[i].type)
13451 {
13452 case 'a':
13453 /* Byte swap code words. */
13454 while (ptr + 3 < end)
13455 {
13456 tmp = contents[ptr];
13457 contents[ptr] = contents[ptr + 3];
13458 contents[ptr + 3] = tmp;
13459 tmp = contents[ptr + 1];
13460 contents[ptr + 1] = contents[ptr + 2];
13461 contents[ptr + 2] = tmp;
13462 ptr += 4;
13463 }
13464 break;
13465
13466 case 't':
13467 /* Byte swap code halfwords. */
13468 while (ptr + 1 < end)
13469 {
13470 tmp = contents[ptr];
13471 contents[ptr] = contents[ptr + 1];
13472 contents[ptr + 1] = tmp;
13473 ptr += 2;
13474 }
13475 break;
13476
13477 case 'd':
13478 /* Leave data alone. */
13479 break;
13480 }
13481 ptr = end;
13482 }
13483 }
13484
13485 free (map);
13486 arm_data->mapcount = 0;
13487 arm_data->mapsize = 0;
13488 arm_data->map = NULL;
13489 unrecord_section_with_arm_elf_section_data (sec);
13490
13491 return FALSE;
13492 }
13493
13494 static void
13495 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13496 asection * sec,
13497 void * ignore ATTRIBUTE_UNUSED)
13498 {
13499 unrecord_section_with_arm_elf_section_data (sec);
13500 }
13501
13502 static bfd_boolean
13503 elf32_arm_close_and_cleanup (bfd * abfd)
13504 {
13505 if (abfd->sections)
13506 bfd_map_over_sections (abfd,
13507 unrecord_section_via_map_over_sections,
13508 NULL);
13509
13510 return _bfd_elf_close_and_cleanup (abfd);
13511 }
13512
13513 static bfd_boolean
13514 elf32_arm_bfd_free_cached_info (bfd * abfd)
13515 {
13516 if (abfd->sections)
13517 bfd_map_over_sections (abfd,
13518 unrecord_section_via_map_over_sections,
13519 NULL);
13520
13521 return _bfd_free_cached_info (abfd);
13522 }
13523
13524 /* Display STT_ARM_TFUNC symbols as functions. */
13525
13526 static void
13527 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13528 asymbol *asym)
13529 {
13530 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13531
13532 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13533 elfsym->symbol.flags |= BSF_FUNCTION;
13534 }
13535
13536
13537 /* Mangle thumb function symbols as we read them in. */
13538
13539 static bfd_boolean
13540 elf32_arm_swap_symbol_in (bfd * abfd,
13541 const void *psrc,
13542 const void *pshn,
13543 Elf_Internal_Sym *dst)
13544 {
13545 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13546 return FALSE;
13547
13548 /* New EABI objects mark thumb function symbols by setting the low bit of
13549 the address. Turn these into STT_ARM_TFUNC. */
13550 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13551 && (dst->st_value & 1))
13552 {
13553 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13554 dst->st_value &= ~(bfd_vma) 1;
13555 }
13556 return TRUE;
13557 }
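/* For example, a Thumb function emitted by an EABI assembler as STT_FUNC
with st_value 0x8001 is read back here as STT_ARM_TFUNC with st_value
0x8000; elf32_arm_swap_symbol_out below performs the reverse
transformation when the symbol table is written out. */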
13558
13559
13560 /* Mangle thumb function symbols as we write them out. */
13561
13562 static void
13563 elf32_arm_swap_symbol_out (bfd *abfd,
13564 const Elf_Internal_Sym *src,
13565 void *cdst,
13566 void *shndx)
13567 {
13568 Elf_Internal_Sym newsym;
13569
13570 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13571 of the address set, as per the new EABI. We do this unconditionally
13572 because objcopy does not set the elf header flags until after
13573 it writes out the symbol table. */
13574 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13575 {
13576 newsym = *src;
13577 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13578 if (newsym.st_shndx != SHN_UNDEF)
13579 {
13580 /* Do this only for defined symbols. At link time, the static
13581 linker simulates the dynamic linker's work of resolving symbols
13582 and carries the Thumbness of the symbols it finds over to the
13583 output symbol table. The Thumbness of an undefined symbol may
13584 well be different at run time, however, so writing '1' for such
13585 symbols would be confusing both for users and possibly for the
13586 dynamic linker itself.
13587 */
13588 newsym.st_value |= 1;
13589 }
13590
13591 src = &newsym;
13592 }
13593 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13594 }
13595
13596 /* Add the PT_ARM_EXIDX program header. */
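/* (PT_ARM_EXIDX tells the run-time unwinder where the exception index
table lives, covering the .ARM.exidx section mapped below.) */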
13597
13598 static bfd_boolean
13599 elf32_arm_modify_segment_map (bfd *abfd,
13600 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13601 {
13602 struct elf_segment_map *m;
13603 asection *sec;
13604
13605 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13606 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13607 {
13608 /* If there is already a PT_ARM_EXIDX header, then we do not
13609 want to add another one. This situation arises when running
13610 "strip"; the input binary already has the header. */
13611 m = elf_tdata (abfd)->segment_map;
13612 while (m && m->p_type != PT_ARM_EXIDX)
13613 m = m->next;
13614 if (!m)
13615 {
13616 m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13617 if (m == NULL)
13618 return FALSE;
13619 m->p_type = PT_ARM_EXIDX;
13620 m->count = 1;
13621 m->sections[0] = sec;
13622
13623 m->next = elf_tdata (abfd)->segment_map;
13624 elf_tdata (abfd)->segment_map = m;
13625 }
13626 }
13627
13628 return TRUE;
13629 }
13630
13631 /* We may add a PT_ARM_EXIDX program header. */
13632
13633 static int
13634 elf32_arm_additional_program_headers (bfd *abfd,
13635 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13636 {
13637 asection *sec;
13638
13639 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13640 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13641 return 1;
13642 else
13643 return 0;
13644 }
13645
13646 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13647
13648 static bfd_boolean
13649 elf32_arm_is_function_type (unsigned int type)
13650 {
13651 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13652 }
13653
13654 /* We use this to override swap_symbol_in and swap_symbol_out. */
13655 const struct elf_size_info elf32_arm_size_info =
13656 {
13657 sizeof (Elf32_External_Ehdr),
13658 sizeof (Elf32_External_Phdr),
13659 sizeof (Elf32_External_Shdr),
13660 sizeof (Elf32_External_Rel),
13661 sizeof (Elf32_External_Rela),
13662 sizeof (Elf32_External_Sym),
13663 sizeof (Elf32_External_Dyn),
13664 sizeof (Elf_External_Note),
13665 4,
13666 1,
13667 32, 2,
13668 ELFCLASS32, EV_CURRENT,
13669 bfd_elf32_write_out_phdrs,
13670 bfd_elf32_write_shdrs_and_ehdr,
13671 bfd_elf32_checksum_contents,
13672 bfd_elf32_write_relocs,
13673 elf32_arm_swap_symbol_in,
13674 elf32_arm_swap_symbol_out,
13675 bfd_elf32_slurp_reloc_table,
13676 bfd_elf32_slurp_symbol_table,
13677 bfd_elf32_swap_dyn_in,
13678 bfd_elf32_swap_dyn_out,
13679 bfd_elf32_swap_reloc_in,
13680 bfd_elf32_swap_reloc_out,
13681 bfd_elf32_swap_reloca_in,
13682 bfd_elf32_swap_reloca_out
13683 };
13684
13685 #define ELF_ARCH bfd_arch_arm
13686 #define ELF_MACHINE_CODE EM_ARM
13687 #ifdef __QNXTARGET__
13688 #define ELF_MAXPAGESIZE 0x1000
13689 #else
13690 #define ELF_MAXPAGESIZE 0x8000
13691 #endif
13692 #define ELF_MINPAGESIZE 0x1000
13693 #define ELF_COMMONPAGESIZE 0x1000
13694
13695 #define bfd_elf32_mkobject elf32_arm_mkobject
13696
13697 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13698 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13699 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13700 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13701 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13702 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13703 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13704 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13705 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13706 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13707 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13708 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13709 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13710 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13711 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13712
13713 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13714 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13715 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13716 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13717 #define elf_backend_check_relocs elf32_arm_check_relocs
13718 #define elf_backend_relocate_section elf32_arm_relocate_section
13719 #define elf_backend_write_section elf32_arm_write_section
13720 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13721 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13722 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13723 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13724 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13725 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13726 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13727 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13728 #define elf_backend_object_p elf32_arm_object_p
13729 #define elf_backend_section_flags elf32_arm_section_flags
13730 #define elf_backend_fake_sections elf32_arm_fake_sections
13731 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13732 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13733 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13734 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13735 #define elf_backend_size_info elf32_arm_size_info
13736 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13737 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13738 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13739 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13740 #define elf_backend_is_function_type elf32_arm_is_function_type
13741
13742 #define elf_backend_can_refcount 1
13743 #define elf_backend_can_gc_sections 1
13744 #define elf_backend_plt_readonly 1
13745 #define elf_backend_want_got_plt 1
13746 #define elf_backend_want_plt_sym 0
13747 #define elf_backend_may_use_rel_p 1
13748 #define elf_backend_may_use_rela_p 0
13749 #define elf_backend_default_use_rela_p 0
13750
13751 #define elf_backend_got_header_size 12
13752
13753 #undef elf_backend_obj_attrs_vendor
13754 #define elf_backend_obj_attrs_vendor "aeabi"
13755 #undef elf_backend_obj_attrs_section
13756 #define elf_backend_obj_attrs_section ".ARM.attributes"
13757 #undef elf_backend_obj_attrs_arg_type
13758 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13759 #undef elf_backend_obj_attrs_section_type
13760 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13761 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13762
13763 #include "elf32-target.h"
13764
13765 /* VxWorks Targets. */
13766
13767 #undef TARGET_LITTLE_SYM
13768 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13769 #undef TARGET_LITTLE_NAME
13770 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13771 #undef TARGET_BIG_SYM
13772 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13773 #undef TARGET_BIG_NAME
13774 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13775
13776 /* Like elf32_arm_link_hash_table_create -- but overrides
13777 appropriately for VxWorks. */
13778
13779 static struct bfd_link_hash_table *
13780 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13781 {
13782 struct bfd_link_hash_table *ret;
13783
13784 ret = elf32_arm_link_hash_table_create (abfd);
13785 if (ret)
13786 {
13787 struct elf32_arm_link_hash_table *htab
13788 = (struct elf32_arm_link_hash_table *) ret;
13789 htab->use_rel = 0;
13790 htab->vxworks_p = 1;
13791 }
13792 return ret;
13793 }
13794
13795 static void
13796 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13797 {
13798 elf32_arm_final_write_processing (abfd, linker);
13799 elf_vxworks_final_write_processing (abfd, linker);
13800 }
13801
13802 #undef elf32_bed
13803 #define elf32_bed elf32_arm_vxworks_bed
13804
13805 #undef bfd_elf32_bfd_link_hash_table_create
13806 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13807 #undef elf_backend_add_symbol_hook
13808 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13809 #undef elf_backend_final_write_processing
13810 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13811 #undef elf_backend_emit_relocs
13812 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13813
13814 #undef elf_backend_may_use_rel_p
13815 #define elf_backend_may_use_rel_p 0
13816 #undef elf_backend_may_use_rela_p
13817 #define elf_backend_may_use_rela_p 1
13818 #undef elf_backend_default_use_rela_p
13819 #define elf_backend_default_use_rela_p 1
13820 #undef elf_backend_want_plt_sym
13821 #define elf_backend_want_plt_sym 1
13822 #undef ELF_MAXPAGESIZE
13823 #define ELF_MAXPAGESIZE 0x1000
13824
13825 #include "elf32-target.h"
13826
13827
13828 /* Symbian OS Targets. */
13829
13830 #undef TARGET_LITTLE_SYM
13831 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
13832 #undef TARGET_LITTLE_NAME
13833 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
13834 #undef TARGET_BIG_SYM
13835 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
13836 #undef TARGET_BIG_NAME
13837 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
13838
13839 /* Like elf32_arm_link_hash_table_create -- but overrides
13840 appropriately for Symbian OS. */
13841
13842 static struct bfd_link_hash_table *
13843 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
13844 {
13845 struct bfd_link_hash_table *ret;
13846
13847 ret = elf32_arm_link_hash_table_create (abfd);
13848 if (ret)
13849 {
13850 struct elf32_arm_link_hash_table *htab
13851 = (struct elf32_arm_link_hash_table *)ret;
13852 /* There is no PLT header for Symbian OS. */
13853 htab->plt_header_size = 0;
13854 /* The PLT entries are each one instruction and one word. */
13855 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
13856 htab->symbian_p = 1;
13857 /* Symbian uses armv5t or above, so use_blx is always true. */
13858 htab->use_blx = 1;
13859 htab->root.is_relocatable_executable = 1;
13860 }
13861 return ret;
13862 }
13863
13864 static const struct bfd_elf_special_section
13865 elf32_arm_symbian_special_sections[] =
13866 {
13867 /* In a BPABI executable, the dynamic linking sections do not go in
13868 the loadable read-only segment. The post-linker may wish to
13869 refer to these sections, but they are not part of the final
13870 program image. */
13871 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
13872 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
13873 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
13874 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
13875 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
13876 /* These sections do not need to be writable as the SymbianOS
13877 postlinker will arrange things so that no dynamic relocation is
13878 required. */
13879 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
13880 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
13881 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
13882 { NULL, 0, 0, 0, 0 }
13883 };
13884
13885 static void
13886 elf32_arm_symbian_begin_write_processing (bfd *abfd,
13887 struct bfd_link_info *link_info)
13888 {
13889 /* BPABI objects are never loaded directly by an OS kernel; they are
13890 processed by a postlinker first, into an OS-specific format. If
13891 the D_PAGED bit is set on the file, BFD will align segments on
13892 page boundaries, so that an OS can directly map the file. With
13893 BPABI objects, that just results in wasted space. In addition,
13894 because we clear the D_PAGED bit, map_sections_to_segments will
13895 recognize that the program headers should not be mapped into any
13896 loadable segment. */
13897 abfd->flags &= ~D_PAGED;
13898 elf32_arm_begin_write_processing (abfd, link_info);
13899 }
13900
13901 static bfd_boolean
13902 elf32_arm_symbian_modify_segment_map (bfd *abfd,
13903 struct bfd_link_info *info)
13904 {
13905 struct elf_segment_map *m;
13906 asection *dynsec;
13907
13908 /* BPABI shared libraries and executables should have a PT_DYNAMIC
13909 segment. However, because the .dynamic section is not marked
13910 with SEC_LOAD, the generic ELF code will not create such a
13911 segment. */
13912 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
13913 if (dynsec)
13914 {
13915 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
13916 if (m->p_type == PT_DYNAMIC)
13917 break;
13918
13919 if (m == NULL)
13920 {
13921 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
13922 m->next = elf_tdata (abfd)->segment_map;
13923 elf_tdata (abfd)->segment_map = m;
13924 }
13925 }
13926
13927 /* Also call the generic arm routine. */
13928 return elf32_arm_modify_segment_map (abfd, info);
13929 }
13930
13931 /* Return address for Ith PLT stub in section PLT, for relocation REL
13932 or (bfd_vma) -1 if it should not be included. */
13933
13934 static bfd_vma
13935 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
13936 const arelent *rel ATTRIBUTE_UNUSED)
13937 {
13938 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
13939 }
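/* A minimal illustration, assuming the two-word (8-byte) Symbian PLT
entry noted above: stub 0 starts at plt->vma, stub 1 at plt->vma + 8,
and so on. */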
13940
13941
13942 #undef elf32_bed
13943 #define elf32_bed elf32_arm_symbian_bed
13944
13945 /* The dynamic sections are not allocated on SymbianOS; the postlinker
13946 will process them and then discard them. */
13947 #undef ELF_DYNAMIC_SEC_FLAGS
13948 #define ELF_DYNAMIC_SEC_FLAGS \
13949 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
13950
13951 #undef elf_backend_add_symbol_hook
13952 #undef elf_backend_emit_relocs
13953
13954 #undef bfd_elf32_bfd_link_hash_table_create
13955 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
13956 #undef elf_backend_special_sections
13957 #define elf_backend_special_sections elf32_arm_symbian_special_sections
13958 #undef elf_backend_begin_write_processing
13959 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
13960 #undef elf_backend_final_write_processing
13961 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13962
13963 #undef elf_backend_modify_segment_map
13964 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
13965
13966 /* There is no .got section for BPABI objects, and hence no header. */
13967 #undef elf_backend_got_header_size
13968 #define elf_backend_got_header_size 0
13969
13970 /* Similarly, there is no .got.plt section. */
13971 #undef elf_backend_want_got_plt
13972 #define elf_backend_want_got_plt 0
13973
13974 #undef elf_backend_plt_sym_val
13975 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
13976
13977 #undef elf_backend_may_use_rel_p
13978 #define elf_backend_may_use_rel_p 1
13979 #undef elf_backend_may_use_rela_p
13980 #define elf_backend_may_use_rela_p 0
13981 #undef elf_backend_default_use_rela_p
13982 #define elf_backend_default_use_rela_p 0
13983 #undef elf_backend_want_plt_sym
13984 #define elf_backend_want_plt_sym 0
13985 #undef ELF_MAXPAGESIZE
13986 #define ELF_MAXPAGESIZE 0x8000
13987
13988 #include "elf32-target.h"