/* gdb/arm-tdep.c (from deliverable/binutils-gdb.git).
   NOTE(review): a stray ChangeLog line ("arm-linux-tdep.c: Include
   \"auxv.h\".") preceded this file in the extraction; preserved here
   for provenance.  */
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "features/arm-with-m.c"
59 #include "features/arm-with-iwmmxt.c"
60 #include "features/arm-with-vfpv2.c"
61 #include "features/arm-with-vfpv3.c"
62 #include "features/arm-with-neon.c"
63
/* Nonzero enables extra diagnostic output ("set debug arm").  */
static int arm_debug;

/* Macros for setting and testing a bit in a minimal symbol that marks
   it as Thumb function.  The MSB of the minimal symbol's "info" field
   is used for this purpose.

   MSYMBOL_SET_SPECIAL	Actually sets the "special" bit.
   MSYMBOL_IS_SPECIAL	Tests the "special" bit in a minimal symbol.  */

#define MSYMBOL_SET_SPECIAL(msym)				\
	MSYMBOL_TARGET_FLAG_1 (msym) = 1

#define MSYMBOL_IS_SPECIAL(msym)				\
	MSYMBOL_TARGET_FLAG_1 (msym)

/* Per-objfile data used for mapping symbols.  */
static const struct objfile_data *arm_objfile_data_key;

/* One mapping symbol: where a new mapping starts within a section,
   and its type character (arm_pc_is_thumb tests for 't').  */
struct arm_mapping_symbol
{
  bfd_vma value;	/* Offset from the start of the section.  */
  char type;		/* Mapping type character.  */
};
typedef struct arm_mapping_symbol arm_mapping_symbol_s;
DEF_VEC_O(arm_mapping_symbol_s);

/* Per-objfile mapping-symbol data: an array of vectors, one per BFD
   section, indexed by section index (see arm_find_mapping_symbol).  */
struct arm_per_objfile
{
  VEC(arm_mapping_symbol_s) **section_maps;
};
94
/* The list of available "set arm ..." and "show arm ..." commands.  */
static struct cmd_list_element *setarmcmdlist = NULL;
static struct cmd_list_element *showarmcmdlist = NULL;

/* The type of floating-point to use.  Keep this in sync with enum
   arm_float_model, and the help string in _initialize_arm_tdep.  */
static const char *fp_model_strings[] =
{
  "auto",
  "softfpa",
  "fpa",
  "softvfp",
  "vfp",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
static const char *current_fp_model = "auto";

/* The ABI to use.  Keep this in sync with arm_abi_kind.  */
static const char *arm_abi_strings[] =
{
  "auto",
  "APCS",
  "AAPCS",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
static const char *arm_abi_string = "auto";

/* The execution mode to assume.  */
static const char *arm_mode_strings[] =
{
  "auto",
  "arm",
  "thumb",
  NULL
};

/* User settings: the fallback is consulted only when nothing else
   determines ARM vs. Thumb; the force setting overrides the symbol
   table (see arm_pc_is_thumb for the full priority order).  */
static const char *arm_fallback_mode_string = "auto";
static const char *arm_force_mode_string = "auto";

/* Internal override of the execution mode.  -1 means no override,
   0 means override to ARM mode, 1 means override to Thumb mode.
   The effect is the same as if arm_force_mode has been set by the
   user (except the internal override has precedence over a user's
   arm_force_mode override).  */
static int arm_override_mode = -1;

/* Number of different reg name sets (options).  */
static int num_disassembly_options;

/* The standard register names, and all the valid aliases for them.  Note
   that `fp', `sp' and `pc' are not added in this alias list, because they
   have been added as builtin user registers in
   std-regs.c:_initialize_frame_reg.  */
static const struct
{
  const char *name;
  int regnum;
} arm_register_aliases[] = {
  /* Basic register numbers.  */
  { "r0", 0 },
  { "r1", 1 },
  { "r2", 2 },
  { "r3", 3 },
  { "r4", 4 },
  { "r5", 5 },
  { "r6", 6 },
  { "r7", 7 },
  { "r8", 8 },
  { "r9", 9 },
  { "r10", 10 },
  { "r11", 11 },
  { "r12", 12 },
  { "r13", 13 },
  { "r14", 14 },
  { "r15", 15 },
  /* Synonyms (argument and variable registers).  */
  { "a1", 0 },
  { "a2", 1 },
  { "a3", 2 },
  { "a4", 3 },
  { "v1", 4 },
  { "v2", 5 },
  { "v3", 6 },
  { "v4", 7 },
  { "v5", 8 },
  { "v6", 9 },
  { "v7", 10 },
  { "v8", 11 },
  /* Other platform-specific names for r9.  */
  { "sb", 9 },
  { "tr", 9 },
  /* Special names.  */
  { "ip", 12 },
  { "lr", 14 },
  /* Names used by GCC (not listed in the ARM EABI).  */
  { "sl", 10 },
  /* A special name from the older ATPCS.  */
  { "wr", 7 },
};
200
/* The core register names, in regcache numbering order.  */
static const char *const arm_register_names[] =
{"r0",  "r1",  "r2",  "r3",	/*  0  1  2  3 */
 "r4",  "r5",  "r6",  "r7",	/*  4  5  6  7 */
 "r8",  "r9",  "r10", "r11",	/*  8  9 10 11 */
 "r12", "sp",  "lr",  "pc",	/* 12 13 14 15 */
 "f0",  "f1",  "f2",  "f3",	/* 16 17 18 19 */
 "f4",  "f5",  "f6",  "f7",	/* 20 21 22 23 */
 "fps", "cpsr" };		/* 24 25       */

/* Valid register name styles.  */
static const char **valid_disassembly_styles;

/* Disassembly style to use.  Default to "std" register names.  */
static const char *disassembly_style;

/* This is used to keep the bfd arch_info in sync with the disassembly
   style.  */
static void set_disassembly_style_sfunc(char *, int,
					struct cmd_list_element *);
static void set_disassembly_style (void);

/* Helpers converting to/from the ARM extended floating-point format;
   definitions appear later in the file.  */
static void convert_from_extended (const struct floatformat *, const void *,
				   void *, int);
static void convert_to_extended (const struct floatformat *, void *,
				 const void *, int);

/* Forward declarations for NEON quad pseudo-register access.  */
static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
						struct regcache *regcache,
						int regnum, gdb_byte *buf);
static void arm_neon_quad_write (struct gdbarch *gdbarch,
				 struct regcache *regcache,
				 int regnum, const gdb_byte *buf);
233
/* Frame information discovered by prologue analysis, cached per
   frame.  */
struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

/* Scan an ARM-mode prologue; defined later in this file.  */
static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
				       CORE_ADDR prologue_start,
				       CORE_ADDR prologue_end,
				       struct arm_prologue_cache *cache);

/* Architecture version for displaced stepping.  This affects the behaviour
   of certain instructions, and really should not be hard-wired.  */

#define DISPLACED_STEPPING_ARCH_VERSION		5
263
/* Addresses for calling Thumb functions have the bit 0 set.
   Here are some macros to test, set, or clear bit 0 of addresses.  */
#define IS_THUMB_ADDR(addr)	((addr) & 1)
#define MAKE_THUMB_ADDR(addr)	((addr) | 1)
#define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)

/* Set to true if the 32-bit mode is in use.  When zero, addresses are
   masked to 26 bits (see arm_addr_bits_remove).  */

int arm_apcs_32 = 1;
273
274 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
275
276 int
277 arm_psr_thumb_bit (struct gdbarch *gdbarch)
278 {
279 if (gdbarch_tdep (gdbarch)->is_m)
280 return XPSR_T;
281 else
282 return CPSR_T;
283 }
284
285 /* Determine if FRAME is executing in Thumb mode. */
286
287 int
288 arm_frame_is_thumb (struct frame_info *frame)
289 {
290 CORE_ADDR cpsr;
291 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
292
293 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
294 directly (from a signal frame or dummy frame) or by interpreting
295 the saved LR (from a prologue or DWARF frame). So consult it and
296 trust the unwinders. */
297 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
298
299 return (cpsr & t_bit) != 0;
300 }
301
302 /* Callback for VEC_lower_bound. */
303
304 static inline int
305 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
306 const struct arm_mapping_symbol *rhs)
307 {
308 return lhs->value < rhs->value;
309 }
310
/* Search for the mapping symbol covering MEMADDR.  If one is found,
   return its type.  Otherwise, return 0.  If START is non-NULL,
   set *START to the location of the mapping symbol.  */

static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_per_objfile *data;
      VEC(arm_mapping_symbol_s) *map;
      /* The per-section vectors store section-relative offsets, so
	 rebase MEMADDR before searching.  */
      struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
					    0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_objfile_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_mapping_symbol_s, map))
	    {
	      struct arm_mapping_symbol *map_sym;

	      idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
				     arm_compare_mapping_symbols);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 mapping symbol covers this address.  */
	      if (idx < VEC_length (arm_mapping_symbol_s, map))
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
		  if (map_sym->value == map_key.value)
		    {
		      if (start)
			*start = map_sym->value + obj_section_addr (sec);
		      return map_sym->type;
		    }
		}

	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
		  if (start)
		    *start = map_sym->value + obj_section_addr (sec);
		  return map_sym->type;
		}
	    }
	}
    }

  /* No section, no per-objfile data, or no symbol at or before
     MEMADDR: no mapping information available.  */
  return 0;
}
369
/* Determine if the program counter specified in MEMADDR is in a Thumb
   function.  This function should be called for addresses unrelated to
   any executing frame; otherwise, prefer arm_frame_is_thumb.

   The evidence below is consulted in priority order: displaced-step
   closure, the Thumb address bit, the internal override, the user's
   "force" mode, M-profile, mapping symbols, minimal-symbol special
   bits, the user's "fallback" mode, and finally the live CPSR.  */

int
arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
{
  struct obj_section *sec;
  struct minimal_symbol *sym;
  char type;
  struct displaced_step_closure* dsc
    = get_displaced_step_closure_by_addr(memaddr);

  /* If checking the mode of displaced instruction in copy area, the mode
     should be determined by instruction on the original address.  */
  if (dsc)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: check mode of %.8lx instead of %.8lx\n",
			    (unsigned long) dsc->insn_addr,
			    (unsigned long) memaddr);
      memaddr = dsc->insn_addr;
    }

  /* If bit 0 of the address is set, assume this is a Thumb address.  */
  if (IS_THUMB_ADDR (memaddr))
    return 1;

  /* Respect internal mode override if active.  */
  if (arm_override_mode != -1)
    return arm_override_mode;

  /* If the user wants to override the symbol table, let him.  */
  if (strcmp (arm_force_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_force_mode_string, "thumb") == 0)
    return 1;

  /* ARM v6-M and v7-M are always in Thumb mode.  */
  if (gdbarch_tdep (gdbarch)->is_m)
    return 1;

  /* If there are mapping symbols, consult them.  */
  type = arm_find_mapping_symbol (memaddr, NULL);
  if (type)
    return type == 't';

  /* Thumb functions have a "special" bit set in minimal symbols.  */
  sym = lookup_minimal_symbol_by_pc (memaddr);
  if (sym)
    return (MSYMBOL_IS_SPECIAL (sym));

  /* If the user wants to override the fallback mode, let them.  */
  if (strcmp (arm_fallback_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_fallback_mode_string, "thumb") == 0)
    return 1;

  /* If we couldn't find any symbol, but we're talking to a running
     target, then trust the current value of $cpsr.  This lets
     "display/i $pc" always show the correct mode (though if there is
     a symbol table we will not reach here, so it still may not be
     displayed in the mode it will be executed).  */
  if (target_has_registers)
    return arm_frame_is_thumb (get_current_frame ());

  /* Otherwise we're out of luck; we assume ARM.  */
  return 0;
}
440
441 /* Remove useless bits from addresses in a running program. */
442 static CORE_ADDR
443 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
444 {
445 if (arm_apcs_32)
446 return UNMAKE_THUMB_ADDR (val);
447 else
448 return (val & 0x03fffffc);
449 }
450
451 /* When reading symbols, we need to zap the low bit of the address,
452 which may be set to 1 for Thumb functions. */
453 static CORE_ADDR
454 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
455 {
456 return val & ~1;
457 }
458
/* Return 1 if PC is the start of a compiler helper function which
   can be safely ignored during prologue skipping.  IS_THUMB is true
   if the function is known to be a Thumb function due to the way it
   is being called.  */
static int
skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct minimal_symbol *msym;

  msym = lookup_minimal_symbol_by_pc (pc);
  if (msym != NULL
      && SYMBOL_VALUE_ADDRESS (msym) == pc
      && SYMBOL_LINKAGE_NAME (msym) != NULL)
    {
      const char *name = SYMBOL_LINKAGE_NAME (msym);

      /* The GNU linker's Thumb call stub to foo is named
	 __foo_from_thumb.  Skip the linker-added "__" prefix so that
	 the prefix checks below see the wrapped function's own name
	 (which itself begins with "__" for the helpers of interest).  */
      if (strstr (name, "_from_thumb") != NULL)
	name += 2;

      /* On soft-float targets, __truncdfsf2 is called to convert promoted
	 arguments to their argument types in non-prototyped
	 functions.  */
      if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
	return 1;

      /* Internal functions related to thread-local storage.  */
      if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
	return 1;
    }
  else
    {
      /* If we run against a stripped glibc, we may be unable to identify
	 special functions by name.  Check for one important case,
	 __aeabi_read_tp, by comparing the *code* against the default
	 implementation (this is hand-written ARM assembler in glibc).  */

      if (!is_thumb
	  && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
	     == 0xe3e00a0f /* mov r0, #0xffff0fff */
	  && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
	     == 0xe240f01f) /* sub pc, r0, #31 */
	return 1;
    }

  return 0;
}
512
/* Support routines for instruction parsing.  */

/* Mask of the X+1 least-significant bits.  NOTE(review): shifts of
   1L assume X+1 does not exceed the width of long.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST through FN (inclusive) of OBJ, zero-extended.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Bits ST through FN of OBJ, sign-extended from bit FN.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* Destination of an ARM-mode branch at ADDR: ADDR + 8 (pipeline
   offset) plus the sign-extended 24-bit offset scaled by four.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))

/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)	       \
   | (bits ((insn1), 10, 10) << 11)    \
   | (bits ((insn2), 12, 14) << 8)     \
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12) \
   | bits ((insn), 0, 11))
536
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.
   IMM is the 12-bit i:imm3:imm8 immediate field; the result is the
   expanded 32-bit value.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int base = imm & 0xff;
  unsigned int rot = imm >> 7;	/* Top five bits of the field.  */

  if (rot >= 8)
    /* Rotated form: the eight-bit value 1bcdefgh rotated right by
       ROT, which (since ROT <= 31) equals a left shift by 32-ROT.  */
    return (0x80 | (imm & 0x7f)) << (32 - rot);

  /* Replicated forms, selected by bits <9:8> of the field.  */
  switch (rot / 2)
    {
    case 0:		/* 00000000 00000000 00000000 abcdefgh  */
      return base;
    case 1:		/* 00000000 abcdefgh 00000000 abcdefgh  */
      return base | (base << 16);
    case 2:		/* abcdefgh 00000000 abcdefgh 00000000  */
      return (base << 8) | (base << 24);
    default:		/* abcdefgh abcdefgh abcdefgh abcdefgh  */
      return base | (base << 8) | (base << 16) | (base << 24);
    }
}
560
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  /* A single pattern match over all PC-modifying encodings.  */
  return ((inst & 0xff00) == 0xbd00	/* pop {rlist, pc} */
	  || (inst & 0xf000) == 0xd000	/* conditional branch */
	  || (inst & 0xf800) == 0xe000	/* unconditional branch */
	  || (inst & 0xff00) == 0x4700	/* bx REG, blx REG */
	  || (inst & 0xff87) == 0x4687	/* mov pc, REG */
	  || (inst & 0xf500) == 0xb100);	/* CBNZ or CBZ.  */
}
587
/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
   might change control flow, 0 otherwise.  INST1 is the first
   halfword, INST2 the second.  */

static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  */
	  return 1;
	}

      /* Other miscellaneous control (e.g. MSR, hints) leaves the PC
	 alone.  */
      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  Bits 7 and 8 of INST1 select the
	 addressing variant; a load multiple changes the PC only if
	 the register list (INST2) includes the PC, while RFE always
	 does.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR PC: changes the PC for literal, writeback, immediate-
	 offset, and register-offset addressing forms.  */
      if (bits (inst1, 0, 3) == 15)
	return 1;
      if (bit (inst1, 7))
	return 1;
      if (bit (inst2, 11))
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)
	return 1;

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  return 0;
}
682
683 /* Analyze a Thumb prologue, looking for a recognizable stack frame
684 and frame pointer. Scan until we encounter a store that could
685 clobber the stack frame unexpectedly, or an unknown instruction.
686 Return the last address which is definitely safe to skip for an
687 initial breakpoint. */
688
689 static CORE_ADDR
690 thumb_analyze_prologue (struct gdbarch *gdbarch,
691 CORE_ADDR start, CORE_ADDR limit,
692 struct arm_prologue_cache *cache)
693 {
694 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
695 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
696 int i;
697 pv_t regs[16];
698 struct pv_area *stack;
699 struct cleanup *back_to;
700 CORE_ADDR offset;
701 CORE_ADDR unrecognized_pc = 0;
702
703 for (i = 0; i < 16; i++)
704 regs[i] = pv_register (i, 0);
705 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
706 back_to = make_cleanup_free_pv_area (stack);
707
708 while (start < limit)
709 {
710 unsigned short insn;
711
712 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
713
714 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
715 {
716 int regno;
717 int mask;
718
719 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
720 break;
721
722 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
723 whether to save LR (R14). */
724 mask = (insn & 0xff) | ((insn & 0x100) << 6);
725
726 /* Calculate offsets of saved R0-R7 and LR. */
727 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
728 if (mask & (1 << regno))
729 {
730 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
731 -4);
732 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
733 }
734 }
735 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
736 sub sp, #simm */
737 {
738 offset = (insn & 0x7f) << 2; /* get scaled offset */
739 if (insn & 0x80) /* Check for SUB. */
740 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
741 -offset);
742 else
743 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
744 offset);
745 }
746 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
747 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
748 (insn & 0xff) << 2);
749 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
750 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
751 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
752 bits (insn, 6, 8));
753 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
754 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
755 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
756 bits (insn, 0, 7));
757 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
758 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
759 && pv_is_constant (regs[bits (insn, 3, 5)]))
760 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
761 regs[bits (insn, 6, 8)]);
762 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
763 && pv_is_constant (regs[bits (insn, 3, 6)]))
764 {
765 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
766 int rm = bits (insn, 3, 6);
767 regs[rd] = pv_add (regs[rd], regs[rm]);
768 }
769 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
770 {
771 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
772 int src_reg = (insn & 0x78) >> 3;
773 regs[dst_reg] = regs[src_reg];
774 }
775 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
776 {
777 /* Handle stores to the stack. Normally pushes are used,
778 but with GCC -mtpcs-frame, there may be other stores
779 in the prologue to create the frame. */
780 int regno = (insn >> 8) & 0x7;
781 pv_t addr;
782
783 offset = (insn & 0xff) << 2;
784 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
785
786 if (pv_area_store_would_trash (stack, addr))
787 break;
788
789 pv_area_store (stack, addr, 4, regs[regno]);
790 }
791 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
792 {
793 int rd = bits (insn, 0, 2);
794 int rn = bits (insn, 3, 5);
795 pv_t addr;
796
797 offset = bits (insn, 6, 10) << 2;
798 addr = pv_add_constant (regs[rn], offset);
799
800 if (pv_area_store_would_trash (stack, addr))
801 break;
802
803 pv_area_store (stack, addr, 4, regs[rd]);
804 }
805 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
806 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
807 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
808 /* Ignore stores of argument registers to the stack. */
809 ;
810 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
811 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
812 /* Ignore block loads from the stack, potentially copying
813 parameters from memory. */
814 ;
815 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
816 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
817 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
818 /* Similarly ignore single loads from the stack. */
819 ;
820 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
821 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
822 /* Skip register copies, i.e. saves to another register
823 instead of the stack. */
824 ;
825 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
826 /* Recognize constant loads; even with small stacks these are necessary
827 on Thumb. */
828 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
829 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
830 {
831 /* Constant pool loads, for the same reason. */
832 unsigned int constant;
833 CORE_ADDR loc;
834
835 loc = start + 4 + bits (insn, 0, 7) * 4;
836 constant = read_memory_unsigned_integer (loc, 4, byte_order);
837 regs[bits (insn, 8, 10)] = pv_constant (constant);
838 }
839 else if ((insn & 0xe000) == 0xe000)
840 {
841 unsigned short inst2;
842
843 inst2 = read_memory_unsigned_integer (start + 2, 2,
844 byte_order_for_code);
845
846 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
847 {
848 /* BL, BLX. Allow some special function calls when
849 skipping the prologue; GCC generates these before
850 storing arguments to the stack. */
851 CORE_ADDR nextpc;
852 int j1, j2, imm1, imm2;
853
854 imm1 = sbits (insn, 0, 10);
855 imm2 = bits (inst2, 0, 10);
856 j1 = bit (inst2, 13);
857 j2 = bit (inst2, 11);
858
859 offset = ((imm1 << 12) + (imm2 << 1));
860 offset ^= ((!j2) << 22) | ((!j1) << 23);
861
862 nextpc = start + 4 + offset;
863 /* For BLX make sure to clear the low bits. */
864 if (bit (inst2, 12) == 0)
865 nextpc = nextpc & 0xfffffffc;
866
867 if (!skip_prologue_function (gdbarch, nextpc,
868 bit (inst2, 12) != 0))
869 break;
870 }
871
872 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
873 { registers } */
874 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
875 {
876 pv_t addr = regs[bits (insn, 0, 3)];
877 int regno;
878
879 if (pv_area_store_would_trash (stack, addr))
880 break;
881
882 /* Calculate offsets of saved registers. */
883 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
884 if (inst2 & (1 << regno))
885 {
886 addr = pv_add_constant (addr, -4);
887 pv_area_store (stack, addr, 4, regs[regno]);
888 }
889
890 if (insn & 0x0020)
891 regs[bits (insn, 0, 3)] = addr;
892 }
893
894 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
895 [Rn, #+/-imm]{!} */
896 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
897 {
898 int regno1 = bits (inst2, 12, 15);
899 int regno2 = bits (inst2, 8, 11);
900 pv_t addr = regs[bits (insn, 0, 3)];
901
902 offset = inst2 & 0xff;
903 if (insn & 0x0080)
904 addr = pv_add_constant (addr, offset);
905 else
906 addr = pv_add_constant (addr, -offset);
907
908 if (pv_area_store_would_trash (stack, addr))
909 break;
910
911 pv_area_store (stack, addr, 4, regs[regno1]);
912 pv_area_store (stack, pv_add_constant (addr, 4),
913 4, regs[regno2]);
914
915 if (insn & 0x0020)
916 regs[bits (insn, 0, 3)] = addr;
917 }
918
919 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
920 && (inst2 & 0x0c00) == 0x0c00
921 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
922 {
923 int regno = bits (inst2, 12, 15);
924 pv_t addr = regs[bits (insn, 0, 3)];
925
926 offset = inst2 & 0xff;
927 if (inst2 & 0x0200)
928 addr = pv_add_constant (addr, offset);
929 else
930 addr = pv_add_constant (addr, -offset);
931
932 if (pv_area_store_would_trash (stack, addr))
933 break;
934
935 pv_area_store (stack, addr, 4, regs[regno]);
936
937 if (inst2 & 0x0100)
938 regs[bits (insn, 0, 3)] = addr;
939 }
940
941 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
942 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
943 {
944 int regno = bits (inst2, 12, 15);
945 pv_t addr;
946
947 offset = inst2 & 0xfff;
948 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
949
950 if (pv_area_store_would_trash (stack, addr))
951 break;
952
953 pv_area_store (stack, addr, 4, regs[regno]);
954 }
955
956 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
957 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
958 /* Ignore stores of argument registers to the stack. */
959 ;
960
961 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
962 && (inst2 & 0x0d00) == 0x0c00
963 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
964 /* Ignore stores of argument registers to the stack. */
965 ;
966
967 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
968 { registers } */
969 && (inst2 & 0x8000) == 0x0000
970 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
971 /* Ignore block loads from the stack, potentially copying
972 parameters from memory. */
973 ;
974
975 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
976 [Rn, #+/-imm] */
977 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
978 /* Similarly ignore dual loads from the stack. */
979 ;
980
981 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
982 && (inst2 & 0x0d00) == 0x0c00
983 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
984 /* Similarly ignore single loads from the stack. */
985 ;
986
987 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
988 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
989 /* Similarly ignore single loads from the stack. */
990 ;
991
992 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
993 && (inst2 & 0x8000) == 0x0000)
994 {
995 unsigned int imm = ((bits (insn, 10, 10) << 11)
996 | (bits (inst2, 12, 14) << 8)
997 | bits (inst2, 0, 7));
998
999 regs[bits (inst2, 8, 11)]
1000 = pv_add_constant (regs[bits (insn, 0, 3)],
1001 thumb_expand_immediate (imm));
1002 }
1003
1004 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1005 && (inst2 & 0x8000) == 0x0000)
1006 {
1007 unsigned int imm = ((bits (insn, 10, 10) << 11)
1008 | (bits (inst2, 12, 14) << 8)
1009 | bits (inst2, 0, 7));
1010
1011 regs[bits (inst2, 8, 11)]
1012 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1013 }
1014
1015 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1016 && (inst2 & 0x8000) == 0x0000)
1017 {
1018 unsigned int imm = ((bits (insn, 10, 10) << 11)
1019 | (bits (inst2, 12, 14) << 8)
1020 | bits (inst2, 0, 7));
1021
1022 regs[bits (inst2, 8, 11)]
1023 = pv_add_constant (regs[bits (insn, 0, 3)],
1024 - (CORE_ADDR) thumb_expand_immediate (imm));
1025 }
1026
1027 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1028 && (inst2 & 0x8000) == 0x0000)
1029 {
1030 unsigned int imm = ((bits (insn, 10, 10) << 11)
1031 | (bits (inst2, 12, 14) << 8)
1032 | bits (inst2, 0, 7));
1033
1034 regs[bits (inst2, 8, 11)]
1035 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1036 }
1037
1038 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1039 {
1040 unsigned int imm = ((bits (insn, 10, 10) << 11)
1041 | (bits (inst2, 12, 14) << 8)
1042 | bits (inst2, 0, 7));
1043
1044 regs[bits (inst2, 8, 11)]
1045 = pv_constant (thumb_expand_immediate (imm));
1046 }
1047
1048 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1049 {
1050 unsigned int imm
1051 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1052
1053 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1054 }
1055
1056 else if (insn == 0xea5f /* mov.w Rd,Rm */
1057 && (inst2 & 0xf0f0) == 0)
1058 {
1059 int dst_reg = (inst2 & 0x0f00) >> 8;
1060 int src_reg = inst2 & 0xf;
1061 regs[dst_reg] = regs[src_reg];
1062 }
1063
1064 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1065 {
1066 /* Constant pool loads. */
1067 unsigned int constant;
1068 CORE_ADDR loc;
1069
1070 offset = bits (insn, 0, 11);
1071 if (insn & 0x0080)
1072 loc = start + 4 + offset;
1073 else
1074 loc = start + 4 - offset;
1075
1076 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1077 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1078 }
1079
1080 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1081 {
1082 /* Constant pool loads. */
1083 unsigned int constant;
1084 CORE_ADDR loc;
1085
1086 offset = bits (insn, 0, 7) << 2;
1087 if (insn & 0x0080)
1088 loc = start + 4 + offset;
1089 else
1090 loc = start + 4 - offset;
1091
1092 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1093 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1094
1095 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1096 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1097 }
1098
1099 else if (thumb2_instruction_changes_pc (insn, inst2))
1100 {
1101 /* Don't scan past anything that might change control flow. */
1102 break;
1103 }
1104 else
1105 {
1106 /* The optimizer might shove anything into the prologue,
1107 so we just skip what we don't recognize. */
1108 unrecognized_pc = start;
1109 }
1110
1111 start += 2;
1112 }
1113 else if (thumb_instruction_changes_pc (insn))
1114 {
1115 /* Don't scan past anything that might change control flow. */
1116 break;
1117 }
1118 else
1119 {
1120 /* The optimizer might shove anything into the prologue,
1121 so we just skip what we don't recognize. */
1122 unrecognized_pc = start;
1123 }
1124
1125 start += 2;
1126 }
1127
1128 if (arm_debug)
1129 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1130 paddress (gdbarch, start));
1131
1132 if (unrecognized_pc == 0)
1133 unrecognized_pc = start;
1134
1135 if (cache == NULL)
1136 {
1137 do_cleanups (back_to);
1138 return unrecognized_pc;
1139 }
1140
1141 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1142 {
1143 /* Frame pointer is fp. Frame size is constant. */
1144 cache->framereg = ARM_FP_REGNUM;
1145 cache->framesize = -regs[ARM_FP_REGNUM].k;
1146 }
1147 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1148 {
1149 /* Frame pointer is r7. Frame size is constant. */
1150 cache->framereg = THUMB_FP_REGNUM;
1151 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1152 }
1153 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1154 {
1155 /* Try the stack pointer... this is a bit desperate. */
1156 cache->framereg = ARM_SP_REGNUM;
1157 cache->framesize = -regs[ARM_SP_REGNUM].k;
1158 }
1159 else
1160 {
1161 /* We're just out of luck. We don't know where the frame is. */
1162 cache->framereg = -1;
1163 cache->framesize = 0;
1164 }
1165
1166 for (i = 0; i < 16; i++)
1167 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1168 cache->saved_regs[i].addr = offset;
1169
1170 do_cleanups (back_to);
1171 return unrecognized_pc;
1172 }
1173
1174
1175 /* Try to analyze the instructions starting from PC, which load symbol
1176 __stack_chk_guard. Return the address of instruction after loading this
1177 symbol, set the dest register number to *BASEREG, and set the size of
1178 instructions for loading symbol in OFFSET. Return 0 if instructions are
1179 not recognized. */
1180
1181 static CORE_ADDR
1182 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1183 unsigned int *destreg, int *offset)
1184 {
1185 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1186 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1187 unsigned int low, high, address;
1188
1189 address = 0;
1190 if (is_thumb)
1191 {
1192 unsigned short insn1
1193 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1194
1195 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1196 {
1197 *destreg = bits (insn1, 8, 10);
1198 *offset = 2;
1199 address = bits (insn1, 0, 7);
1200 }
1201 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1202 {
1203 unsigned short insn2
1204 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1205
1206 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1207
1208 insn1
1209 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1210 insn2
1211 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1212
1213 /* movt Rd, #const */
1214 if ((insn1 & 0xfbc0) == 0xf2c0)
1215 {
1216 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1217 *destreg = bits (insn2, 8, 11);
1218 *offset = 8;
1219 address = (high << 16 | low);
1220 }
1221 }
1222 }
1223 else
1224 {
1225 unsigned int insn
1226 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1227
1228 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1229 {
1230 address = bits (insn, 0, 11);
1231 *destreg = bits (insn, 12, 15);
1232 *offset = 4;
1233 }
1234 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1235 {
1236 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1237
1238 insn
1239 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1240
1241 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1242 {
1243 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1244 *destreg = bits (insn, 12, 15);
1245 *offset = 8;
1246 address = (high << 16 | low);
1247 }
1248 }
1249 }
1250
1251 return address;
1252 }
1253
1254 /* Try to skip a sequence of instructions used for stack protector. If PC
1255 points to the first instruction of this sequence, return the address of
1256 first instruction after this sequence, otherwise, return original PC.
1257
1258 On arm, this sequence of instructions is composed of mainly three steps,
1259 Step 1: load symbol __stack_chk_guard,
1260 Step 2: load from address of __stack_chk_guard,
1261 Step 3: store it to somewhere else.
1262
1263 Usually, instructions on step 2 and step 3 are the same on various ARM
1264 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1265 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1266 instructions in step 1 vary from different ARM architectures. On ARMv7,
1267 they are,
1268
1269 movw Rn, #:lower16:__stack_chk_guard
1270 movt Rn, #:upper16:__stack_chk_guard
1271
1272 On ARMv5t, it is,
1273
1274 ldr Rn, .Label
1275 ....
    .Label:
1277 .word __stack_chk_guard
1278
   Since ldr/str are very common instructions, we can't use them as the
   'fingerprint' or 'signature' of a stack protector sequence.  Here we choose
   the sequence {movw/movt, ldr}/ldr/str plus the symbol __stack_chk_guard,
   if not stripped, as the 'fingerprint' of a stack protector code sequence.  */
1283
1284 static CORE_ADDR
1285 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1286 {
1287 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1288 unsigned int address, basereg;
1289 struct minimal_symbol *stack_chk_guard;
1290 int offset;
1291 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1292 CORE_ADDR addr;
1293
1294 /* Try to parse the instructions in Step 1. */
1295 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1296 &basereg, &offset);
1297 if (!addr)
1298 return pc;
1299
1300 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1301 /* If name of symbol doesn't start with '__stack_chk_guard', this
1302 instruction sequence is not for stack protector. If symbol is
1303 removed, we conservatively think this sequence is for stack protector. */
1304 if (stack_chk_guard
1305 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1306 strlen ("__stack_chk_guard")) != 0)
1307 return pc;
1308
1309 if (is_thumb)
1310 {
1311 unsigned int destreg;
1312 unsigned short insn
1313 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1314
1315 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1316 if ((insn & 0xf800) != 0x6800)
1317 return pc;
1318 if (bits (insn, 3, 5) != basereg)
1319 return pc;
1320 destreg = bits (insn, 0, 2);
1321
1322 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1323 byte_order_for_code);
1324 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1325 if ((insn & 0xf800) != 0x6000)
1326 return pc;
1327 if (destreg != bits (insn, 0, 2))
1328 return pc;
1329 }
1330 else
1331 {
1332 unsigned int destreg;
1333 unsigned int insn
1334 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1335
1336 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1337 if ((insn & 0x0e500000) != 0x04100000)
1338 return pc;
1339 if (bits (insn, 16, 19) != basereg)
1340 return pc;
1341 destreg = bits (insn, 12, 15);
1342 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1343 insn = read_memory_unsigned_integer (pc + offset + 4,
1344 4, byte_order_for_code);
1345 if ((insn & 0x0e500000) != 0x04000000)
1346 return pc;
1347 if (bits (insn, 12, 15) != destreg)
1348 return pc;
1349 }
1350 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1351 on arm. */
1352 if (is_thumb)
1353 return pc + offset + 4;
1354 else
1355 return pc + offset + 8;
1356 }
1357
1358 /* Advance the PC across any function entry prologue instructions to
1359 reach some "real" code.
1360
1361 The APCS (ARM Procedure Call Standard) defines the following
1362 prologue:
1363
1364 mov ip, sp
1365 [stmfd sp!, {a1,a2,a3,a4}]
1366 stmfd sp!, {...,fp,ip,lr,pc}
1367 [stfe f7, [sp, #-12]!]
1368 [stfe f6, [sp, #-12]!]
1369 [stfe f5, [sp, #-12]!]
1370 [stfe f4, [sp, #-12]!]
1371 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1372
1373 static CORE_ADDR
1374 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1375 {
1376 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1377 unsigned long inst;
1378 CORE_ADDR skip_pc;
1379 CORE_ADDR func_addr, limit_pc;
1380 struct symtab_and_line sal;
1381
1382 /* See if we can determine the end of the prologue via the symbol table.
1383 If so, then return either PC, or the PC after the prologue, whichever
1384 is greater. */
1385 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1386 {
1387 CORE_ADDR post_prologue_pc
1388 = skip_prologue_using_sal (gdbarch, func_addr);
1389 struct symtab *s = find_pc_symtab (func_addr);
1390
1391 if (post_prologue_pc)
1392 post_prologue_pc
1393 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1394
1395
1396 /* GCC always emits a line note before the prologue and another
1397 one after, even if the two are at the same address or on the
1398 same line. Take advantage of this so that we do not need to
1399 know every instruction that might appear in the prologue. We
1400 will have producer information for most binaries; if it is
1401 missing (e.g. for -gstabs), assuming the GNU tools. */
1402 if (post_prologue_pc
1403 && (s == NULL
1404 || s->producer == NULL
1405 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1406 return post_prologue_pc;
1407
1408 if (post_prologue_pc != 0)
1409 {
1410 CORE_ADDR analyzed_limit;
1411
1412 /* For non-GCC compilers, make sure the entire line is an
1413 acceptable prologue; GDB will round this function's
1414 return value up to the end of the following line so we
1415 can not skip just part of a line (and we do not want to).
1416
1417 RealView does not treat the prologue specially, but does
1418 associate prologue code with the opening brace; so this
1419 lets us skip the first line if we think it is the opening
1420 brace. */
1421 if (arm_pc_is_thumb (gdbarch, func_addr))
1422 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1423 post_prologue_pc, NULL);
1424 else
1425 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1426 post_prologue_pc, NULL);
1427
1428 if (analyzed_limit != post_prologue_pc)
1429 return func_addr;
1430
1431 return post_prologue_pc;
1432 }
1433 }
1434
1435 /* Can't determine prologue from the symbol table, need to examine
1436 instructions. */
1437
1438 /* Find an upper limit on the function prologue using the debug
1439 information. If the debug information could not be used to provide
1440 that bound, then use an arbitrary large number as the upper bound. */
1441 /* Like arm_scan_prologue, stop no later than pc + 64. */
1442 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1443 if (limit_pc == 0)
1444 limit_pc = pc + 64; /* Magic. */
1445
1446
1447 /* Check if this is Thumb code. */
1448 if (arm_pc_is_thumb (gdbarch, pc))
1449 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1450
1451 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1452 {
1453 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1454
1455 /* "mov ip, sp" is no longer a required part of the prologue. */
1456 if (inst == 0xe1a0c00d) /* mov ip, sp */
1457 continue;
1458
1459 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1460 continue;
1461
1462 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1463 continue;
1464
1465 /* Some prologues begin with "str lr, [sp, #-4]!". */
1466 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1467 continue;
1468
1469 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1470 continue;
1471
1472 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1473 continue;
1474
1475 /* Any insns after this point may float into the code, if it makes
1476 for better instruction scheduling, so we skip them only if we
1477 find them, but still consider the function to be frame-ful. */
1478
1479 /* We may have either one sfmfd instruction here, or several stfe
1480 insns, depending on the version of floating point code we
1481 support. */
1482 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1483 continue;
1484
1485 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1486 continue;
1487
1488 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1489 continue;
1490
1491 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1492 continue;
1493
1494 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1495 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1496 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1497 continue;
1498
1499 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1500 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1501 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1502 continue;
1503
1504 /* Un-recognized instruction; stop scanning. */
1505 break;
1506 }
1507
1508 return skip_pc; /* End of prologue. */
1509 }
1510
1511 /* *INDENT-OFF* */
1512 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1513 This function decodes a Thumb function prologue to determine:
1514 1) the size of the stack frame
1515 2) which registers are saved on it
1516 3) the offsets of saved regs
1517 4) the offset from the stack pointer to the frame pointer
1518
1519 A typical Thumb function prologue would create this stack frame
1520 (offsets relative to FP)
1521 old SP -> 24 stack parameters
1522 20 LR
1523 16 R7
1524 R7 -> 0 local variables (16 bytes)
1525 SP -> -12 additional stack space (12 bytes)
1526 The frame size would thus be 36 bytes, and the frame offset would be
1527 12 bytes. The frame register is R7.
1528
1529 The comments for thumb_skip_prolog() describe the algorithm we use
1530 to detect the end of the prolog. */
1531 /* *INDENT-ON* */
1532
1533 static void
1534 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1535 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1536 {
1537 CORE_ADDR prologue_start;
1538 CORE_ADDR prologue_end;
1539 CORE_ADDR current_pc;
1540
1541 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1542 &prologue_end))
1543 {
1544 /* See comment in arm_scan_prologue for an explanation of
1545 this heuristics. */
1546 if (prologue_end > prologue_start + 64)
1547 {
1548 prologue_end = prologue_start + 64;
1549 }
1550 }
1551 else
1552 /* We're in the boondocks: we have no idea where the start of the
1553 function is. */
1554 return;
1555
1556 prologue_end = min (prologue_end, prev_pc);
1557
1558 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1559 }
1560
1561 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1562
1563 static int
1564 arm_instruction_changes_pc (uint32_t this_instr)
1565 {
1566 if (bits (this_instr, 28, 31) == INST_NV)
1567 /* Unconditional instructions. */
1568 switch (bits (this_instr, 24, 27))
1569 {
1570 case 0xa:
1571 case 0xb:
1572 /* Branch with Link and change to Thumb. */
1573 return 1;
1574 case 0xc:
1575 case 0xd:
1576 case 0xe:
1577 /* Coprocessor register transfer. */
1578 if (bits (this_instr, 12, 15) == 15)
1579 error (_("Invalid update to pc in instruction"));
1580 return 0;
1581 default:
1582 return 0;
1583 }
1584 else
1585 switch (bits (this_instr, 25, 27))
1586 {
1587 case 0x0:
1588 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1589 {
1590 /* Multiplies and extra load/stores. */
1591 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1592 /* Neither multiplies nor extension load/stores are allowed
1593 to modify PC. */
1594 return 0;
1595
1596 /* Otherwise, miscellaneous instructions. */
1597
1598 /* BX <reg>, BXJ <reg>, BLX <reg> */
1599 if (bits (this_instr, 4, 27) == 0x12fff1
1600 || bits (this_instr, 4, 27) == 0x12fff2
1601 || bits (this_instr, 4, 27) == 0x12fff3)
1602 return 1;
1603
1604 /* Other miscellaneous instructions are unpredictable if they
1605 modify PC. */
1606 return 0;
1607 }
1608 /* Data processing instruction. Fall through. */
1609
1610 case 0x1:
1611 if (bits (this_instr, 12, 15) == 15)
1612 return 1;
1613 else
1614 return 0;
1615
1616 case 0x2:
1617 case 0x3:
1618 /* Media instructions and architecturally undefined instructions. */
1619 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1620 return 0;
1621
1622 /* Stores. */
1623 if (bit (this_instr, 20) == 0)
1624 return 0;
1625
1626 /* Loads. */
1627 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1628 return 1;
1629 else
1630 return 0;
1631
1632 case 0x4:
1633 /* Load/store multiple. */
1634 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1635 return 1;
1636 else
1637 return 0;
1638
1639 case 0x5:
1640 /* Branch and branch with link. */
1641 return 1;
1642
1643 case 0x6:
1644 case 0x7:
1645 /* Coprocessor transfers or SWIs can not affect PC. */
1646 return 0;
1647
1648 default:
1649 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1650 }
1651 }
1652
1653 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1654 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1655 fill it in. Return the first address not recognized as a prologue
1656 instruction.
1657
1658 We recognize all the instructions typically found in ARM prologues,
1659 plus harmless instructions which can be skipped (either for analysis
1660 purposes, or a more restrictive set that can be skipped when finding
1661 the end of the prologue). */
1662
1663 static CORE_ADDR
1664 arm_analyze_prologue (struct gdbarch *gdbarch,
1665 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1666 struct arm_prologue_cache *cache)
1667 {
1668 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1669 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1670 int regno;
1671 CORE_ADDR offset, current_pc;
1672 pv_t regs[ARM_FPS_REGNUM];
1673 struct pv_area *stack;
1674 struct cleanup *back_to;
1675 int framereg, framesize;
1676 CORE_ADDR unrecognized_pc = 0;
1677
1678 /* Search the prologue looking for instructions that set up the
1679 frame pointer, adjust the stack pointer, and save registers.
1680
1681 Be careful, however, and if it doesn't look like a prologue,
1682 don't try to scan it. If, for instance, a frameless function
1683 begins with stmfd sp!, then we will tell ourselves there is
1684 a frame, which will confuse stack traceback, as well as "finish"
1685 and other operations that rely on a knowledge of the stack
1686 traceback. */
1687
1688 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1689 regs[regno] = pv_register (regno, 0);
1690 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1691 back_to = make_cleanup_free_pv_area (stack);
1692
1693 for (current_pc = prologue_start;
1694 current_pc < prologue_end;
1695 current_pc += 4)
1696 {
1697 unsigned int insn
1698 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1699
1700 if (insn == 0xe1a0c00d) /* mov ip, sp */
1701 {
1702 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1703 continue;
1704 }
1705 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1706 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1707 {
1708 unsigned imm = insn & 0xff; /* immediate value */
1709 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1710 int rd = bits (insn, 12, 15);
1711 imm = (imm >> rot) | (imm << (32 - rot));
1712 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1713 continue;
1714 }
1715 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1716 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1717 {
1718 unsigned imm = insn & 0xff; /* immediate value */
1719 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1720 int rd = bits (insn, 12, 15);
1721 imm = (imm >> rot) | (imm << (32 - rot));
1722 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1723 continue;
1724 }
1725 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1726 [sp, #-4]! */
1727 {
1728 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1729 break;
1730 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1731 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1732 regs[bits (insn, 12, 15)]);
1733 continue;
1734 }
1735 else if ((insn & 0xffff0000) == 0xe92d0000)
1736 /* stmfd sp!, {..., fp, ip, lr, pc}
1737 or
1738 stmfd sp!, {a1, a2, a3, a4} */
1739 {
1740 int mask = insn & 0xffff;
1741
1742 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1743 break;
1744
1745 /* Calculate offsets of saved registers. */
1746 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1747 if (mask & (1 << regno))
1748 {
1749 regs[ARM_SP_REGNUM]
1750 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1751 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1752 }
1753 }
1754 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1755 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1756 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1757 {
1758 /* No need to add this to saved_regs -- it's just an arg reg. */
1759 continue;
1760 }
1761 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1762 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1763 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1764 {
1765 /* No need to add this to saved_regs -- it's just an arg reg. */
1766 continue;
1767 }
1768 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1769 { registers } */
1770 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1771 {
1772 /* No need to add this to saved_regs -- it's just arg regs. */
1773 continue;
1774 }
1775 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1776 {
1777 unsigned imm = insn & 0xff; /* immediate value */
1778 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1779 imm = (imm >> rot) | (imm << (32 - rot));
1780 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1781 }
1782 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1783 {
1784 unsigned imm = insn & 0xff; /* immediate value */
1785 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1786 imm = (imm >> rot) | (imm << (32 - rot));
1787 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1788 }
1789 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1790 [sp, -#c]! */
1791 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1792 {
1793 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1794 break;
1795
1796 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1797 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1798 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1799 }
1800 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1801 [sp!] */
1802 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1803 {
1804 int n_saved_fp_regs;
1805 unsigned int fp_start_reg, fp_bound_reg;
1806
1807 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1808 break;
1809
1810 if ((insn & 0x800) == 0x800) /* N0 is set */
1811 {
1812 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1813 n_saved_fp_regs = 3;
1814 else
1815 n_saved_fp_regs = 1;
1816 }
1817 else
1818 {
1819 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1820 n_saved_fp_regs = 2;
1821 else
1822 n_saved_fp_regs = 4;
1823 }
1824
1825 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1826 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1827 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1828 {
1829 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1830 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1831 regs[fp_start_reg++]);
1832 }
1833 }
1834 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1835 {
1836 /* Allow some special function calls when skipping the
1837 prologue; GCC generates these before storing arguments to
1838 the stack. */
1839 CORE_ADDR dest = BranchDest (current_pc, insn);
1840
1841 if (skip_prologue_function (gdbarch, dest, 0))
1842 continue;
1843 else
1844 break;
1845 }
1846 else if ((insn & 0xf0000000) != 0xe0000000)
1847 break; /* Condition not true, exit early. */
1848 else if (arm_instruction_changes_pc (insn))
1849 /* Don't scan past anything that might change control flow. */
1850 break;
1851 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1852 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1853 /* Ignore block loads from the stack, potentially copying
1854 parameters from memory. */
1855 continue;
1856 else if ((insn & 0xfc500000) == 0xe4100000
1857 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1858 /* Similarly ignore single loads from the stack. */
1859 continue;
1860 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1861 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1862 register instead of the stack. */
1863 continue;
1864 else
1865 {
1866 /* The optimizer might shove anything into the prologue,
1867 so we just skip what we don't recognize. */
1868 unrecognized_pc = current_pc;
1869 continue;
1870 }
1871 }
1872
1873 if (unrecognized_pc == 0)
1874 unrecognized_pc = current_pc;
1875
1876 /* The frame size is just the distance from the frame register
1877 to the original stack pointer. */
1878 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1879 {
1880 /* Frame pointer is fp. */
1881 framereg = ARM_FP_REGNUM;
1882 framesize = -regs[ARM_FP_REGNUM].k;
1883 }
1884 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1885 {
1886 /* Try the stack pointer... this is a bit desperate. */
1887 framereg = ARM_SP_REGNUM;
1888 framesize = -regs[ARM_SP_REGNUM].k;
1889 }
1890 else
1891 {
1892 /* We're just out of luck. We don't know where the frame is. */
1893 framereg = -1;
1894 framesize = 0;
1895 }
1896
1897 if (cache)
1898 {
1899 cache->framereg = framereg;
1900 cache->framesize = framesize;
1901
1902 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1903 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1904 cache->saved_regs[regno].addr = offset;
1905 }
1906
1907 if (arm_debug)
1908 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1909 paddress (gdbarch, unrecognized_pc));
1910
1911 do_cleanups (back_to);
1912 return unrecognized_pc;
1913 }
1914
1915 static void
1916 arm_scan_prologue (struct frame_info *this_frame,
1917 struct arm_prologue_cache *cache)
1918 {
1919 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1920 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1921 int regno;
1922 CORE_ADDR prologue_start, prologue_end, current_pc;
1923 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1924 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1925 pv_t regs[ARM_FPS_REGNUM];
1926 struct pv_area *stack;
1927 struct cleanup *back_to;
1928 CORE_ADDR offset;
1929
1930 /* Assume there is no frame until proven otherwise. */
1931 cache->framereg = ARM_SP_REGNUM;
1932 cache->framesize = 0;
1933
1934 /* Check for Thumb prologue. */
1935 if (arm_frame_is_thumb (this_frame))
1936 {
1937 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1938 return;
1939 }
1940
1941 /* Find the function prologue. If we can't find the function in
1942 the symbol table, peek in the stack frame to find the PC. */
1943 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1944 &prologue_end))
1945 {
1946 /* One way to find the end of the prologue (which works well
1947 for unoptimized code) is to do the following:
1948
1949 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1950
1951 if (sal.line == 0)
1952 prologue_end = prev_pc;
1953 else if (sal.end < prologue_end)
1954 prologue_end = sal.end;
1955
1956 This mechanism is very accurate so long as the optimizer
1957 doesn't move any instructions from the function body into the
1958 prologue. If this happens, sal.end will be the last
1959 instruction in the first hunk of prologue code just before
1960 the first instruction that the scheduler has moved from
1961 the body to the prologue.
1962
1963 In order to make sure that we scan all of the prologue
1964 instructions, we use a slightly less accurate mechanism which
1965 may scan more than necessary. To help compensate for this
1966 lack of accuracy, the prologue scanning loop below contains
1967 several clauses which'll cause the loop to terminate early if
1968 an implausible prologue instruction is encountered.
1969
1970 The expression
1971
1972 prologue_start + 64
1973
1974 is a suitable endpoint since it accounts for the largest
1975 possible prologue plus up to five instructions inserted by
1976 the scheduler. */
1977
1978 if (prologue_end > prologue_start + 64)
1979 {
1980 prologue_end = prologue_start + 64; /* See above. */
1981 }
1982 }
1983 else
1984 {
1985 /* We have no symbol information. Our only option is to assume this
1986 function has a standard stack frame and the normal frame register.
1987 Then, we can find the value of our frame pointer on entrance to
1988 the callee (or at the present moment if this is the innermost frame).
1989 The value stored there should be the address of the stmfd + 8. */
1990 CORE_ADDR frame_loc;
1991 LONGEST return_value;
1992
1993 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1994 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1995 return;
1996 else
1997 {
1998 prologue_start = gdbarch_addr_bits_remove
1999 (gdbarch, return_value) - 8;
2000 prologue_end = prologue_start + 64; /* See above. */
2001 }
2002 }
2003
2004 if (prev_pc < prologue_end)
2005 prologue_end = prev_pc;
2006
2007 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2008 }
2009
2010 static struct arm_prologue_cache *
2011 arm_make_prologue_cache (struct frame_info *this_frame)
2012 {
2013 int reg;
2014 struct arm_prologue_cache *cache;
2015 CORE_ADDR unwound_fp;
2016
2017 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2018 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2019
2020 arm_scan_prologue (this_frame, cache);
2021
2022 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2023 if (unwound_fp == 0)
2024 return cache;
2025
2026 cache->prev_sp = unwound_fp + cache->framesize;
2027
2028 /* Calculate actual addresses of saved registers using offsets
2029 determined by arm_scan_prologue. */
2030 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2031 if (trad_frame_addr_p (cache->saved_regs, reg))
2032 cache->saved_regs[reg].addr += cache->prev_sp;
2033
2034 return cache;
2035 }
2036
2037 /* Our frame ID for a normal frame is the current function's starting PC
2038 and the caller's SP when we were called. */
2039
2040 static void
2041 arm_prologue_this_id (struct frame_info *this_frame,
2042 void **this_cache,
2043 struct frame_id *this_id)
2044 {
2045 struct arm_prologue_cache *cache;
2046 struct frame_id id;
2047 CORE_ADDR pc, func;
2048
2049 if (*this_cache == NULL)
2050 *this_cache = arm_make_prologue_cache (this_frame);
2051 cache = *this_cache;
2052
2053 /* This is meant to halt the backtrace at "_start". */
2054 pc = get_frame_pc (this_frame);
2055 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2056 return;
2057
2058 /* If we've hit a wall, stop. */
2059 if (cache->prev_sp == 0)
2060 return;
2061
2062 /* Use function start address as part of the frame ID. If we cannot
2063 identify the start address (due to missing symbol information),
2064 fall back to just using the current PC. */
2065 func = get_frame_func (this_frame);
2066 if (!func)
2067 func = pc;
2068
2069 id = frame_id_build (cache->prev_sp, func);
2070 *this_id = id;
2071 }
2072
2073 static struct value *
2074 arm_prologue_prev_register (struct frame_info *this_frame,
2075 void **this_cache,
2076 int prev_regnum)
2077 {
2078 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2079 struct arm_prologue_cache *cache;
2080
2081 if (*this_cache == NULL)
2082 *this_cache = arm_make_prologue_cache (this_frame);
2083 cache = *this_cache;
2084
2085 /* If we are asked to unwind the PC, then we need to return the LR
2086 instead. The prologue may save PC, but it will point into this
2087 frame's prologue, not the next frame's resume location. Also
2088 strip the saved T bit. A valid LR may have the low bit set, but
2089 a valid PC never does. */
2090 if (prev_regnum == ARM_PC_REGNUM)
2091 {
2092 CORE_ADDR lr;
2093
2094 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2095 return frame_unwind_got_constant (this_frame, prev_regnum,
2096 arm_addr_bits_remove (gdbarch, lr));
2097 }
2098
2099 /* SP is generally not saved to the stack, but this frame is
2100 identified by the next frame's stack pointer at the time of the call.
2101 The value was already reconstructed into PREV_SP. */
2102 if (prev_regnum == ARM_SP_REGNUM)
2103 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2104
2105 /* The CPSR may have been changed by the call instruction and by the
2106 called function. The only bit we can reconstruct is the T bit,
2107 by checking the low bit of LR as of the call. This is a reliable
2108 indicator of Thumb-ness except for some ARM v4T pre-interworking
2109 Thumb code, which could get away with a clear low bit as long as
2110 the called function did not use bx. Guess that all other
2111 bits are unchanged; the condition flags are presumably lost,
2112 but the processor status is likely valid. */
2113 if (prev_regnum == ARM_PS_REGNUM)
2114 {
2115 CORE_ADDR lr, cpsr;
2116 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2117
2118 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2119 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2120 if (IS_THUMB_ADDR (lr))
2121 cpsr |= t_bit;
2122 else
2123 cpsr &= ~t_bit;
2124 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2125 }
2126
2127 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2128 prev_regnum);
2129 }
2130
/* Unwinder based on scanning the function prologue; used when neither
   DWARF CFI nor an exception table entry applies.  */

struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2139
2140 /* Maintain a list of ARM exception table entries per objfile, similar to the
2141 list of mapping symbols. We only cache entries for standard ARM-defined
2142 personality routines; the cache will contain only the frame unwinding
2143 instructions associated with the entry (not the descriptors). */
2144
/* Per-objfile key under which the parsed exception table cache is
   registered.  */
static const struct objfile_data *arm_exidx_data_key;

/* One cached exception table entry: the section-relative start address
   of the code region it covers, and the normalized unwind instructions
   (NULL if there are none, e.g. for EXIDX_CANTUNWIND entries).  */
struct arm_exidx_entry
{
  bfd_vma addr;
  gdb_byte *entry;
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

/* Per-objfile cache: one vector of entries per BFD section, indexed by
   the section's index.  */
struct arm_exidx_data
{
  VEC(arm_exidx_entry_s) **section_maps;
};
2159
2160 static void
2161 arm_exidx_data_free (struct objfile *objfile, void *arg)
2162 {
2163 struct arm_exidx_data *data = arg;
2164 unsigned int i;
2165
2166 for (i = 0; i < objfile->obfd->section_count; i++)
2167 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2168 }
2169
2170 static inline int
2171 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2172 const struct arm_exidx_entry *rhs)
2173 {
2174 return lhs->addr < rhs->addr;
2175 }
2176
2177 static struct obj_section *
2178 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2179 {
2180 struct obj_section *osect;
2181
2182 ALL_OBJFILE_OSECTIONS (objfile, osect)
2183 if (bfd_get_section_flags (objfile->obfd,
2184 osect->the_bfd_section) & SEC_ALLOC)
2185 {
2186 bfd_vma start, size;
2187 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2188 size = bfd_get_section_size (osect->the_bfd_section);
2189
2190 if (start <= vma && vma < start + size)
2191 return osect;
2192 }
2193
2194 return NULL;
2195 }
2196
2197 /* Parse contents of exception table and exception index sections
2198 of OBJFILE, and fill in the exception table entry cache.
2199
2200 For each entry that refers to a standard ARM-defined personality
2201 routine, extract the frame unwinding instructions (from either
2202 the index or the table section). The unwinding instructions
2203 are normalized by:
2204 - extracting them from the rest of the table data
2205 - converting to host endianness
2206 - appending the implicit 0xb0 ("Finish") code
2207
2208 The extracted and normalized instructions are stored for later
2209 retrieval by the arm_find_exidx_entry routine. */
2210
static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  bfd_size_type exidx_size = 0, extab_size = 0;
  gdb_byte *exidx_data = NULL, *extab_data = NULL;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_size = bfd_get_section_size (exidx);
      exidx_data = xmalloc (exidx_size);
      make_cleanup (xfree, exidx_data);

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data, 0, exidx_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_size = bfd_get_section_size (extab);
      extab_data = xmalloc (extab_size);
      make_cleanup (xfree, extab_data);

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data, 0, extab_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  /* Allocate exception table data structure.  */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index entry consists of two
     32-bit words: a place-relative function address and a value
     describing where to find the unwind data.  */
  for (i = 0; i < exidx_size / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function.  The first word is a
	 prel31 offset: sign-extend from 31 bits, then relocate
	 against the address of the word itself.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.
	     ENTRY stays NULL, but the record is still pushed so the
	     covered region is marked.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  The low three bytes of VAL hold
	     the unwind instructions.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  VAL is a prel31
	     offset to the entry; sign-extend and relocate as above.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  Bits 16-23 give the number of
		     additional instruction words.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  The first word is a
		     prel31 offset to the routine itself.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_size)
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   extab_data + addr - extab_vma);
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address.  */
      if (n_words)
	if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
					       n_bytes + n_words * 4 + 1);

	  /* Copy the valid bytes of WORD, most significant first.
	     (N_BYTES is decremented in the condition, so the body sees
	     the updated value -- well-defined.)  */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  /* Then the N_WORDS additional words, each converted to
	     big-endian byte order (the normalized host form).  */
	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }

  do_cleanups (cleanups);
}
2418
2419 /* Search for the exception table entry covering MEMADDR. If one is found,
2420 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2421 set *START to the start of the region covered by this entry. */
2422
2423 static gdb_byte *
2424 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2425 {
2426 struct obj_section *sec;
2427
2428 sec = find_pc_section (memaddr);
2429 if (sec != NULL)
2430 {
2431 struct arm_exidx_data *data;
2432 VEC(arm_exidx_entry_s) *map;
2433 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2434 unsigned int idx;
2435
2436 data = objfile_data (sec->objfile, arm_exidx_data_key);
2437 if (data != NULL)
2438 {
2439 map = data->section_maps[sec->the_bfd_section->index];
2440 if (!VEC_empty (arm_exidx_entry_s, map))
2441 {
2442 struct arm_exidx_entry *map_sym;
2443
2444 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2445 arm_compare_exidx_entries);
2446
2447 /* VEC_lower_bound finds the earliest ordered insertion
2448 point. If the following symbol starts at this exact
2449 address, we use that; otherwise, the preceding
2450 exception table entry covers this address. */
2451 if (idx < VEC_length (arm_exidx_entry_s, map))
2452 {
2453 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2454 if (map_sym->addr == map_key.addr)
2455 {
2456 if (start)
2457 *start = map_sym->addr + obj_section_addr (sec);
2458 return map_sym->entry;
2459 }
2460 }
2461
2462 if (idx > 0)
2463 {
2464 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2465 if (start)
2466 *start = map_sym->addr + obj_section_addr (sec);
2467 return map_sym->entry;
2468 }
2469 }
2470 }
2471 }
2472
2473 return NULL;
2474 }
2475
2476 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2477 instruction list from the ARM exception table entry ENTRY, allocate and
2478 return a prologue cache structure describing how to unwind this frame.
2479
2480 Return NULL if the unwinding instruction list contains a "spare",
2481 "reserved" or "refuse to unwind" instruction as defined in section
2482 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2483 for the ARM Architecture" document. */
2484
static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  /* VSP is the "virtual stack pointer" of the EHABI unwinding model;
     VSP_VALID tracks whether it currently holds a usable value.  */
  CORE_ADDR vsp = 0;
  int vsp_valid = 0;

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.  */
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  */
      insn = *entry++;

      if ((insn & 0xc0) == 0)
	{
	  /* 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      else if ((insn & 0xc0) == 0x40)
	{
	  /* 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4.  */
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  /* 1000iiii iiiiiiii: pop r4-r15 under a 12-bit mask.  */
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  /* 1001nnnn: set vsp = r[nnnn].  */
	  int reg = insn & 0xf;

	  /* Reserved cases.  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  /* 1010Wnnn: pop r4-r[4+nnn], plus r14 if W is set.  */
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* 10110000: Finish.  */
	  /* We could only have updated PC by popping into it; if so, it
	     will show up as address.  Otherwise, copy LR into PC.  */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  /* 10110001 0000iiii: pop r0-r3 under mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2).  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  /* Decode the ULEB128 operand (7 bits per byte, low byte
	     first, high bit set on all but the last byte).  */
	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  /* 10110011 sssscccc: pop VFP registers saved by FSTMFDX.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  /* 10111nnn: pop D8-D[8+nnn] saved by FSTMFDX.  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if (insn == 0xc6)
	{
	  /* 11000110 sssscccc: pop iWMMXt WR[ssss]-WR[ssss+cccc].  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  /* 11000111 0000iiii: pop iWMMXt WCGR registers under mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  /* 11000nnn: pop iWMMXt WR[10]-WR[10+nnn].  Note this test
	     must come after the 0xc6/0xc7 cases above, which it would
	     otherwise also match.  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  /* 11001000 sssscccc: pop VFP D[16+ssss]-D[16+ssss+cccc]
	     (VPUSH-style, no trailing pad word).  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  /* 11001001 sssscccc: pop VFP D[ssss]-D[ssss+cccc]
	     (VPUSH-style, no trailing pad word).  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  /* 11010nnn: pop VFP D[8]-D[8+nnn] (VPUSH-style).  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}
2774
2775 /* Unwinding via ARM exception table entries. Note that the sniffer
2776 already computes a filled-in prologue cache, which is then used
2777 with the same arm_prologue_this_id and arm_prologue_prev_register
2778 routines also used for prologue-parsing based unwinding. */
2779
static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.
	 We detect this by reading the instruction just before the
	 current PC -- presumably the svc that entered the kernel.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  LONGEST insn;

	  /* Thumb svc: 0xDF imm8.  */
	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
					byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  LONGEST insn;

	  /* ARM svc: bits 27:24 all set (any condition code).  */
	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
					byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2869
/* Unwinder driven by ARM exception table entries.  The sniffer builds
   a filled-in prologue cache, so the this_id and prev_register hooks
   are shared with the prologue-scanning unwinder.  */

struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2878
2879 static struct arm_prologue_cache *
2880 arm_make_stub_cache (struct frame_info *this_frame)
2881 {
2882 struct arm_prologue_cache *cache;
2883
2884 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2885 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2886
2887 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2888
2889 return cache;
2890 }
2891
2892 /* Our frame ID for a stub frame is the current SP and LR. */
2893
2894 static void
2895 arm_stub_this_id (struct frame_info *this_frame,
2896 void **this_cache,
2897 struct frame_id *this_id)
2898 {
2899 struct arm_prologue_cache *cache;
2900
2901 if (*this_cache == NULL)
2902 *this_cache = arm_make_stub_cache (this_frame);
2903 cache = *this_cache;
2904
2905 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2906 }
2907
2908 static int
2909 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2910 struct frame_info *this_frame,
2911 void **this_prologue_cache)
2912 {
2913 CORE_ADDR addr_in_block;
2914 char dummy[4];
2915
2916 addr_in_block = get_frame_address_in_block (this_frame);
2917 if (in_plt_section (addr_in_block, NULL)
2918 /* We also use the stub winder if the target memory is unreadable
2919 to avoid having the prologue unwinder trying to read it. */
2920 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2921 return 1;
2922
2923 return 0;
2924 }
2925
/* Unwinder for stub frames (PLT entries, unreadable code); builds its
   cache from the current SP and reuses the prologue prev_register
   logic.  */

struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2934
2935 static CORE_ADDR
2936 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2937 {
2938 struct arm_prologue_cache *cache;
2939
2940 if (*this_cache == NULL)
2941 *this_cache = arm_make_prologue_cache (this_frame);
2942 cache = *this_cache;
2943
2944 return cache->prev_sp - cache->framesize;
2945 }
2946
/* Frame base table for the prologue unwinder; the same address is used
   for the frame base, locals base and args base.  */

struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,
  arm_normal_frame_base,
  arm_normal_frame_base
};
2953
2954 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2955 dummy frame. The frame ID's base needs to match the TOS value
2956 saved by save_dummy_frame_tos() and returned from
2957 arm_push_dummy_call, and the PC needs to match the dummy frame's
2958 breakpoint. */
2959
2960 static struct frame_id
2961 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2962 {
2963 return frame_id_build (get_frame_register_unsigned (this_frame,
2964 ARM_SP_REGNUM),
2965 get_frame_pc (this_frame));
2966 }
2967
2968 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2969 be used to construct the previous frame's ID, after looking up the
2970 containing function). */
2971
2972 static CORE_ADDR
2973 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2974 {
2975 CORE_ADDR pc;
2976 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2977 return arm_addr_bits_remove (gdbarch, pc);
2978 }
2979
/* Return the previous frame's stack pointer, unwound from THIS_FRAME.  */

static CORE_ADDR
arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
}
2985
2986 static struct value *
2987 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2988 int regnum)
2989 {
2990 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2991 CORE_ADDR lr, cpsr;
2992 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2993
2994 switch (regnum)
2995 {
2996 case ARM_PC_REGNUM:
2997 /* The PC is normally copied from the return column, which
2998 describes saves of LR. However, that version may have an
2999 extra bit set to indicate Thumb state. The bit is not
3000 part of the PC. */
3001 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3002 return frame_unwind_got_constant (this_frame, regnum,
3003 arm_addr_bits_remove (gdbarch, lr));
3004
3005 case ARM_PS_REGNUM:
3006 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3007 cpsr = get_frame_register_unsigned (this_frame, regnum);
3008 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3009 if (IS_THUMB_ADDR (lr))
3010 cpsr |= t_bit;
3011 else
3012 cpsr &= ~t_bit;
3013 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3014
3015 default:
3016 internal_error (__FILE__, __LINE__,
3017 _("Unexpected register %d"), regnum);
3018 }
3019 }
3020
3021 static void
3022 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3023 struct dwarf2_frame_state_reg *reg,
3024 struct frame_info *this_frame)
3025 {
3026 switch (regnum)
3027 {
3028 case ARM_PC_REGNUM:
3029 case ARM_PS_REGNUM:
3030 reg->how = DWARF2_FRAME_REG_FN;
3031 reg->loc.fn = arm_dwarf2_prev_register;
3032 break;
3033 case ARM_SP_REGNUM:
3034 reg->how = DWARF2_FRAME_REG_CFA;
3035 break;
3036 }
3037 }
3038
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   PC is assumed to be a Thumb-mode address.  The result is non-zero
   only when both a return-type instruction is found at or after PC and
   a stack-adjusting instruction is found, either during the forward
   scan or (as a fallback heuristic) immediately before PC.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without function bounds we cannot scan; conservatively report
     "not in epilogue".  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      /* A failed memory read terminates the scan conservatively.  */
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (insn == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn & 0xfe00) == 0xbc00)  /* pop <registers> */
	{
	  found_stack_adjust = 1;
	  if (insn & 0x0100)  /* <registers> include PC.  */
	    found_return = 1;
	}
      else if ((insn & 0xe000) == 0xe000)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit instruction.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    /* Any other 32-bit instruction is incompatible with an
	       epilogue sequence.  */
	    break;
	}
      else
	/* Any other 16-bit instruction is incompatible with an
	   epilogue sequence.  */
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* INSN is the first halfword (candidate first half of a 32-bit
	 instruction); INSN2 is the halfword immediately before PC
	 (candidate 16-bit instruction).  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (insn2 == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xbc00)  /* pop <registers> without PC */
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}
3163
3164 /* Return true if we are in the function's epilogue, i.e. after the
3165 instruction that destroyed the function's stack frame. */
3166
3167 static int
3168 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3169 {
3170 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3171 unsigned int insn;
3172 int found_return, found_stack_adjust;
3173 CORE_ADDR func_start, func_end;
3174
3175 if (arm_pc_is_thumb (gdbarch, pc))
3176 return thumb_in_function_epilogue_p (gdbarch, pc);
3177
3178 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3179 return 0;
3180
3181 /* We are in the epilogue if the previous instruction was a stack
3182 adjustment and the next instruction is a possible return (bx, mov
3183 pc, or pop). We could have to scan backwards to find the stack
3184 adjustment, or forwards to find the return, but this is a decent
3185 approximation. First scan forwards. */
3186
3187 found_return = 0;
3188 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3189 if (bits (insn, 28, 31) != INST_NV)
3190 {
3191 if ((insn & 0x0ffffff0) == 0x012fff10)
3192 /* BX. */
3193 found_return = 1;
3194 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3195 /* MOV PC. */
3196 found_return = 1;
3197 else if ((insn & 0x0fff0000) == 0x08bd0000
3198 && (insn & 0x0000c000) != 0)
3199 /* POP (LDMIA), including PC or LR. */
3200 found_return = 1;
3201 }
3202
3203 if (!found_return)
3204 return 0;
3205
3206 /* Scan backwards. This is just a heuristic, so do not worry about
3207 false positives from mode changes. */
3208
3209 if (pc < func_start + 4)
3210 return 0;
3211
3212 found_stack_adjust = 0;
3213 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3214 if (bits (insn, 28, 31) != INST_NV)
3215 {
3216 if ((insn & 0x0df0f000) == 0x0080d000)
3217 /* ADD SP (register or immediate). */
3218 found_stack_adjust = 1;
3219 else if ((insn & 0x0df0f000) == 0x0040d000)
3220 /* SUB SP (register or immediate). */
3221 found_stack_adjust = 1;
3222 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3223 /* MOV SP. */
3224 found_stack_adjust = 1;
3225 else if ((insn & 0x0fff0000) == 0x08bd0000)
3226 /* POP (LDMIA). */
3227 found_stack_adjust = 1;
3228 }
3229
3230 if (found_stack_adjust)
3231 return 1;
3232
3233 return 0;
3234 }
3235
3236
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  int len;			/* Length of DATA in bytes.  */
  struct stack_item *prev;	/* Next item down the stack, or NULL.  */
  void *data;			/* xmalloc'd copy of the item's bytes.  */
};
3246
3247 static struct stack_item *
3248 push_stack_item (struct stack_item *prev, const void *contents, int len)
3249 {
3250 struct stack_item *si;
3251 si = xmalloc (sizeof (struct stack_item));
3252 si->data = xmalloc (len);
3253 si->len = len;
3254 si->prev = prev;
3255 memcpy (si->data, contents, len);
3256 return si;
3257 }
3258
3259 static struct stack_item *
3260 pop_stack_item (struct stack_item *si)
3261 {
3262 struct stack_item *dead = si;
3263 si = si->prev;
3264 xfree (dead->data);
3265 xfree (dead);
3266 return si;
3267 }
3268
3269
3270 /* Return the alignment (in bytes) of the given type. */
3271
3272 static int
3273 arm_type_align (struct type *t)
3274 {
3275 int n;
3276 int align;
3277 int falign;
3278
3279 t = check_typedef (t);
3280 switch (TYPE_CODE (t))
3281 {
3282 default:
3283 /* Should never happen. */
3284 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3285 return 4;
3286
3287 case TYPE_CODE_PTR:
3288 case TYPE_CODE_ENUM:
3289 case TYPE_CODE_INT:
3290 case TYPE_CODE_FLT:
3291 case TYPE_CODE_SET:
3292 case TYPE_CODE_RANGE:
3293 case TYPE_CODE_BITSTRING:
3294 case TYPE_CODE_REF:
3295 case TYPE_CODE_CHAR:
3296 case TYPE_CODE_BOOL:
3297 return TYPE_LENGTH (t);
3298
3299 case TYPE_CODE_ARRAY:
3300 case TYPE_CODE_COMPLEX:
3301 /* TODO: What about vector types? */
3302 return arm_type_align (TYPE_TARGET_TYPE (t));
3303
3304 case TYPE_CODE_STRUCT:
3305 case TYPE_CODE_UNION:
3306 align = 1;
3307 for (n = 0; n < TYPE_NFIELDS (t); n++)
3308 {
3309 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3310 if (falign > align)
3311 align = falign;
3312 }
3313 return align;
3314 }
3315 }
3316
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,		/* No base type established yet.  */
  VFP_CPRC_SINGLE,		/* Single-precision float (4 bytes).  */
  VFP_CPRC_DOUBLE,		/* Double-precision float (8 bytes).  */
  VFP_CPRC_VEC64,		/* 64-bit vector (8 bytes).  */
  VFP_CPRC_VEC128		/* 128-bit vector (16 bytes).  */
};
3328
3329 /* The length of one element of base type B. */
3330
3331 static unsigned
3332 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3333 {
3334 switch (b)
3335 {
3336 case VFP_CPRC_SINGLE:
3337 return 4;
3338 case VFP_CPRC_DOUBLE:
3339 return 8;
3340 case VFP_CPRC_VEC64:
3341 return 8;
3342 case VFP_CPRC_VEC128:
3343 return 16;
3344 default:
3345 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3346 (int) b);
3347 }
3348 }
3349
3350 /* The character ('s', 'd' or 'q') for the type of VFP register used
3351 for passing base type B. */
3352
3353 static int
3354 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3355 {
3356 switch (b)
3357 {
3358 case VFP_CPRC_SINGLE:
3359 return 's';
3360 case VFP_CPRC_DOUBLE:
3361 return 'd';
3362 case VFP_CPRC_VEC64:
3363 return 'd';
3364 case VFP_CPRC_VEC128:
3365 return 'q';
3366 default:
3367 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3368 (int) b);
3369 }
3370 }
3371
/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.  If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vectors and complex types are not currently supported,
   matching the generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* A float counts as one element if its size matches the
	 established base type (or establishes it when still
	 unknown).  Only 4- and 8-byte floats qualify.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	int count;
	unsigned unitlen;
	/* Classify the element type; the element count is derived from
	   the array's total length.  */
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* Sum the element counts of all fields; all must classify
	   identically.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	/* Reject structs with padding or other non-element content.  */
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* A union contributes as many elements as its largest member.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
3492
3493 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3494 if passed to or returned from a non-variadic function with the VFP
3495 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3496 *BASE_TYPE to the base type for T and *COUNT to the number of
3497 elements of that base type before returning. */
3498
3499 static int
3500 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3501 int *count)
3502 {
3503 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3504 int c = arm_vfp_cprc_sub_candidate (t, &b);
3505 if (c <= 0 || c > 4)
3506 return 0;
3507 *base_type = b;
3508 *count = c;
3509 return 1;
3510 }
3511
3512 /* Return 1 if the VFP ABI should be used for passing arguments to and
3513 returning values from a function of type FUNC_TYPE, 0
3514 otherwise. */
3515
3516 static int
3517 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3518 {
3519 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3520 /* Variadic functions always use the base ABI. Assume that functions
3521 without debug info are not variadic. */
3522 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3523 return 0;
3524 /* The VFP ABI is only supported as a variant of AAPCS. */
3525 if (tdep->arm_abi != ARM_ABI_AAPCS)
3526 return 0;
3527 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3528 }
3529
3530 /* We currently only support passing parameters in integer registers, which
3531 conforms with GCC's default model, and VFP argument passing following
3532 the VFP variant of AAPCS. Several other variants exist and
3533 we should probably support some of them based on the selected ABI. */
3534
3535 static CORE_ADDR
3536 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3537 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3538 struct value **args, CORE_ADDR sp, int struct_return,
3539 CORE_ADDR struct_addr)
3540 {
3541 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3542 int argnum;
3543 int argreg;
3544 int nstack;
3545 struct stack_item *si = NULL;
3546 int use_vfp_abi;
3547 struct type *ftype;
3548 unsigned vfp_regs_free = (1 << 16) - 1;
3549
3550 /* Determine the type of this function and whether the VFP ABI
3551 applies. */
3552 ftype = check_typedef (value_type (function));
3553 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3554 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3555 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3556
3557 /* Set the return address. For the ARM, the return breakpoint is
3558 always at BP_ADDR. */
3559 if (arm_pc_is_thumb (gdbarch, bp_addr))
3560 bp_addr |= 1;
3561 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3562
3563 /* Walk through the list of args and determine how large a temporary
3564 stack is required. Need to take care here as structs may be
3565 passed on the stack, and we have to push them. */
3566 nstack = 0;
3567
3568 argreg = ARM_A1_REGNUM;
3569 nstack = 0;
3570
3571 /* The struct_return pointer occupies the first parameter
3572 passing register. */
3573 if (struct_return)
3574 {
3575 if (arm_debug)
3576 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3577 gdbarch_register_name (gdbarch, argreg),
3578 paddress (gdbarch, struct_addr));
3579 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3580 argreg++;
3581 }
3582
3583 for (argnum = 0; argnum < nargs; argnum++)
3584 {
3585 int len;
3586 struct type *arg_type;
3587 struct type *target_type;
3588 enum type_code typecode;
3589 const bfd_byte *val;
3590 int align;
3591 enum arm_vfp_cprc_base_type vfp_base_type;
3592 int vfp_base_count;
3593 int may_use_core_reg = 1;
3594
3595 arg_type = check_typedef (value_type (args[argnum]));
3596 len = TYPE_LENGTH (arg_type);
3597 target_type = TYPE_TARGET_TYPE (arg_type);
3598 typecode = TYPE_CODE (arg_type);
3599 val = value_contents (args[argnum]);
3600
3601 align = arm_type_align (arg_type);
3602 /* Round alignment up to a whole number of words. */
3603 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3604 /* Different ABIs have different maximum alignments. */
3605 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3606 {
3607 /* The APCS ABI only requires word alignment. */
3608 align = INT_REGISTER_SIZE;
3609 }
3610 else
3611 {
3612 /* The AAPCS requires at most doubleword alignment. */
3613 if (align > INT_REGISTER_SIZE * 2)
3614 align = INT_REGISTER_SIZE * 2;
3615 }
3616
3617 if (use_vfp_abi
3618 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3619 &vfp_base_count))
3620 {
3621 int regno;
3622 int unit_length;
3623 int shift;
3624 unsigned mask;
3625
3626 /* Because this is a CPRC it cannot go in a core register or
3627 cause a core register to be skipped for alignment.
3628 Either it goes in VFP registers and the rest of this loop
3629 iteration is skipped for this argument, or it goes on the
3630 stack (and the stack alignment code is correct for this
3631 case). */
3632 may_use_core_reg = 0;
3633
3634 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3635 shift = unit_length / 4;
3636 mask = (1 << (shift * vfp_base_count)) - 1;
3637 for (regno = 0; regno < 16; regno += shift)
3638 if (((vfp_regs_free >> regno) & mask) == mask)
3639 break;
3640
3641 if (regno < 16)
3642 {
3643 int reg_char;
3644 int reg_scaled;
3645 int i;
3646
3647 vfp_regs_free &= ~(mask << regno);
3648 reg_scaled = regno / shift;
3649 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3650 for (i = 0; i < vfp_base_count; i++)
3651 {
3652 char name_buf[4];
3653 int regnum;
3654 if (reg_char == 'q')
3655 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3656 val + i * unit_length);
3657 else
3658 {
3659 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3660 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3661 strlen (name_buf));
3662 regcache_cooked_write (regcache, regnum,
3663 val + i * unit_length);
3664 }
3665 }
3666 continue;
3667 }
3668 else
3669 {
3670 /* This CPRC could not go in VFP registers, so all VFP
3671 registers are now marked as used. */
3672 vfp_regs_free = 0;
3673 }
3674 }
3675
3676 /* Push stack padding for dowubleword alignment. */
3677 if (nstack & (align - 1))
3678 {
3679 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3680 nstack += INT_REGISTER_SIZE;
3681 }
3682
3683 /* Doubleword aligned quantities must go in even register pairs. */
3684 if (may_use_core_reg
3685 && argreg <= ARM_LAST_ARG_REGNUM
3686 && align > INT_REGISTER_SIZE
3687 && argreg & 1)
3688 argreg++;
3689
3690 /* If the argument is a pointer to a function, and it is a
3691 Thumb function, create a LOCAL copy of the value and set
3692 the THUMB bit in it. */
3693 if (TYPE_CODE_PTR == typecode
3694 && target_type != NULL
3695 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3696 {
3697 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3698 if (arm_pc_is_thumb (gdbarch, regval))
3699 {
3700 bfd_byte *copy = alloca (len);
3701 store_unsigned_integer (copy, len, byte_order,
3702 MAKE_THUMB_ADDR (regval));
3703 val = copy;
3704 }
3705 }
3706
3707 /* Copy the argument to general registers or the stack in
3708 register-sized pieces. Large arguments are split between
3709 registers and stack. */
3710 while (len > 0)
3711 {
3712 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3713
3714 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3715 {
3716 /* The argument is being passed in a general purpose
3717 register. */
3718 CORE_ADDR regval
3719 = extract_unsigned_integer (val, partial_len, byte_order);
3720 if (byte_order == BFD_ENDIAN_BIG)
3721 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3722 if (arm_debug)
3723 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3724 argnum,
3725 gdbarch_register_name
3726 (gdbarch, argreg),
3727 phex (regval, INT_REGISTER_SIZE));
3728 regcache_cooked_write_unsigned (regcache, argreg, regval);
3729 argreg++;
3730 }
3731 else
3732 {
3733 /* Push the arguments onto the stack. */
3734 if (arm_debug)
3735 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3736 argnum, nstack);
3737 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3738 nstack += INT_REGISTER_SIZE;
3739 }
3740
3741 len -= partial_len;
3742 val += partial_len;
3743 }
3744 }
3745 /* If we have an odd number of words to push, then decrement the stack
3746 by one word now, so first stack argument will be dword aligned. */
3747 if (nstack & 4)
3748 sp -= 4;
3749
3750 while (si)
3751 {
3752 sp -= si->len;
3753 write_memory (sp, si->data, si->len);
3754 si = pop_stack_item (si);
3755 }
3756
3757 /* Finally, update teh SP register. */
3758 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3759
3760 return sp;
3761 }
3762
3763
3764 /* Always align the frame to an 8-byte boundary. This is required on
3765 some platforms and harmless on the rest. */
3766
3767 static CORE_ADDR
3768 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3769 {
3770 /* Align the stack to eight bytes. */
3771 return sp & ~ (CORE_ADDR) 7;
3772 }
3773
/* Print the names of the FPU exception flags set in the low five bits
   of FLAGS, followed by a newline.  */

static void
print_fpu_flags (int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int i;

  for (i = 0; i < 5; i++)
    if (flags & (1 << i))
      fputs (flag_names[i], stdout);
  putchar ('\n');
}
3789
/* Print interesting information about the floating point processor
   (if present) or emulator.

   Decodes the FPS register from FRAME: bit 31 distinguishes hardware
   from software FPU, bits 24-30 give the FPU type, bits 16-20 hold the
   exception mask and bits 0-4 the exception flags.

   NOTE(review): output is written to stdout with printf/fputs instead
   of to the FILE argument; confirm whether the ui_file printing
   routines should be used here.  */
static void
arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
		      struct frame_info *frame, const char *args)
{
  unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
  int type;

  /* Bits 24-30 encode the FPU type.  */
  type = (status >> 24) & 127;
  if (status & (1 << 31))
    printf (_("Hardware FPU type %d\n"), type);
  else
    printf (_("Software FPU type %d\n"), type);
  /* i18n: [floating point unit] mask */
  fputs (_("mask: "), stdout);
  print_fpu_flags (status >> 16);
  /* i18n: [floating point unit] flags */
  fputs (_("flags: "), stdout);
  print_fpu_flags (status);
}
3811
3812 /* Construct the ARM extended floating point type. */
3813 static struct type *
3814 arm_ext_type (struct gdbarch *gdbarch)
3815 {
3816 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3817
3818 if (!tdep->arm_ext_type)
3819 tdep->arm_ext_type
3820 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3821 floatformats_arm_ext);
3822
3823 return tdep->arm_ext_type;
3824 }
3825
3826 static struct type *
3827 arm_neon_double_type (struct gdbarch *gdbarch)
3828 {
3829 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3830
3831 if (tdep->neon_double_type == NULL)
3832 {
3833 struct type *t, *elem;
3834
3835 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3836 TYPE_CODE_UNION);
3837 elem = builtin_type (gdbarch)->builtin_uint8;
3838 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3839 elem = builtin_type (gdbarch)->builtin_uint16;
3840 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3841 elem = builtin_type (gdbarch)->builtin_uint32;
3842 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3843 elem = builtin_type (gdbarch)->builtin_uint64;
3844 append_composite_type_field (t, "u64", elem);
3845 elem = builtin_type (gdbarch)->builtin_float;
3846 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3847 elem = builtin_type (gdbarch)->builtin_double;
3848 append_composite_type_field (t, "f64", elem);
3849
3850 TYPE_VECTOR (t) = 1;
3851 TYPE_NAME (t) = "neon_d";
3852 tdep->neon_double_type = t;
3853 }
3854
3855 return tdep->neon_double_type;
3856 }
3857
3858 /* FIXME: The vector types are not correctly ordered on big-endian
3859 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3860 bits of d0 - regardless of what unit size is being held in d0. So
3861 the offset of the first uint8 in d0 is 7, but the offset of the
3862 first float is 4. This code works as-is for little-endian
3863 targets. */
3864
3865 static struct type *
3866 arm_neon_quad_type (struct gdbarch *gdbarch)
3867 {
3868 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3869
3870 if (tdep->neon_quad_type == NULL)
3871 {
3872 struct type *t, *elem;
3873
3874 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3875 TYPE_CODE_UNION);
3876 elem = builtin_type (gdbarch)->builtin_uint8;
3877 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3878 elem = builtin_type (gdbarch)->builtin_uint16;
3879 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3880 elem = builtin_type (gdbarch)->builtin_uint32;
3881 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3882 elem = builtin_type (gdbarch)->builtin_uint64;
3883 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3884 elem = builtin_type (gdbarch)->builtin_float;
3885 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3886 elem = builtin_type (gdbarch)->builtin_double;
3887 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3888
3889 TYPE_VECTOR (t) = 1;
3890 TYPE_NAME (t) = "neon_q";
3891 tdep->neon_quad_type = t;
3892 }
3893
3894 return tdep->neon_quad_type;
3895 }
3896
/* Return the GDB type object for the "standard" data type of data in
   register N.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* The first 32 pseudo registers after the real ones are the VFP
     single-precision views — presumably s0..s31; confirm against the
     pseudo-register layout.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* The next 16 pseudo registers are the NEON quad views.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      /* Without FPA hardware the FPA register slots get a void type.  */
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
3946
/* Map a DWARF register REGNUM onto the appropriate GDB register
   number.  Returns -1 if the DWARF number has no GDB equivalent.  */

static int
arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  /* Core integer regs.  */
  if (reg >= 0 && reg <= 15)
    return reg;

  /* Legacy FPA encoding.  These were once used in a way which
     overlapped with VFP register numbering, so their use is
     discouraged, but GDB doesn't support the ARM toolchain
     which used them for VFP.  */
  if (reg >= 16 && reg <= 23)
    return ARM_F0_REGNUM + reg - 16;

  /* New assignments for the FPA registers.  */
  if (reg >= 96 && reg <= 103)
    return ARM_F0_REGNUM + reg - 96;

  /* WMMX register assignments.  */
  if (reg >= 104 && reg <= 111)
    return ARM_WCGR0_REGNUM + reg - 104;

  if (reg >= 112 && reg <= 127)
    return ARM_WR0_REGNUM + reg - 112;

  if (reg >= 192 && reg <= 199)
    return ARM_WC0_REGNUM + reg - 192;

  /* VFP v2 registers.  A double precision value is actually
     in d1 rather than s2, but the ABI only defines numbering
     for the single precision registers.  This will "just work"
     in GDB for little endian targets (we'll read eight bytes,
     starting in s0 and then progressing to s1), but will be
     reversed on big endian targets with VFP.  This won't
     be a problem for the new Neon quad registers; you're supposed
     to use DW_OP_piece for those.  */
  if (reg >= 64 && reg <= 95)
    {
      char name_buf[4];

      /* Resolve "sN" by name, since the s registers are pseudo
	 registers whose numbers depend on the target description.  */
      sprintf (name_buf, "s%d", reg - 64);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  /* VFP v3 / Neon registers.  This range is also used for VFP v2
     registers, except that it now describes d0 instead of s0.  */
  if (reg >= 256 && reg <= 287)
    {
      char name_buf[4];

      /* Resolve "dN" by name, for the same reason as above.  */
      sprintf (name_buf, "d%d", reg - 256);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  return -1;
}
4008
/* Map GDB internal REGNUM onto the Arm simulator register numbers.  */
static int
arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
{
  int reg = regnum;
  gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));

  /* iWMMXt data (WR), control (WC) and control-general (WCGR)
     registers map onto dedicated simulator coprocessor ranges.  */
  if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
    return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;

  if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
    return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;

  if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
    return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;

  /* General, FPA and status registers are laid out consecutively in
     that order; peel off each range in turn.  */
  if (reg < NUM_GREGS)
    return SIM_ARM_R0_REGNUM + reg;
  reg -= NUM_GREGS;

  if (reg < NUM_FREGS)
    return SIM_ARM_FP0_REGNUM + reg;
  reg -= NUM_FREGS;

  if (reg < NUM_SREGS)
    return SIM_ARM_FPS_REGNUM + reg;
  reg -= NUM_SREGS;

  /* No simulator mapping exists for this register.  */
  internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
}
4039
4040 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4041 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4042 It is thought that this is is the floating-point register format on
4043 little-endian systems. */
4044
4045 static void
4046 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4047 void *dbl, int endianess)
4048 {
4049 DOUBLEST d;
4050
4051 if (endianess == BFD_ENDIAN_BIG)
4052 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4053 else
4054 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4055 ptr, &d);
4056 floatformat_from_doublest (fmt, &d, dbl);
4057 }
4058
4059 static void
4060 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4061 int endianess)
4062 {
4063 DOUBLEST d;
4064
4065 floatformat_to_doublest (fmt, ptr, &d);
4066 if (endianess == BFD_ENDIAN_BIG)
4067 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4068 else
4069 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4070 &d, dbl);
4071 }
4072
4073 static int
4074 condition_true (unsigned long cond, unsigned long status_reg)
4075 {
4076 if (cond == INST_AL || cond == INST_NV)
4077 return 1;
4078
4079 switch (cond)
4080 {
4081 case INST_EQ:
4082 return ((status_reg & FLAG_Z) != 0);
4083 case INST_NE:
4084 return ((status_reg & FLAG_Z) == 0);
4085 case INST_CS:
4086 return ((status_reg & FLAG_C) != 0);
4087 case INST_CC:
4088 return ((status_reg & FLAG_C) == 0);
4089 case INST_MI:
4090 return ((status_reg & FLAG_N) != 0);
4091 case INST_PL:
4092 return ((status_reg & FLAG_N) == 0);
4093 case INST_VS:
4094 return ((status_reg & FLAG_V) != 0);
4095 case INST_VC:
4096 return ((status_reg & FLAG_V) == 0);
4097 case INST_HI:
4098 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4099 case INST_LS:
4100 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4101 case INST_GE:
4102 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4103 case INST_LT:
4104 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4105 case INST_GT:
4106 return (((status_reg & FLAG_Z) == 0)
4107 && (((status_reg & FLAG_N) == 0)
4108 == ((status_reg & FLAG_V) == 0)));
4109 case INST_LE:
4110 return (((status_reg & FLAG_Z) != 0)
4111 || (((status_reg & FLAG_N) == 0)
4112 != ((status_reg & FLAG_V) == 0)));
4113 }
4114 return 1;
4115 }
4116
/* Compute the value of the shifted-register operand (operand 2) of the
   ARM data-processing instruction INST, as seen by the instruction
   executing with PC value PC_VAL, carry flag CARRY and CPSR
   STATUS_REG.  Register contents are read from FRAME.  The result is
   truncated to 32 bits.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
                 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift: amount is the low byte of Rs; Rs ==
         PC reads as PC_VAL + 8.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
               : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount from bits 7:11.  */
    shift = bits (inst, 7, 11);

  /* Rm == PC reads as the instruction address plus 8, or plus 12 when
     a register-specified shift is in use (bit 4 set).  */
  res = (rm == ARM_PC_REGNUM
         ? (pc_val + (bit (inst, 4) ? 12 : 8))
         : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Shifts of 32 or more replicate the sign bit everywhere.  */
      if (shift >= 32)
        shift = 31;
      res = ((res & 0x80000000L)
             ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
        /* RRX: rotate right by one through the carry flag.  */
        res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
        res = (res >> shift) | (res << (32 - shift));
      break;
    }

  return res & 0xffffffff;
}
4166
/* Return the number of 1-bits in VAL.  */

static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Examine each bit in turn, low to high.  */
  while (val != 0)
    {
      count += (int) (val & 1);
      val >>= 1;
    }
  return count;
}
4177
/* Return the size in bytes (2 or 4) of the complete Thumb instruction
   whose first halfword is INST1.  Halfwords whose top five bits are
   0b11101, 0b11110 or 0b11111 begin a 32-bit Thumb-2 instruction.  */

static int
thumb_insn_size (unsigned short inst1)
{
  int is_32bit = ((inst1 & 0xe000) == 0xe000) && ((inst1 & 0x1800) != 0);

  return is_32bit ? 4 : 2;
}
4189
/* Advance the IT-block state ITSTATE past one instruction and return
   the new state.  The base condition in bits 7:5 is preserved; the
   mask in the low five bits shifts left by one.  Once the low four
   bits are all clear the IT block is over and the state resets to
   zero.  */

static int
thumb_advance_itstate (unsigned int itstate)
{
  unsigned int next = (itstate & 0xe0) | ((itstate << 1) & 0x1f);

  return (next & 0x0f) != 0 ? next : 0;
}
4203
/* Find the next PC after the current instruction executes.  In some
   cases we can not statically determine the answer (see the IT state
   handling in this function); in that case, a breakpoint may be
   inserted in addition to the returned PC, which will be used to set
   another breakpoint by our caller.  */

static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;		/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  /* IT[7:2] live in CPSR bits 15:10, IT[1:0] in CPSR bits 26:25.  */
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      arm_insert_single_step_breakpoint (gdbarch, aspace,
						 MAKE_THUMB_ADDR (pc));
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	{
	  /* Advance to the next instruction.  All the 32-bit
	     instructions share a common prefix.  */
	  if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
	    return MAKE_THUMB_ADDR (pc + 4);
	  else
	    return MAKE_THUMB_ADDR (pc + 2);
	}

      /* Otherwise, handle the instruction normally.  */
    }

  /* Decode the instruction, handling only the encodings that can
     change the PC; anything else keeps NEXTPC's default value.  */
  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
	 all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)		/* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* Let the OS-specific code compute the PC after the system
	     call, if it provides a hook for that.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if ((inst1 & 0xe000) == 0xe000)	/* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      /* Offset bits 22 and 23 come from the inverted J2/J1
		 bits of the encoding.  */
	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    load_pc = 0;

	  if (load_pc)
	    {
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* PC-relative load: the base is the word-aligned PC.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      if (bit (inst2, 10))
		{
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;	/* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;	/* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }
  return nextpc;
}
4580
/* Get the raw next address.  PC is the current program counter, in
   FRAME, which is assumed to be executing in ARM mode.

   The value returned has the execution state of the next instruction
   encoded in it.  Use IS_THUMB_ADDR () to see whether the instruction is
   in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
   address.  */

static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  /* Instructions whose condition field is NV are decoded as a
     separate space; only the forms handled below can modify the
     PC.  */
  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  */
	  nextpc = BranchDest (pc, this_instr);
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:		/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only instructions with Rd == PC change the flow of
	       control; otherwise keep the default NEXTPC.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		return nextpc;
	      }

	    /* A data-processing instruction writing to the PC.
	       Fetch the two operands; Rn == PC reads as the
	       instruction address plus 8.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    operand1 = ((rn == ARM_PC_REGNUM)
			? (pc_val + 8)
			: get_frame_register_unsigned (frame, rn));

	    if (bit (this_instr, 25))
	      {
		/* Operand 2 is a rotated 8-bit immediate.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* These do not write Rd; keep the default NEXTPC.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* byte write to PC */
		  rn = bits (this_instr, 16, 19);
		  base = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
			(bit (this_instr, 25)
			 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
			 : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
							    4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;

		  if (bit (this_instr, 23))
		    {
		      /* up */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  {
		    unsigned long rn_val =
		      get_frame_register_unsigned (frame,
						   bits (this_instr, 16, 19));
		    nextpc =
		      (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
								    + offset),
						       4, byte_order);
		  }
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    /* Let the OS-specific code compute the PC after the
	       system call, if it provides a hook for that.  */
	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}
4851
4852 /* Determine next PC after current instruction executes. Will call either
4853 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4854 loop is detected. */
4855
4856 CORE_ADDR
4857 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4858 {
4859 CORE_ADDR nextpc;
4860
4861 if (arm_frame_is_thumb (frame))
4862 {
4863 nextpc = thumb_get_next_pc_raw (frame, pc);
4864 if (nextpc == MAKE_THUMB_ADDR (pc))
4865 error (_("Infinite loop detected"));
4866 }
4867 else
4868 {
4869 nextpc = arm_get_next_pc_raw (frame, pc);
4870 if (nextpc == pc)
4871 error (_("Infinite loop detected"));
4872 }
4873
4874 return nextpc;
4875 }
4876
4877 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4878 of the appropriate mode (as encoded in the PC value), even if this
4879 differs from what would be expected according to the symbol tables. */
4880
4881 void
4882 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
4883 struct address_space *aspace,
4884 CORE_ADDR pc)
4885 {
4886 struct cleanup *old_chain
4887 = make_cleanup_restore_integer (&arm_override_mode);
4888
4889 arm_override_mode = IS_THUMB_ADDR (pc);
4890 pc = gdbarch_addr_bits_remove (gdbarch, pc);
4891
4892 insert_single_step_breakpoint (gdbarch, aspace, pc);
4893
4894 do_cleanups (old_chain);
4895 }
4896
4897 /* single_step() is called just before we want to resume the inferior,
4898 if we want to single-step it but there is no hardware or kernel
4899 single-step support. We find the target of the coming instruction
4900 and breakpoint it. */
4901
4902 int
4903 arm_software_single_step (struct frame_info *frame)
4904 {
4905 struct gdbarch *gdbarch = get_frame_arch (frame);
4906 struct address_space *aspace = get_frame_address_space (frame);
4907 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4908
4909 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4910
4911 return 1;
4912 }
4913
4914 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4915 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4916 NULL if an error occurs. BUF is freed. */
4917
4918 static gdb_byte *
4919 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4920 int old_len, int new_len)
4921 {
4922 gdb_byte *new_buf, *middle;
4923 int bytes_to_read = new_len - old_len;
4924
4925 new_buf = xmalloc (new_len);
4926 memcpy (new_buf + bytes_to_read, buf, old_len);
4927 xfree (buf);
4928 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4929 {
4930 xfree (new_buf);
4931 return NULL;
4932 }
4933 return new_buf;
4934 }
4935
4936 /* An IT block is at most the 2-byte IT instruction followed by
4937 four 4-byte instructions. The furthest back we must search to
4938 find an IT block that affects the current instruction is thus
4939 2 + 3 * 4 == 14 bytes. */
4940 #define MAX_IT_BLOCK_PREFIX 14
4941
4942 /* Use a quick scan if there are more than this many bytes of
4943 code. */
4944 #define IT_SCAN_THRESHOLD 32
4945
4946 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4947 A breakpoint in an IT block may not be hit, depending on the
4948 condition flags. */
4949 static CORE_ADDR
4950 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4951 {
4952 gdb_byte *buf;
4953 char map_type;
4954 CORE_ADDR boundary, func_start;
4955 int buf_len, buf2_len;
4956 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4957 int i, any, last_it, last_it_count;
4958
4959 /* If we are using BKPT breakpoints, none of this is necessary. */
4960 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4961 return bpaddr;
4962
4963 /* ARM mode does not have this problem. */
4964 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4965 return bpaddr;
4966
4967 /* We are setting a breakpoint in Thumb code that could potentially
4968 contain an IT block. The first step is to find how much Thumb
4969 code there is; we do not need to read outside of known Thumb
4970 sequences. */
4971 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4972 if (map_type == 0)
4973 /* Thumb-2 code must have mapping symbols to have a chance. */
4974 return bpaddr;
4975
4976 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4977
4978 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4979 && func_start > boundary)
4980 boundary = func_start;
4981
4982 /* Search for a candidate IT instruction. We have to do some fancy
4983 footwork to distinguish a real IT instruction from the second
4984 half of a 32-bit instruction, but there is no need for that if
4985 there's no candidate. */
4986 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4987 if (buf_len == 0)
4988 /* No room for an IT instruction. */
4989 return bpaddr;
4990
4991 buf = xmalloc (buf_len);
4992 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4993 return bpaddr;
4994 any = 0;
4995 for (i = 0; i < buf_len; i += 2)
4996 {
4997 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4998 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4999 {
5000 any = 1;
5001 break;
5002 }
5003 }
5004 if (any == 0)
5005 {
5006 xfree (buf);
5007 return bpaddr;
5008 }
5009
5010 /* OK, the code bytes before this instruction contain at least one
5011 halfword which resembles an IT instruction. We know that it's
5012 Thumb code, but there are still two possibilities. Either the
5013 halfword really is an IT instruction, or it is the second half of
5014 a 32-bit Thumb instruction. The only way we can tell is to
5015 scan forwards from a known instruction boundary. */
5016 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5017 {
5018 int definite;
5019
5020 /* There's a lot of code before this instruction. Start with an
5021 optimistic search; it's easy to recognize halfwords that can
5022 not be the start of a 32-bit instruction, and use that to
5023 lock on to the instruction boundaries. */
5024 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5025 if (buf == NULL)
5026 return bpaddr;
5027 buf_len = IT_SCAN_THRESHOLD;
5028
5029 definite = 0;
5030 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5031 {
5032 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5033 if (thumb_insn_size (inst1) == 2)
5034 {
5035 definite = 1;
5036 break;
5037 }
5038 }
5039
5040 /* At this point, if DEFINITE, BUF[I] is the first place we
5041 are sure that we know the instruction boundaries, and it is far
5042 enough from BPADDR that we could not miss an IT instruction
5043 affecting BPADDR. If ! DEFINITE, give up - start from a
5044 known boundary. */
5045 if (! definite)
5046 {
5047 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5048 bpaddr - boundary);
5049 if (buf == NULL)
5050 return bpaddr;
5051 buf_len = bpaddr - boundary;
5052 i = 0;
5053 }
5054 }
5055 else
5056 {
5057 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5058 if (buf == NULL)
5059 return bpaddr;
5060 buf_len = bpaddr - boundary;
5061 i = 0;
5062 }
5063
5064 /* Scan forwards. Find the last IT instruction before BPADDR. */
5065 last_it = -1;
5066 last_it_count = 0;
5067 while (i < buf_len)
5068 {
5069 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5070 last_it_count--;
5071 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5072 {
5073 last_it = i;
5074 if (inst1 & 0x0001)
5075 last_it_count = 4;
5076 else if (inst1 & 0x0002)
5077 last_it_count = 3;
5078 else if (inst1 & 0x0004)
5079 last_it_count = 2;
5080 else
5081 last_it_count = 1;
5082 }
5083 i += thumb_insn_size (inst1);
5084 }
5085
5086 xfree (buf);
5087
5088 if (last_it == -1)
5089 /* There wasn't really an IT instruction after all. */
5090 return bpaddr;
5091
5092 if (last_it_count < 1)
5093 /* It was too far away. */
5094 return bpaddr;
5095
5096 /* This really is a trouble spot. Move the breakpoint to the IT
5097 instruction. */
5098 return bpaddr - buf_len + last_it;
5099 }
5100
5101 /* ARM displaced stepping support.
5102
5103 Generally ARM displaced stepping works as follows:
5104
5105 1. When an instruction is to be single-stepped, it is first decoded by
5106 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5107 Depending on the type of instruction, it is then copied to a scratch
5108 location, possibly in a modified form. The copy_* set of functions
5109 performs such modification, as necessary. A breakpoint is placed after
5110 the modified instruction in the scratch space to return control to GDB.
5111 Note in particular that instructions which modify the PC will no longer
5112 do so after modification.
5113
5114 2. The instruction is single-stepped, by setting the PC to the scratch
5115 location address, and resuming. Control returns to GDB when the
5116 breakpoint is hit.
5117
5118 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5119 function used for the current instruction. This function's job is to
5120 put the CPU/memory state back to what it would have been if the
5121 instruction had been executed unmodified in its original location. */
5122
5123 /* NOP instruction (mov r0, r0). */
5124 #define ARM_NOP 0xe1a00000
5125
5126 /* Helper for register reads for displaced stepping. In particular, this
5127 returns the PC as it would be seen by the instruction at its original
5128 location. */
5129
5130 ULONGEST
5131 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5132 int regno)
5133 {
5134 ULONGEST ret;
5135 CORE_ADDR from = dsc->insn_addr;
5136
5137 if (regno == ARM_PC_REGNUM)
5138 {
5139 /* Compute pipeline offset:
5140 - When executing an ARM instruction, PC reads as the address of the
5141 current instruction plus 8.
5142 - When executing a Thumb instruction, PC reads as the address of the
5143 current instruction plus 4. */
5144
5145 if (!dsc->is_thumb)
5146 from += 8;
5147 else
5148 from += 4;
5149
5150 if (debug_displaced)
5151 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5152 (unsigned long) from);
5153 return (ULONGEST) from;
5154 }
5155 else
5156 {
5157 regcache_cooked_read_unsigned (regs, regno, &ret);
5158 if (debug_displaced)
5159 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5160 regno, (unsigned long) ret);
5161 return ret;
5162 }
5163 }
5164
5165 static int
5166 displaced_in_arm_mode (struct regcache *regs)
5167 {
5168 ULONGEST ps;
5169 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5170
5171 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5172
5173 return (ps & t_bit) == 0;
5174 }
5175
5176 /* Write to the PC as from a branch instruction. */
5177
5178 static void
5179 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5180 ULONGEST val)
5181 {
5182 if (!dsc->is_thumb)
5183 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5184 architecture versions < 6. */
5185 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5186 val & ~(ULONGEST) 0x3);
5187 else
5188 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5189 val & ~(ULONGEST) 0x1);
5190 }
5191
5192 /* Write to the PC as from a branch-exchange instruction. */
5193
5194 static void
5195 bx_write_pc (struct regcache *regs, ULONGEST val)
5196 {
5197 ULONGEST ps;
5198 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5199
5200 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5201
5202 if ((val & 1) == 1)
5203 {
5204 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5205 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5206 }
5207 else if ((val & 2) == 0)
5208 {
5209 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5210 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5211 }
5212 else
5213 {
5214 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5215 mode, align dest to 4 bytes). */
5216 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5217 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5218 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5219 }
5220 }
5221
5222 /* Write to the PC as if from a load instruction. */
5223
5224 static void
5225 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5226 ULONGEST val)
5227 {
5228 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5229 bx_write_pc (regs, val);
5230 else
5231 branch_write_pc (regs, dsc, val);
5232 }
5233
5234 /* Write to the PC as if from an ALU instruction. */
5235
5236 static void
5237 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5238 ULONGEST val)
5239 {
5240 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5241 bx_write_pc (regs, val);
5242 else
5243 branch_write_pc (regs, dsc, val);
5244 }
5245
/* Helper for writing to registers for displaced stepping.  Writing to the PC
   has varying effects depending on the instruction which does the write:
   this is controlled by the WRITE_PC argument.  */
5249
5250 void
5251 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5252 int regno, ULONGEST val, enum pc_write_style write_pc)
5253 {
5254 if (regno == ARM_PC_REGNUM)
5255 {
5256 if (debug_displaced)
5257 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5258 (unsigned long) val);
5259 switch (write_pc)
5260 {
5261 case BRANCH_WRITE_PC:
5262 branch_write_pc (regs, dsc, val);
5263 break;
5264
5265 case BX_WRITE_PC:
5266 bx_write_pc (regs, val);
5267 break;
5268
5269 case LOAD_WRITE_PC:
5270 load_write_pc (regs, dsc, val);
5271 break;
5272
5273 case ALU_WRITE_PC:
5274 alu_write_pc (regs, dsc, val);
5275 break;
5276
5277 case CANNOT_WRITE_PC:
5278 warning (_("Instruction wrote to PC in an unexpected way when "
5279 "single-stepping"));
5280 break;
5281
5282 default:
5283 internal_error (__FILE__, __LINE__,
5284 _("Invalid argument to displaced_write_reg"));
5285 }
5286
5287 dsc->wrote_to_pc = 1;
5288 }
5289 else
5290 {
5291 if (debug_displaced)
5292 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5293 regno, (unsigned long) val);
5294 regcache_cooked_write_unsigned (regs, regno, val);
5295 }
5296 }
5297
/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function returns
   1 if any of these fields in INSN reference the PC (also 0b1111, r15),
   else it returns 0.  */
5303
static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t remaining = bitmask;
  uint32_t bit_pos = 1;

  /* Walk the mask from the low end, examining each four-bit register
     field in turn.  */
  while (remaining != 0)
    {
      uint32_t field;

      /* Advance to the lowest bit still set in the mask.  */
      while (bit_pos != 0 && (remaining & bit_pos) == 0)
	bit_pos <<= 1;

      if (bit_pos == 0)
	break;

      /* The register field occupies the four bits starting here.  */
      field = bit_pos * 0xf;

      /* A field of all ones encodes r15, the PC.  */
      if ((insn & field) == field)
	return 1;

      remaining &= ~field;
    }

  return 0;
}
5329
5330 /* The simplest copy function. Many instructions have the same effect no
5331 matter what address they are executed at: in those cases, use this. */
5332
static int
arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
		     const char *iname, struct displaced_step_closure *dsc)
{
  /* Copy INSN into the scratch area verbatim; its effect does not depend
     on the address at which it executes.  INAME names the opcode or class
     for the debug trace only.  Always returns 0.  */
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
			"opcode/class '%s' unmodified\n", (unsigned long) insn,
			iname);

  dsc->modinsn[0] = insn;

  return 0;
}
5346
5347 /* Preload instructions with immediate offset. */
5348
5349 static void
5350 cleanup_preload (struct gdbarch *gdbarch,
5351 struct regcache *regs, struct displaced_step_closure *dsc)
5352 {
5353 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5354 if (!dsc->u.preload.immed)
5355 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5356 }
5357
5358 static void
5359 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5360 struct displaced_step_closure *dsc, unsigned int rn)
5361 {
5362 ULONGEST rn_val;
5363 /* Preload instructions:
5364
5365 {pli/pld} [rn, #+/-imm]
5366 ->
5367 {pli/pld} [r0, #+/-imm]. */
5368
5369 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5370 rn_val = displaced_read_reg (regs, dsc, rn);
5371 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5372 dsc->u.preload.immed = 1;
5373
5374 dsc->cleanup = &cleanup_preload;
5375 }
5376
5377 static int
5378 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5379 struct displaced_step_closure *dsc)
5380 {
5381 unsigned int rn = bits (insn, 16, 19);
5382
5383 if (!insn_references_pc (insn, 0x000f0000ul))
5384 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5385
5386 if (debug_displaced)
5387 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5388 (unsigned long) insn);
5389
5390 dsc->modinsn[0] = insn & 0xfff0ffff;
5391
5392 install_preload (gdbarch, regs, dsc, rn);
5393
5394 return 0;
5395 }
5396
5397 /* Preload instructions with register offset. */
5398
5399 static void
5400 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
5401 struct displaced_step_closure *dsc, unsigned int rn,
5402 unsigned int rm)
5403 {
5404 ULONGEST rn_val, rm_val;
5405
5406 /* Preload register-offset instructions:
5407
5408 {pli/pld} [rn, rm {, shift}]
5409 ->
5410 {pli/pld} [r0, r1 {, shift}]. */
5411
5412 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5413 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5414 rn_val = displaced_read_reg (regs, dsc, rn);
5415 rm_val = displaced_read_reg (regs, dsc, rm);
5416 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5417 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5418 dsc->u.preload.immed = 0;
5419
5420 dsc->cleanup = &cleanup_preload;
5421 }
5422
5423 static int
5424 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5425 struct regcache *regs,
5426 struct displaced_step_closure *dsc)
5427 {
5428 unsigned int rn = bits (insn, 16, 19);
5429 unsigned int rm = bits (insn, 0, 3);
5430
5431
5432 if (!insn_references_pc (insn, 0x000f000ful))
5433 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5434
5435 if (debug_displaced)
5436 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5437 (unsigned long) insn);
5438
5439 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5440
5441 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5442 return 0;
5443 }
5444
5445 /* Copy/cleanup coprocessor load and store instructions. */
5446
5447 static void
5448 cleanup_copro_load_store (struct gdbarch *gdbarch,
5449 struct regcache *regs,
5450 struct displaced_step_closure *dsc)
5451 {
5452 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5453
5454 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5455
5456 if (dsc->u.ldst.writeback)
5457 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5458 }
5459
5460 static void
5461 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5462 struct displaced_step_closure *dsc,
5463 int writeback, unsigned int rn)
5464 {
5465 ULONGEST rn_val;
5466
5467 /* Coprocessor load/store instructions:
5468
5469 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5470 ->
5471 {stc/stc2} [r0, #+/-imm].
5472
5473 ldc/ldc2 are handled identically. */
5474
5475 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5476 rn_val = displaced_read_reg (regs, dsc, rn);
5477 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5478
5479 dsc->u.ldst.writeback = writeback;
5480 dsc->u.ldst.rn = rn;
5481
5482 dsc->cleanup = &cleanup_copro_load_store;
5483 }
5484
5485 static int
5486 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5487 struct regcache *regs,
5488 struct displaced_step_closure *dsc)
5489 {
5490 unsigned int rn = bits (insn, 16, 19);
5491
5492 if (!insn_references_pc (insn, 0x000f0000ul))
5493 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5494
5495 if (debug_displaced)
5496 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5497 "load/store insn %.8lx\n", (unsigned long) insn);
5498
5499 dsc->modinsn[0] = insn & 0xfff0ffff;
5500
5501 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5502
5503 return 0;
5504 }
5505
5506 /* Clean up branch instructions (actually perform the branch, by setting
5507 PC). */
5508
5509 static void
5510 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5511 struct displaced_step_closure *dsc)
5512 {
5513 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5514 int branch_taken = condition_true (dsc->u.branch.cond, status);
5515 enum pc_write_style write_pc = dsc->u.branch.exchange
5516 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5517
5518 if (!branch_taken)
5519 return;
5520
5521 if (dsc->u.branch.link)
5522 {
5523 /* The value of LR should be the next insn of current one. In order
5524 not to confuse logic hanlding later insn `bx lr', if current insn mode
5525 is Thumb, the bit 0 of LR value should be set to 1. */
5526 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
5527
5528 if (dsc->is_thumb)
5529 next_insn_addr |= 0x1;
5530
5531 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
5532 CANNOT_WRITE_PC);
5533 }
5534
5535 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5536 }
5537
5538 /* Copy B/BL/BLX instructions with immediate destinations. */
5539
5540 static void
5541 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
5542 struct displaced_step_closure *dsc,
5543 unsigned int cond, int exchange, int link, long offset)
5544 {
5545 /* Implement "BL<cond> <label>" as:
5546
5547 Preparation: cond <- instruction condition
5548 Insn: mov r0, r0 (nop)
5549 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5550
5551 B<cond> similar, but don't set r14 in cleanup. */
5552
5553 dsc->u.branch.cond = cond;
5554 dsc->u.branch.link = link;
5555 dsc->u.branch.exchange = exchange;
5556
5557 if (dsc->is_thumb)
5558 dsc->u.branch.dest = dsc->insn_addr + 4 + offset;
5559 else
5560 dsc->u.branch.dest = dsc->insn_addr + 8 + offset;
5561
5562 dsc->cleanup = &cleanup_branch;
5563 }
static int
arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
		   struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  /* BLX (immediate) is encoded with the condition field 0xf.  */
  int exchange = (cond == 0xf);
  int link = exchange || bit (insn, 24);
  long offset;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
			(unsigned long) insn);
  if (exchange)
    /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
       then arrange the switch into Thumb mode.  Bit 24 (the H bit) supplies
       an extra halfword of offset.  */
    offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
  else
    offset = bits (insn, 0, 23) << 2;

  /* Sign-extend the 26-bit offset (bit 25 is the sign bit after the
     2-bit shift above).  */
  if (bit (offset, 25))
    offset = offset | ~0x3ffffff;

  /* The copied instruction is a nop; the branch itself is performed by
     cleanup_branch.  */
  dsc->modinsn[0] = ARM_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
5592
5593 /* Copy BX/BLX with register-specified destinations. */
5594
5595 static void
5596 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
5597 struct displaced_step_closure *dsc, int link,
5598 unsigned int cond, unsigned int rm)
5599 {
5600 /* Implement {BX,BLX}<cond> <reg>" as:
5601
5602 Preparation: cond <- instruction condition
5603 Insn: mov r0, r0 (nop)
5604 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5605
5606 Don't set r14 in cleanup for BX. */
5607
5608 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5609
5610 dsc->u.branch.cond = cond;
5611 dsc->u.branch.link = link;
5612
5613 dsc->u.branch.exchange = 1;
5614
5615 dsc->cleanup = &cleanup_branch;
5616 }
5617
5618 static int
5619 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5620 struct regcache *regs, struct displaced_step_closure *dsc)
5621 {
5622 unsigned int cond = bits (insn, 28, 31);
5623 /* BX: x12xxx1x
5624 BLX: x12xxx3x. */
5625 int link = bit (insn, 5);
5626 unsigned int rm = bits (insn, 0, 3);
5627
5628 if (debug_displaced)
5629 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
5630 (unsigned long) insn);
5631
5632 dsc->modinsn[0] = ARM_NOP;
5633
5634 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
5635 return 0;
5636 }
5637
5638 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5639
5640 static void
5641 cleanup_alu_imm (struct gdbarch *gdbarch,
5642 struct regcache *regs, struct displaced_step_closure *dsc)
5643 {
5644 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5645 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5646 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5647 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5648 }
5649
5650 static int
5651 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5652 struct displaced_step_closure *dsc)
5653 {
5654 unsigned int rn = bits (insn, 16, 19);
5655 unsigned int rd = bits (insn, 12, 15);
5656 unsigned int op = bits (insn, 21, 24);
5657 int is_mov = (op == 0xd);
5658 ULONGEST rd_val, rn_val;
5659
5660 if (!insn_references_pc (insn, 0x000ff000ul))
5661 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5662
5663 if (debug_displaced)
5664 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5665 "%.8lx\n", is_mov ? "move" : "ALU",
5666 (unsigned long) insn);
5667
5668 /* Instruction is of form:
5669
5670 <op><cond> rd, [rn,] #imm
5671
5672 Rewrite as:
5673
5674 Preparation: tmp1, tmp2 <- r0, r1;
5675 r0, r1 <- rd, rn
5676 Insn: <op><cond> r0, r1, #imm
5677 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5678 */
5679
5680 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5681 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5682 rn_val = displaced_read_reg (regs, dsc, rn);
5683 rd_val = displaced_read_reg (regs, dsc, rd);
5684 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5685 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5686 dsc->rd = rd;
5687
5688 if (is_mov)
5689 dsc->modinsn[0] = insn & 0xfff00fff;
5690 else
5691 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5692
5693 dsc->cleanup = &cleanup_alu_imm;
5694
5695 return 0;
5696 }
5697
5698 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5699
5700 static void
5701 cleanup_alu_reg (struct gdbarch *gdbarch,
5702 struct regcache *regs, struct displaced_step_closure *dsc)
5703 {
5704 ULONGEST rd_val;
5705 int i;
5706
5707 rd_val = displaced_read_reg (regs, dsc, 0);
5708
5709 for (i = 0; i < 3; i++)
5710 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5711
5712 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5713 }
5714
5715 static void
5716 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
5717 struct displaced_step_closure *dsc,
5718 unsigned int rd, unsigned int rn, unsigned int rm)
5719 {
5720 ULONGEST rd_val, rn_val, rm_val;
5721
5722 /* Instruction is of form:
5723
5724 <op><cond> rd, [rn,] rm [, <shift>]
5725
5726 Rewrite as:
5727
5728 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5729 r0, r1, r2 <- rd, rn, rm
5730 Insn: <op><cond> r0, r1, r2 [, <shift>]
5731 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5732 */
5733
5734 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5735 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5736 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5737 rd_val = displaced_read_reg (regs, dsc, rd);
5738 rn_val = displaced_read_reg (regs, dsc, rn);
5739 rm_val = displaced_read_reg (regs, dsc, rm);
5740 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5741 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5742 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5743 dsc->rd = rd;
5744
5745 dsc->cleanup = &cleanup_alu_reg;
5746 }
5747
5748 static int
5749 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5750 struct displaced_step_closure *dsc)
5751 {
5752 unsigned int op = bits (insn, 21, 24);
5753 int is_mov = (op == 0xd);
5754
5755 if (!insn_references_pc (insn, 0x000ff00ful))
5756 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
5757
5758 if (debug_displaced)
5759 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
5760 is_mov ? "move" : "ALU", (unsigned long) insn);
5761
5762 if (is_mov)
5763 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
5764 else
5765 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
5766
5767 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
5768 bits (insn, 0, 3));
5769 return 0;
5770 }
5771
5772 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5773
5774 static void
5775 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5776 struct regcache *regs,
5777 struct displaced_step_closure *dsc)
5778 {
5779 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5780 int i;
5781
5782 for (i = 0; i < 4; i++)
5783 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5784
5785 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5786 }
5787
5788 static void
5789 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
5790 struct displaced_step_closure *dsc,
5791 unsigned int rd, unsigned int rn, unsigned int rm,
5792 unsigned rs)
5793 {
5794 int i;
5795 ULONGEST rd_val, rn_val, rm_val, rs_val;
5796
5797 /* Instruction is of form:
5798
5799 <op><cond> rd, [rn,] rm, <shift> rs
5800
5801 Rewrite as:
5802
5803 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
5804 r0, r1, r2, r3 <- rd, rn, rm, rs
5805 Insn: <op><cond> r0, r1, r2, <shift> r3
5806 Cleanup: tmp5 <- r0
5807 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
5808 rd <- tmp5
5809 */
5810
5811 for (i = 0; i < 4; i++)
5812 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
5813
5814 rd_val = displaced_read_reg (regs, dsc, rd);
5815 rn_val = displaced_read_reg (regs, dsc, rn);
5816 rm_val = displaced_read_reg (regs, dsc, rm);
5817 rs_val = displaced_read_reg (regs, dsc, rs);
5818 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5819 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5820 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5821 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
5822 dsc->rd = rd;
5823 dsc->cleanup = &cleanup_alu_shifted_reg;
5824 }
5825
5826 static int
5827 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
5828 struct regcache *regs,
5829 struct displaced_step_closure *dsc)
5830 {
5831 unsigned int op = bits (insn, 21, 24);
5832 int is_mov = (op == 0xd);
5833 unsigned int rd, rn, rm, rs;
5834
5835 if (!insn_references_pc (insn, 0x000fff0ful))
5836 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
5837
5838 if (debug_displaced)
5839 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
5840 "%.8lx\n", is_mov ? "move" : "ALU",
5841 (unsigned long) insn);
5842
5843 rn = bits (insn, 16, 19);
5844 rm = bits (insn, 0, 3);
5845 rs = bits (insn, 8, 11);
5846 rd = bits (insn, 12, 15);
5847
5848 if (is_mov)
5849 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
5850 else
5851 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
5852
5853 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
5854
5855 return 0;
5856 }
5857
5858 /* Clean up load instructions. */
5859
static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* Collect the loaded value(s) from scratch r0 (and r1 for a 64-bit
     transfer) and the possibly-updated base address from r2, before the
     scratch registers are restored below.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers saved at copy time (r3 only for the
     register-offset form).  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  Use LOAD_WRITE_PC semantics, since the
     destination may be the PC.  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
5886
5887 /* Clean up store instructions. */
5888
static void
cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
	       struct displaced_step_closure *dsc)
{
  /* Pick up the possibly-updated base address from scratch r2 before the
     scratch registers are restored below.  */
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers saved at copy time: r3 only for the
     register-offset form, r4 only when it was used as extra scratch for a
     PC store (restore_r4 is 0 in that case despite the name — see
     arm_copy_ldr_str_ldrb_strb, which sets restore_r4 = 1 when r4 was
     clobbered and this cleanup must NOT skip it; NOTE(review): the flag's
     sense is "r4 needs restoring" per the setter, confirm against
     callers).  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.restore_r4)
    displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);

  /* Writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
}
5908
5909 /* Copy "extra" load/store instructions. These are halfword/doubleword
5910 transfers, which have a different encoding to byte/word transfers. */
5911
5912 static int
5913 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
5914 struct regcache *regs, struct displaced_step_closure *dsc)
5915 {
5916 unsigned int op1 = bits (insn, 20, 24);
5917 unsigned int op2 = bits (insn, 5, 6);
5918 unsigned int rt = bits (insn, 12, 15);
5919 unsigned int rn = bits (insn, 16, 19);
5920 unsigned int rm = bits (insn, 0, 3);
5921 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5922 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
5923 int immed = (op1 & 0x4) != 0;
5924 int opcode;
5925 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
5926
5927 if (!insn_references_pc (insn, 0x000ff00ful))
5928 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
5929
5930 if (debug_displaced)
5931 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
5932 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
5933 (unsigned long) insn);
5934
5935 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
5936
5937 if (opcode < 0)
5938 internal_error (__FILE__, __LINE__,
5939 _("copy_extra_ld_st: instruction decode error"));
5940
5941 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5942 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5943 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5944 if (!immed)
5945 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
5946
5947 rt_val = displaced_read_reg (regs, dsc, rt);
5948 if (bytesize[opcode] == 8)
5949 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
5950 rn_val = displaced_read_reg (regs, dsc, rn);
5951 if (!immed)
5952 rm_val = displaced_read_reg (regs, dsc, rm);
5953
5954 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5955 if (bytesize[opcode] == 8)
5956 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
5957 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5958 if (!immed)
5959 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5960
5961 dsc->rd = rt;
5962 dsc->u.ldst.xfersize = bytesize[opcode];
5963 dsc->u.ldst.rn = rn;
5964 dsc->u.ldst.immed = immed;
5965 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5966 dsc->u.ldst.restore_r4 = 0;
5967
5968 if (immed)
5969 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5970 ->
5971 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5972 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5973 else
5974 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5975 ->
5976 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5977 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5978
5979 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
5980
5981 return 0;
5982 }
5983
5984 /* Copy byte/word loads and stores. */
5985
static void
install_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
			   struct displaced_step_closure *dsc, int load,
			   int immed, int writeback, int byte, int usermode,
			   int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Save the scratch registers the rewritten instruction will clobber:
     r0 (transfer register), r2 (base), r3 (index, register form only) and
     r4 (used by the PC-store sequence, store case only).
     NOTE(review): USERMODE is not referenced in this function's body;
     confirm whether the 't' (user-mode) variants need extra handling.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  /* Read the operand values...  */
  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  /* ...and park them in the scratch registers for the rewritten insn.  */
  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  /* Record what the cleanup routine needs.  */
  dsc->rd = rt;
  dsc->u.ldst.xfersize = byte ? 1 : 4;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from dispalced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
				= addr(Insn1) + offset - addr(Insn3) - 8
				= offset - 16
     Insn4: add r4, r4, #8 r4 = offset - 8
     Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
				= from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
6039
static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    struct displaced_step_closure *dsc,
			    int load, int byte, int usermode)
{
  int immed = !bit (insn, 25);
  /* Writeback happens for post-indexed (P == 0) or explicitly
     flagged (W == 1) addressing.  */
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  /* With no PC involvement the instruction may run unmodified.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
			load ? (byte ? "ldrb" : "ldr")
			     : (byte ? "strb" : "str"), usermode ? "t" : "",
			rt, rn,
			(unsigned long) insn);

  install_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, immed, writeback, byte,
			     usermode, rt, rm, rn);

  /* Storing the PC needs special treatment: its stored value is
     architecture-dependent, so compute it explicitly (see the comment in
     install_ldr_str_ldrb_strb).  Loads, and stores of other registers,
     need only the simple register substitution.  */
  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub  r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add  r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add  r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
6104
6105 /* Cleanup LDM instructions with fully-populated register list. This is an
6106 unfortunate corner case: it's impossible to implement correctly by modifying
6107 the instruction. The issue is as follows: we have an instruction,
6108
6109 ldm rN, {r0-r15}
6110
6111 which we must rewrite to avoid loading PC. A possible solution would be to
6112 do the load in two halves, something like (with suitable cleanup
6113 afterwards):
6114
6115 mov r8, rN
6116 ldm[id][ab] r8!, {r0-r7}
6117 str r7, <temp>
6118 ldm[id][ab] r8, {r7-r14}
6119 <bkpt>
6120
6121 but at present there's no suitable place for <temp>, since the scratch space
6122 is overwritten before the cleanup routine is called. For now, we simply
6123 emulate the instruction. */
6124
static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  /* Emulate an LDM with a fully-populated register list (see the comment
     above): walk the transfer addresses in the direction the instruction
     specifies, loading each listed register from memory directly.  */
  int inc = dsc->u.block.increment;
  /* Pre-indexed forms bump the address before each transfer; post-indexed
     forms bump it after.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Scan registers upward for increment forms, downward otherwise.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  /* An LDM with PC in the list and the S bit set is an exception
     return.  */
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  while (regmask)
    {
      uint32_t memword;

      /* Find the next register in the list, in transfer order.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      /* LOAD_WRITE_PC so a PC load takes the proper interworking path.  */
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Propagate the final address into the base register if the original
     instruction requested writeback.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
6183
6184 /* Clean up an STM which included the PC in the register list. */
6185
6186 static void
6187 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6188 struct displaced_step_closure *dsc)
6189 {
6190 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6191 int store_executed = condition_true (dsc->u.block.cond, status);
6192 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6193 CORE_ADDR stm_insn_addr;
6194 uint32_t pc_val;
6195 long offset;
6196 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6197
6198 /* If condition code fails, there's nothing else to do. */
6199 if (!store_executed)
6200 return;
6201
6202 if (dsc->u.block.increment)
6203 {
6204 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6205
6206 if (dsc->u.block.before)
6207 pc_stored_at += 4;
6208 }
6209 else
6210 {
6211 pc_stored_at = dsc->u.block.xfer_addr;
6212
6213 if (dsc->u.block.before)
6214 pc_stored_at -= 4;
6215 }
6216
6217 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6218 stm_insn_addr = dsc->scratch_base;
6219 offset = pc_val - stm_insn_addr;
6220
6221 if (debug_displaced)
6222 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6223 "STM instruction\n", offset);
6224
6225 /* Rewrite the stored PC to the proper value for the non-displaced original
6226 instruction. */
6227 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6228 dsc->insn_addr + offset);
6229 }
6230
6231 /* Clean up an LDM which includes the PC in the register list. We clumped all
6232 the registers in the transferred list into a contiguous range r0...rX (to
6233 avoid loading PC directly and losing control of the debugged program), so we
6234 must undo that here. */
6235
6236 static void
6237 cleanup_block_load_pc (struct gdbarch *gdbarch,
6238 struct regcache *regs,
6239 struct displaced_step_closure *dsc)
6240 {
6241 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6242 int load_executed = condition_true (dsc->u.block.cond, status), i;
6243 unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
6244 unsigned int regs_loaded = bitcount (mask);
6245 unsigned int num_to_shuffle = regs_loaded, clobbered;
6246
6247 /* The method employed here will fail if the register list is fully populated
6248 (we need to avoid loading PC directly). */
6249 gdb_assert (num_to_shuffle < 16);
6250
6251 if (!load_executed)
6252 return;
6253
6254 clobbered = (1 << num_to_shuffle) - 1;
6255
6256 while (num_to_shuffle > 0)
6257 {
6258 if ((mask & (1 << write_reg)) != 0)
6259 {
6260 unsigned int read_reg = num_to_shuffle - 1;
6261
6262 if (read_reg != write_reg)
6263 {
6264 ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
6265 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6266 if (debug_displaced)
6267 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6268 "loaded register r%d to r%d\n"), read_reg,
6269 write_reg);
6270 }
6271 else if (debug_displaced)
6272 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6273 "r%d already in the right place\n"),
6274 write_reg);
6275
6276 clobbered &= ~(1 << write_reg);
6277
6278 num_to_shuffle--;
6279 }
6280
6281 write_reg--;
6282 }
6283
6284 /* Restore any registers we scribbled over. */
6285 for (write_reg = 0; clobbered != 0; write_reg++)
6286 {
6287 if ((clobbered & (1 << write_reg)) != 0)
6288 {
6289 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6290 CANNOT_WRITE_PC);
6291 if (debug_displaced)
6292 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6293 "clobbered register r%d\n"), write_reg);
6294 clobbered &= ~(1 << write_reg);
6295 }
6296 }
6297
6298 /* Perform register writeback manually. */
6299 if (dsc->u.block.writeback)
6300 {
6301 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6302
6303 if (dsc->u.block.increment)
6304 new_rn_val += regs_loaded * 4;
6305 else
6306 new_rn_val -= regs_loaded * 4;
6307
6308 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6309 CANNOT_WRITE_PC);
6310 }
6311 }
6312
6313 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6314 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6315
6316 static int
6317 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
6318 struct regcache *regs,
6319 struct displaced_step_closure *dsc)
6320 {
6321 int load = bit (insn, 20);
6322 int user = bit (insn, 22);
6323 int increment = bit (insn, 23);
6324 int before = bit (insn, 24);
6325 int writeback = bit (insn, 21);
6326 int rn = bits (insn, 16, 19);
6327
6328 /* Block transfers which don't mention PC can be run directly
6329 out-of-line. */
6330 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
6331 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6332
6333 if (rn == ARM_PC_REGNUM)
6334 {
6335 warning (_("displaced: Unpredictable LDM or STM with "
6336 "base register r15"));
6337 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6338 }
6339
6340 if (debug_displaced)
6341 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6342 "%.8lx\n", (unsigned long) insn);
6343
6344 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6345 dsc->u.block.rn = rn;
6346
6347 dsc->u.block.load = load;
6348 dsc->u.block.user = user;
6349 dsc->u.block.increment = increment;
6350 dsc->u.block.before = before;
6351 dsc->u.block.writeback = writeback;
6352 dsc->u.block.cond = bits (insn, 28, 31);
6353
6354 dsc->u.block.regmask = insn & 0xffff;
6355
6356 if (load)
6357 {
6358 if ((insn & 0xffff) == 0xffff)
6359 {
6360 /* LDM with a fully-populated register list. This case is
6361 particularly tricky. Implement for now by fully emulating the
6362 instruction (which might not behave perfectly in all cases, but
6363 these instructions should be rare enough for that not to matter
6364 too much). */
6365 dsc->modinsn[0] = ARM_NOP;
6366
6367 dsc->cleanup = &cleanup_block_load_all;
6368 }
6369 else
6370 {
6371 /* LDM of a list of registers which includes PC. Implement by
6372 rewriting the list of registers to be transferred into a
6373 contiguous chunk r0...rX before doing the transfer, then shuffling
6374 registers into the correct places in the cleanup routine. */
6375 unsigned int regmask = insn & 0xffff;
6376 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6377 unsigned int to = 0, from = 0, i, new_rn;
6378
6379 for (i = 0; i < num_in_list; i++)
6380 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6381
6382 /* Writeback makes things complicated. We need to avoid clobbering
6383 the base register with one of the registers in our modified
6384 register list, but just using a different register can't work in
6385 all cases, e.g.:
6386
6387 ldm r14!, {r0-r13,pc}
6388
6389 which would need to be rewritten as:
6390
6391 ldm rN!, {r0-r14}
6392
6393 but that can't work, because there's no free register for N.
6394
6395 Solve this by turning off the writeback bit, and emulating
6396 writeback manually in the cleanup routine. */
6397
6398 if (writeback)
6399 insn &= ~(1 << 21);
6400
6401 new_regmask = (1 << num_in_list) - 1;
6402
6403 if (debug_displaced)
6404 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6405 "{..., pc}: original reg list %.4x, modified "
6406 "list %.4x\n"), rn, writeback ? "!" : "",
6407 (int) insn & 0xffff, new_regmask);
6408
6409 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6410
6411 dsc->cleanup = &cleanup_block_load_pc;
6412 }
6413 }
6414 else
6415 {
6416 /* STM of a list of registers which includes PC. Run the instruction
6417 as-is, but out of line: this will store the wrong value for the PC,
6418 so we must manually fix up the memory in the cleanup routine.
6419 Doing things this way has the advantage that we can auto-detect
6420 the offset of the PC write (which is architecture-dependent) in
6421 the cleanup routine. */
6422 dsc->modinsn[0] = insn;
6423
6424 dsc->cleanup = &cleanup_block_store_pc;
6425 }
6426
6427 return 0;
6428 }
6429
6430 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6431 for Linux, where some SVC instructions must be treated specially. */
6432
6433 static void
6434 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6435 struct displaced_step_closure *dsc)
6436 {
6437 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
6438
6439 if (debug_displaced)
6440 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6441 "%.8lx\n", (unsigned long) resume_addr);
6442
6443 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6444 }
6445
6446 static int
6447
6448 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
6449 struct regcache *regs, struct displaced_step_closure *dsc)
6450 {
6451
6452 if (debug_displaced)
6453 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6454 (unsigned long) insn);
6455
6456 /* Preparation: none.
6457 Insn: unmodified svc.
6458 Cleanup: pc <- insn_addr + 4. */
6459
6460 dsc->modinsn[0] = insn;
6461
6462 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6463 instruction. */
6464 dsc->wrote_to_pc = 1;
6465
6466 /* Allow OS-specific code to override SVC handling. */
6467 if (dsc->u.svc.copy_svc_os)
6468 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
6469 else
6470 {
6471 dsc->cleanup = &cleanup_svc;
6472 return 0;
6473 }
6474
6475 }
6476
6477 /* Copy undefined instructions. */
6478
6479 static int
6480 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6481 struct displaced_step_closure *dsc)
6482 {
6483 if (debug_displaced)
6484 fprintf_unfiltered (gdb_stdlog,
6485 "displaced: copying undefined insn %.8lx\n",
6486 (unsigned long) insn);
6487
6488 dsc->modinsn[0] = insn;
6489
6490 return 0;
6491 }
6492
6493 /* Copy unpredictable instructions. */
6494
6495 static int
6496 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6497 struct displaced_step_closure *dsc)
6498 {
6499 if (debug_displaced)
6500 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
6501 "%.8lx\n", (unsigned long) insn);
6502
6503 dsc->modinsn[0] = insn;
6504
6505 return 0;
6506 }
6507
6508 /* The decode_* functions are instruction decoding helpers. They mostly follow
6509 the presentation in the ARM ARM. */
6510
/* Decode the miscellaneous / memory-hint / advanced-SIMD space of the
   unconditional (0xf) encodings.  OP1 is insn bits <26:20>, OP2 is bits
   <7:4>; RN is the base-register field, consulted where rn == pc changes
   the classification.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      /* pld/pldw with a PC base is unpredictable.  */
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
6570
/* Decode instructions in the unconditional (0xf) space: misc/hint/Neon
   (bit 27 clear), or srs/rfe/blx-imm/coprocessor transfers otherwise.  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      /* blx with immediate offset (may switch to Thumb).  */
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* Whether the base register is PC selects between the immediate
	   and literal forms of ldc/ldc2.  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
6653
/* Decode miscellaneous instructions in dp/misc encoding space.  OP2 is
   insn bits <6:4>, OP is bits <22:21>, OP1 is bits <19:16>.  Only bx and
   blx (register) need PC-aware copying; everything else runs unmodified
   or is undefined.  */

static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);
  unsigned int op1 = bits (insn, 16, 19);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				    regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      /* Fall through to undef for the remaining OP values.  */

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
6706
/* Decode the data-processing / miscellaneous encoding space: immediate
   forms when bit 25 is set, otherwise register forms, multiplies,
   synchronization primitives and extra (halfword/dual) loads/stores.  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6752
/* Decode word/unsigned-byte single load/store instructions.  A is bit 25
   (register vs. immediate offset), B is bit 4; OP1 is bits <24:20>.  The
   three trailing flags passed to arm_copy_ldr_str_ldrb_strb appear to
   select load vs. store, byte vs. word, and the unprivileged (T-suffixed)
   variant respectively -- NOTE(review): confirm against that function's
   definition.  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
6790
/* Decode the media-instruction space (parallel add/sub, pack/saturate,
   usad8/usada8, bit-field operations).  None of these can reference PC,
   so everything is either copied unmodified or undefined.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
				  "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  /* Rd == 0xf distinguishes usad8 from usada8.  */
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  /* Rn == 0xf distinguishes bfc from bfi.  */
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6846
6847 static int
6848 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
6849 struct regcache *regs,
6850 struct displaced_step_closure *dsc)
6851 {
6852 if (bit (insn, 25))
6853 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
6854 else
6855 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
6856 }
6857
/* Decode VFP/Neon extension-register load/store instructions (and the
   64-bit mrrc/mcrr transfers sharing this space).  OPCODE is insn bits
   <24:20>.  Only vstr/vldr may need PC-relative fixup; the rest run
   unmodified.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6888
6889 static int
6890 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6891 struct regcache *regs, struct displaced_step_closure *dsc)
6892 {
6893 unsigned int op1 = bits (insn, 20, 25);
6894 int op = bit (insn, 4);
6895 unsigned int coproc = bits (insn, 8, 11);
6896 unsigned int rn = bits (insn, 16, 19);
6897
6898 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
6899 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
6900 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
6901 && (coproc & 0xe) != 0xa)
6902 /* stc/stc2. */
6903 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6904 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
6905 && (coproc & 0xe) != 0xa)
6906 /* ldc/ldc2 imm/lit. */
6907 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6908 else if ((op1 & 0x3e) == 0x00)
6909 return arm_copy_undef (gdbarch, insn, dsc);
6910 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
6911 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
6912 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
6913 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6914 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
6915 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6916 else if ((op1 & 0x30) == 0x20 && !op)
6917 {
6918 if ((coproc & 0xe) == 0xa)
6919 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
6920 else
6921 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6922 }
6923 else if ((op1 & 0x30) == 0x20 && op)
6924 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
6925 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
6926 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6927 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
6928 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6929 else if ((op1 & 0x30) == 0x30)
6930 return arm_copy_svc (gdbarch, insn, regs, dsc);
6931 else
6932 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
6933 }
6934
/* Displaced stepping of Thumb-state code is not implemented; report the
   limitation rather than mis-stepping the inferior.  The parameters match
   arm_process_displaced_insn so the two can be dispatched uniformly.  */

static void
thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			      CORE_ADDR to, struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  error (_("Displaced stepping is only supported in ARM mode"));
}
6942
/* Top-level decoder for displaced stepping: initialize the closure DSC,
   read the instruction at FROM, and dispatch on its major opcode field to
   fill in dsc->modinsn[] and the cleanup routine.  TO is the scratch-space
   address.  Raises an internal error if decoding fails.  */

void
arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  int err = 0;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn;

  /* Most displaced instructions use a 1-instruction scratch space, so set this
     here and override below if/when necessary.  */
  dsc->numinsns = 1;
  dsc->insn_addr = from;
  dsc->scratch_base = to;
  dsc->cleanup = NULL;
  dsc->wrote_to_pc = 0;

  /* Thumb-state stepping is handled (currently: rejected) separately.  */
  if (!displaced_in_arm_mode (regs))
    return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);

  dsc->is_thumb = 0;
  dsc->insn_size = 4;
  insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
			"at %.8lx\n", (unsigned long) insn,
			(unsigned long) from);

  /* The unconditional (cond == 0xf) space has its own decode tables.  */
  if ((insn & 0xf0000000) == 0xf0000000)
    err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
  /* Dispatch on {bits<27:25>, bit 4} packed into a 4-bit index.  */
  else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
    {
    case 0x0: case 0x1: case 0x2: case 0x3:
      err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
      break;

    case 0x4: case 0x5: case 0x6:
      err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
      break;

    case 0x7:
      err = arm_decode_media (gdbarch, insn, dsc);
      break;

    case 0x8: case 0x9: case 0xa: case 0xb:
      err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
      break;

    case 0xc: case 0xd: case 0xe: case 0xf:
      err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
      break;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("arm_process_displaced_insn: Instruction decode error"));
}
7000
/* Actually set up the scratch space for a displaced instruction.  Writes
   the dsc->numinsns modified instruction(s) to the scratch area at TO,
   followed by the architecture's breakpoint instruction to regain control
   after the step.  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct displaced_step_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Thumb instructions are written as 2-byte units (dsc->is_thumb is set
     by the decoder).  */
  int size = dsc->is_thumb? 2 : 4;
  const unsigned char *bkp_insn;

  offset = 0;
  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
	  if (size == 4)
	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
				dsc->modinsn[i]);
	  else if (size == 2)
	    fprintf_unfiltered (gdb_stdlog, "%.4x",
				(unsigned short)dsc->modinsn[i]);

	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
			      (unsigned long) to + offset);

	}
      write_memory_unsigned_integer (to + offset, size,
				     byte_order_for_code,
				     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  /* NOTE(review): this debug line deliberately(?) omits a trailing newline
     -- confirm whether further output is expected to follow it.  */
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			paddress (gdbarch, from), paddress (gdbarch, to));
}
7056
7057 /* Entry point for copying an instruction into scratch space for displaced
7058 stepping. */
7059
7060 struct displaced_step_closure *
7061 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
7062 CORE_ADDR from, CORE_ADDR to,
7063 struct regcache *regs)
7064 {
7065 struct displaced_step_closure *dsc
7066 = xmalloc (sizeof (struct displaced_step_closure));
7067 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
7068 arm_displaced_init_closure (gdbarch, from, to, dsc);
7069
7070 return dsc;
7071 }
7072
7073 /* Entry point for cleaning things up after a displaced instruction has been
7074 single-stepped. */
7075
7076 void
7077 arm_displaced_step_fixup (struct gdbarch *gdbarch,
7078 struct displaced_step_closure *dsc,
7079 CORE_ADDR from, CORE_ADDR to,
7080 struct regcache *regs)
7081 {
7082 if (dsc->cleanup)
7083 dsc->cleanup (gdbarch, regs, dsc);
7084
7085 if (!dsc->wrote_to_pc)
7086 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
7087 dsc->insn_addr + dsc->insn_size);
7088
7089 }
7090
7091 #include "bfd-in2.h"
7092 #include "libcoff.h"
7093
/* Disassembler callback: print the instruction at MEMADDR.  For Thumb
   addresses, rig up a fake COFF Thumb symbol so that opcodes/arm-dis.c
   switches to Thumb decoding.  Returns the length consumed, as per the
   disassemble_info contract.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Static: the fake symbol is built once and reused for every
	 subsequent Thumb disassembly.  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit before disassembling.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
7135
7136 /* The following define instruction sequences that will cause ARM
7137 cpu's to take an undefined instruction trap. These are used to
7138 signal a breakpoint to GDB.
7139
7140 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
7141 modes. A different instruction is required for each mode. The ARM
7142 cpu's can also be big or little endian. Thus four different
7143 instructions are needed to support all cases.
7144
7145 Note: ARMv4 defines several new instructions that will take the
7146 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7147 not in fact add the new instructions. The new undefined
7148 instructions in ARMv4 are all instructions that had no defined
7149 behaviour in earlier chips. There is no guarantee that they will
   raise an exception; they may instead be treated as NOP's.  In
   practice, it may only be safe to rely on instructions matching:
7152
7153 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7154 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7155 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7156
   Even this may only be true if the condition predicate is true.  The
7158 following use a condition predicate of ALWAYS so it is always TRUE.
7159
7160 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7161 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.  This can be handled by the
7163 abi-specific code during establishment of the gdbarch vector. */
7164
/* 32-bit ARM-mode undefined-instruction breakpoints, little- and
   big-endian byte order (see the comment above).  */
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
/* 16-bit Thumb-mode breakpoints; 0xbe 0xbe is the same byte sequence
   in either endianness.  */
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

/* Default breakpoint byte sequences, used unless the ABI-specific
   code installs its own (as noted in the comment above).  */
static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7174
7175 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
7176 the program counter value to determine whether a 16-bit or 32-bit
7177 breakpoint should be used. It returns a pointer to a string of
7178 bytes that encode a breakpoint instruction, stores the length of
7179 the string to *lenptr, and adjusts the program counter (if
7180 necessary) to point to the actual memory location where the
7181 breakpoint should be inserted. */
7182
static const unsigned char *
arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  if (arm_pc_is_thumb (gdbarch, *pcptr))
    {
      /* Clear the low (Thumb) bit so the breakpoint lands on the real
	 instruction address.  */
      *pcptr = UNMAKE_THUMB_ADDR (*pcptr);

      /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
	 check whether we are replacing a 32-bit instruction.  */
      if (tdep->thumb2_breakpoint != NULL)
	{
	  gdb_byte buf[2];
	  if (target_read_memory (*pcptr, buf, 2) == 0)
	    {
	      unsigned short inst1;
	      inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
	      /* First halfword of a 32-bit Thumb-2 encoding: top three
		 bits are 0b111 and bits 12:11 are not both zero.  */
	      if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
		{
		  *lenptr = tdep->thumb2_breakpoint_size;
		  return tdep->thumb2_breakpoint;
		}
	    }
	  /* On a read failure, fall through to the 16-bit breakpoint.  */
	}

      *lenptr = tdep->thumb_breakpoint_size;
      return tdep->thumb_breakpoint;
    }
  else
    {
      *lenptr = tdep->arm_breakpoint_size;
      return tdep->arm_breakpoint;
    }
}
7219
7220 static void
7221 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
7222 int *kindptr)
7223 {
7224 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7225
7226 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
7227
7228 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
7229 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
7230 that this is not confused with a 32-bit ARM breakpoint. */
7231 *kindptr = 3;
7232 }
7233
7234 /* Extract from an array REGBUF containing the (raw) register state a
7235 function return value of type TYPE, and copy that, in virtual
7236 format, into VALBUF. */
7237
static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
	    convert_from_extended (floatformat_from_type (type), tmpbuf,
				   valbuf, gdbarch_byte_order (gdbarch));
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float values come back in the core registers: r0, and
	     r1 as well for a value wider than 4 bytes.  */
	  regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
				  valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      /* Copy one register at a time from r0 upward until LEN bytes
	 have been produced.  */
      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, tmpbuf);
	  memcpy (valbuf, tmpbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
7326
7327
7328 /* Will a function return an aggregate type in memory or in a
7329 register? Return 0 if an aggregate type can be returned in a
7330 register, 1 if it must be returned in memory. */
7331
7332 static int
7333 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
7334 {
7335 int nRc;
7336 enum type_code code;
7337
7338 CHECK_TYPEDEF (type);
7339
7340 /* In the ARM ABI, "integer" like aggregate types are returned in
7341 registers. For an aggregate type to be integer like, its size
7342 must be less than or equal to INT_REGISTER_SIZE and the
7343 offset of each addressable subfield must be zero. Note that bit
7344 fields are not addressable, and all addressable subfields of
7345 unions always start at offset zero.
7346
7347 This function is based on the behaviour of GCC 2.95.1.
7348 See: gcc/arm.c: arm_return_in_memory() for details.
7349
7350 Note: All versions of GCC before GCC 2.95.2 do not set up the
7351 parameters correctly for a function returning the following
7352 structure: struct { float f;}; This should be returned in memory,
7353 not a register. Richard Earnshaw sent me a patch, but I do not
7354 know of any way to detect if a function like the above has been
7355 compiled with the correct calling convention. */
7356
7357 /* All aggregate types that won't fit in a register must be returned
7358 in memory. */
7359 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
7360 {
7361 return 1;
7362 }
7363
7364 /* The AAPCS says all aggregates not larger than a word are returned
7365 in a register. */
7366 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
7367 return 0;
7368
7369 /* The only aggregate types that can be returned in a register are
7370 structs and unions. Arrays must be returned in memory. */
7371 code = TYPE_CODE (type);
7372 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
7373 {
7374 return 1;
7375 }
7376
7377 /* Assume all other aggregate types can be returned in a register.
7378 Run a check for structures, unions and arrays. */
7379 nRc = 0;
7380
7381 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
7382 {
7383 int i;
7384 /* Need to check if this struct/union is "integer" like. For
7385 this to be true, its size must be less than or equal to
7386 INT_REGISTER_SIZE and the offset of each addressable
7387 subfield must be zero. Note that bit fields are not
7388 addressable, and unions always start at offset zero. If any
7389 of the subfields is a floating point type, the struct/union
7390 cannot be an integer type. */
7391
7392 /* For each field in the object, check:
7393 1) Is it FP? --> yes, nRc = 1;
7394 2) Is it addressable (bitpos != 0) and
7395 not packed (bitsize == 0)?
7396 --> yes, nRc = 1
7397 */
7398
7399 for (i = 0; i < TYPE_NFIELDS (type); i++)
7400 {
7401 enum type_code field_type_code;
7402 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
7403 i)));
7404
7405 /* Is it a floating point type field? */
7406 if (field_type_code == TYPE_CODE_FLT)
7407 {
7408 nRc = 1;
7409 break;
7410 }
7411
7412 /* If bitpos != 0, then we have to care about it. */
7413 if (TYPE_FIELD_BITPOS (type, i) != 0)
7414 {
7415 /* Bitfields are not addressable. If the field bitsize is
7416 zero, then the field is not packed. Hence it cannot be
7417 a bitfield or any other packed type. */
7418 if (TYPE_FIELD_BITSIZE (type, i) == 0)
7419 {
7420 nRc = 1;
7421 break;
7422 }
7423 }
7424 }
7425 }
7426
7427 return nRc;
7428 }
7429
7430 /* Write into appropriate registers a function return value of type
7431 TYPE, given in virtual format. */
7432
7433 static void
7434 arm_store_return_value (struct type *type, struct regcache *regs,
7435 const gdb_byte *valbuf)
7436 {
7437 struct gdbarch *gdbarch = get_regcache_arch (regs);
7438 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7439
7440 if (TYPE_CODE (type) == TYPE_CODE_FLT)
7441 {
7442 char buf[MAX_REGISTER_SIZE];
7443
7444 switch (gdbarch_tdep (gdbarch)->fp_model)
7445 {
7446 case ARM_FLOAT_FPA:
7447
7448 convert_to_extended (floatformat_from_type (type), buf, valbuf,
7449 gdbarch_byte_order (gdbarch));
7450 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
7451 break;
7452
7453 case ARM_FLOAT_SOFT_FPA:
7454 case ARM_FLOAT_SOFT_VFP:
7455 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7456 not using the VFP ABI code. */
7457 case ARM_FLOAT_VFP:
7458 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
7459 if (TYPE_LENGTH (type) > 4)
7460 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
7461 valbuf + INT_REGISTER_SIZE);
7462 break;
7463
7464 default:
7465 internal_error (__FILE__, __LINE__,
7466 _("arm_store_return_value: Floating "
7467 "point model not supported"));
7468 break;
7469 }
7470 }
7471 else if (TYPE_CODE (type) == TYPE_CODE_INT
7472 || TYPE_CODE (type) == TYPE_CODE_CHAR
7473 || TYPE_CODE (type) == TYPE_CODE_BOOL
7474 || TYPE_CODE (type) == TYPE_CODE_PTR
7475 || TYPE_CODE (type) == TYPE_CODE_REF
7476 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7477 {
7478 if (TYPE_LENGTH (type) <= 4)
7479 {
7480 /* Values of one word or less are zero/sign-extended and
7481 returned in r0. */
7482 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7483 LONGEST val = unpack_long (type, valbuf);
7484
7485 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
7486 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
7487 }
7488 else
7489 {
7490 /* Integral values greater than one word are stored in consecutive
7491 registers starting with r0. This will always be a multiple of
7492 the regiser size. */
7493 int len = TYPE_LENGTH (type);
7494 int regno = ARM_A1_REGNUM;
7495
7496 while (len > 0)
7497 {
7498 regcache_cooked_write (regs, regno++, valbuf);
7499 len -= INT_REGISTER_SIZE;
7500 valbuf += INT_REGISTER_SIZE;
7501 }
7502 }
7503 }
7504 else
7505 {
7506 /* For a structure or union the behaviour is as if the value had
7507 been stored to word-aligned memory and then loaded into
7508 registers with 32-bit load instruction(s). */
7509 int len = TYPE_LENGTH (type);
7510 int regno = ARM_A1_REGNUM;
7511 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7512
7513 while (len > 0)
7514 {
7515 memcpy (tmpbuf, valbuf,
7516 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7517 regcache_cooked_write (regs, regno++, tmpbuf);
7518 len -= INT_REGISTER_SIZE;
7519 valbuf += INT_REGISTER_SIZE;
7520 }
7521 }
7522 }
7523
7524
7525 /* Handle function return values. */
7526
static enum return_value_convention
arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
		  struct type *valtype, struct regcache *regcache,
		  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum arm_vfp_cprc_base_type vfp_base_type;
  int vfp_base_count;

  /* If the function follows the VFP ABI and the type is a VFP call
     candidate, the value is passed back in VFP registers: one
     register unit per base-type element.  */
  if (arm_vfp_abi_for_function (gdbarch, func_type)
      && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
    {
      int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
      int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
      int i;
      for (i = 0; i < vfp_base_count; i++)
	{
	  if (reg_char == 'q')
	    {
	      /* Quad registers go through the NEON helpers, which
		 split them across two raw double registers.  */
	      if (writebuf)
		arm_neon_quad_write (gdbarch, regcache, i,
				     writebuf + i * unit_length);

	      if (readbuf)
		arm_neon_quad_read (gdbarch, regcache, i,
				    readbuf + i * unit_length);
	    }
	  else
	    {
	      /* Look up the s<i> or d<i> register by name; name_buf
		 holds at most "d31" plus the terminator.  */
	      char name_buf[4];
	      int regnum;

	      sprintf (name_buf, "%c%d", reg_char, i);
	      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						    strlen (name_buf));
	      if (writebuf)
		regcache_cooked_write (regcache, regnum,
				       writebuf + i * unit_length);
	      if (readbuf)
		regcache_cooked_read (regcache, regnum,
				      readbuf + i * unit_length);
	    }
	}
      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  /* Aggregates may have to be returned in memory, depending on the
     ABI (see arm_return_in_memory) and the struct-return setting.  */
  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (tdep->struct_return == pcc_struct_return
	  || arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }

  /* Everything else is in core (or FPA) registers.  */
  if (writebuf)
    arm_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    arm_extract_return_value (valtype, regcache, readbuf);

  return RETURN_VALUE_REGISTER_CONVENTION;
}
7590
7591
7592 static int
7593 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
7594 {
7595 struct gdbarch *gdbarch = get_frame_arch (frame);
7596 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7597 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7598 CORE_ADDR jb_addr;
7599 char buf[INT_REGISTER_SIZE];
7600
7601 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
7602
7603 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
7604 INT_REGISTER_SIZE))
7605 return 0;
7606
7607 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
7608 return 1;
7609 }
7610
7611 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
7612 return the target PC. Otherwise return 0. */
7613
CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    return 0;

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (strncmp (name, "_call_via_", 10) == 0
      || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
    {
      /* Use the name suffix to determine which register contains the
	 target PC.  All the register names in the table are exactly
	 two characters, so the suffix starts at strlen (name) - 2.  */
      static char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
	if (strcmp (&name[offset], table[regno]) == 0)
	  return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
	   && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
		       strlen ("_from_thumb")) == 0)
	  || (namelen > 2 + strlen ("_from_arm")
	      && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
			  strlen ("_from_arm")) == 0)))
    {
      char *target_name;
      int target_len = namelen - 2;
      struct minimal_symbol *minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Recover "foo" from "__foo_from_arm" / "__foo_from_thumb":
	 drop the leading "__" and the matched suffix.  The suffixes
	 differ only in their final character ('b' vs. 'm').  */
      if (name[namelen - 1] == 'b')
	target_len -= strlen ("_from_thumb");
      else
	target_len -= strlen ("_from_arm");

      target_name = alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Prefer a symbol from the same objfile as the stub itself.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym != NULL)
	return SYMBOL_VALUE_ADDRESS (minsym);
      else
	return 0;
    }

  return 0;			/* not a stub */
}
7685
7686 static void
7687 set_arm_command (char *args, int from_tty)
7688 {
7689 printf_unfiltered (_("\
7690 \"set arm\" must be followed by an apporpriate subcommand.\n"));
7691 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
7692 }
7693
/* Handle the bare "show arm" command: display the value of every
   "show arm" subcommand.  */

static void
show_arm_command (char *args, int from_tty)
{
  cmd_show_list (showarmcmdlist, from_tty, "");
}
7699
/* Rebuild the current architecture so that a change to one of the
   "set arm ..." settings takes effect immediately.  */

static void
arm_update_current_architecture (void)
{
  struct gdbarch_info info;

  /* If the current architecture is not ARM, we have nothing to do.  */
  if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
    return;

  /* Update the architecture.  */
  gdbarch_info_init (&info);

  if (!gdbarch_update_p (info))
    internal_error (__FILE__, __LINE__, _("could not update architecture"));
}
7715
7716 static void
7717 set_fp_model_sfunc (char *args, int from_tty,
7718 struct cmd_list_element *c)
7719 {
7720 enum arm_float_model fp_model;
7721
7722 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
7723 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
7724 {
7725 arm_fp_model = fp_model;
7726 break;
7727 }
7728
7729 if (fp_model == ARM_FLOAT_LAST)
7730 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
7731 current_fp_model);
7732
7733 arm_update_current_architecture ();
7734 }
7735
/* Implement "show arm fp-model".  When the setting is "auto" and the
   current architecture is ARM, also report the model actually in
   effect.  */

static void
show_fp_model (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_fp_model == ARM_FLOAT_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
		      fp_model_strings[tdep->fp_model]);
  else
    fprintf_filtered (file, _("\
The current ARM floating point model is \"%s\".\n"),
		      fp_model_strings[arm_fp_model]);
}
7752
7753 static void
7754 arm_set_abi (char *args, int from_tty,
7755 struct cmd_list_element *c)
7756 {
7757 enum arm_abi_kind arm_abi;
7758
7759 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
7760 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
7761 {
7762 arm_abi_global = arm_abi;
7763 break;
7764 }
7765
7766 if (arm_abi == ARM_ABI_LAST)
7767 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
7768 arm_abi_string);
7769
7770 arm_update_current_architecture ();
7771 }
7772
/* Implement "show arm abi".  When the setting is "auto" and the
   current architecture is ARM, also report the ABI actually in
   effect.  */

static void
arm_show_abi (struct ui_file *file, int from_tty,
	      struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_abi_global == ARM_ABI_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM ABI is \"auto\" (currently \"%s\").\n"),
		      arm_abi_strings[tdep->arm_abi]);
  else
    fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
		      arm_abi_string);
}
7788
7789 static void
7790 arm_show_fallback_mode (struct ui_file *file, int from_tty,
7791 struct cmd_list_element *c, const char *value)
7792 {
7793 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7794
7795 fprintf_filtered (file,
7796 _("The current execution mode assumed "
7797 "(when symbols are unavailable) is \"%s\".\n"),
7798 arm_fallback_mode_string);
7799 }
7800
7801 static void
7802 arm_show_force_mode (struct ui_file *file, int from_tty,
7803 struct cmd_list_element *c, const char *value)
7804 {
7805 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7806
7807 fprintf_filtered (file,
7808 _("The current execution mode assumed "
7809 "(even when symbols are available) is \"%s\".\n"),
7810 arm_force_mode_string);
7811 }
7812
7813 /* If the user changes the register disassembly style used for info
7814 register and other commands, we have to also switch the style used
7815 in opcodes for disassembly output. This function is run in the "set
7816 arm disassembly" command, and does that. */
7817
static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Push the user's new choice through to the opcodes disassembler.  */
  set_disassembly_style ();
}
7824 \f
7825 /* Return the ARM register name corresponding to register I. */
7826 static const char *
7827 arm_register_name (struct gdbarch *gdbarch, int i)
7828 {
7829 const int num_regs = gdbarch_num_regs (gdbarch);
7830
7831 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
7832 && i >= num_regs && i < num_regs + 32)
7833 {
7834 static const char *const vfp_pseudo_names[] = {
7835 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
7836 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
7837 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
7838 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
7839 };
7840
7841 return vfp_pseudo_names[i - num_regs];
7842 }
7843
7844 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
7845 && i >= num_regs + 32 && i < num_regs + 32 + 16)
7846 {
7847 static const char *const neon_pseudo_names[] = {
7848 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
7849 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
7850 };
7851
7852 return neon_pseudo_names[i - num_regs - 32];
7853 }
7854
7855 if (i >= ARRAY_SIZE (arm_register_names))
7856 /* These registers are only supported on targets which supply
7857 an XML description. */
7858 return "";
7859
7860 return arm_register_names[i];
7861 }
7862
7863 static void
7864 set_disassembly_style (void)
7865 {
7866 int current;
7867
7868 /* Find the style that the user wants. */
7869 for (current = 0; current < num_disassembly_options; current++)
7870 if (disassembly_style == valid_disassembly_styles[current])
7871 break;
7872 gdb_assert (current < num_disassembly_options);
7873
7874 /* Synchronize the disassembler. */
7875 set_arm_regname_option (current);
7876 }
7877
7878 /* Test whether the coff symbol specific value corresponds to a Thumb
7879 function. */
7880
7881 static int
7882 coff_sym_is_thumb (int val)
7883 {
7884 return (val == C_THUMBEXT
7885 || val == C_THUMBSTAT
7886 || val == C_THUMBEXTFUNC
7887 || val == C_THUMBSTATFUNC
7888 || val == C_THUMBLABEL);
7889 }
7890
7891 /* arm_coff_make_msymbol_special()
7892 arm_elf_make_msymbol_special()
7893
7894 These functions test whether the COFF or ELF symbol corresponds to
7895 an address in thumb code, and set a "special" bit in a minimal
7896 symbol to indicate that it does. */
7897
static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  /* Mark the minimal symbol "special" (Thumb) when the ELF symbol's
     branch-type annotation says it refers to Thumb code.  */
  if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
      == ST_BRANCH_TO_THUMB)
    MSYMBOL_SET_SPECIAL (msym);
}
7905
static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  /* Mark the minimal symbol "special" (Thumb) when the COFF storage
     class VAL is one of the Thumb classes.  */
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
7912
/* Per-objfile data destructor: free each section's mapping-symbol
   vector.  ARG is the struct arm_per_objfile; the structure and its
   SECTION_MAPS array are obstack-allocated (see
   arm_record_special_symbol) and need no explicit release.  */

static void
arm_objfile_data_free (struct objfile *objfile, void *arg)
{
  struct arm_per_objfile *data = arg;
  unsigned int i;

  for (i = 0; i < objfile->obfd->section_count; i++)
    VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
}
7922
/* Record an ARM mapping symbol ($a, $t or $d) from SYM into the
   per-section map of OBJFILE, keeping each section's vector sorted by
   symbol value.  Other "$..." special symbols are ignored.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
			   asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  VEC(arm_mapping_symbol_s) **map_p;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Create the per-objfile data (and its per-section array of
     vectors) lazily, on the obstack, the first time we see a mapping
     symbol for this objfile.  */
  data = objfile_data (objfile, arm_objfile_data_key);
  if (data == NULL)
    {
      data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
			     struct arm_per_objfile);
      set_objfile_data (objfile, arm_objfile_data_key, data);
      data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
					   objfile->obfd->section_count,
					   VEC(arm_mapping_symbol_s) *);
    }
  map_p = &data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  if (!VEC_empty (arm_mapping_symbol_s, *map_p))
    {
      struct arm_mapping_symbol *prev_map_sym;

      prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
      if (prev_map_sym->value >= sym->value)
	{
	  /* Out-of-order symbol: binary-search for the insertion
	     point that keeps the vector sorted.  */
	  unsigned int idx;
	  idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
				 arm_compare_mapping_symbols);
	  VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
	  return;
	}
    }

  /* Common case: append at the end.  */
  VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
}
7971
7972 static void
7973 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7974 {
7975 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7976 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7977
7978 /* If necessary, set the T bit. */
7979 if (arm_apcs_32)
7980 {
7981 ULONGEST val, t_bit;
7982 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7983 t_bit = arm_psr_thumb_bit (gdbarch);
7984 if (arm_pc_is_thumb (gdbarch, pc))
7985 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7986 val | t_bit);
7987 else
7988 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7989 val & ~t_bit);
7990 }
7991 }
7992
7993 /* Read the contents of a NEON quad register, by reading from two
7994 double registers. This is used to implement the quad pseudo
7995 registers, and for argument passing in case the quad registers are
7996 missing; vectors are passed in quad registers when using the VFP
7997 ABI, even if a NEON unit is not present. REGNUM is the index of
7998 the quad register, in [0, 15]. */
7999
static enum register_status
arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
		    int regnum, gdb_byte *buf)
{
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;
  enum register_status status;

  /* q<n> overlays the raw double registers d<2n> and d<2n+1>; locate
     the first one by name.  */
  sprintf (name_buf, "d%d", regnum << 1);
  double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
					       strlen (name_buf));

  /* d0 is always the least significant half of q0.  */
  if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
    offset = 8;
  else
    offset = 0;

  /* Low double register.  */
  status = regcache_raw_read (regcache, double_regnum, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  /* High double register fills the other half of BUF.  */
  offset = 8 - offset;
  status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  return REG_VALID;
}
8032
/* Read pseudo register REGNUM (a VFP single-precision register, or a
   NEON quad) into BUF.  Singles are extracted from one half of the
   corresponding raw double register.  */

static enum register_status
arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
		 int regnum, gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  /* Pseudo register numbers start right after the raw registers.  */
  gdb_assert (regnum >= num_regs);
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
  else
    {
      enum register_status status;

      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* Map s<2n>/s<2n+1> onto raw double register d<n>, by name.  */
      sprintf (name_buf, "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      status = regcache_raw_read (regcache, double_regnum, reg_buf);
      if (status == REG_VALID)
	memcpy (buf, reg_buf + offset, 4);
      return status;
    }
}
8071
8072 /* Store the contents of BUF to a NEON quad register, by writing to
8073 two double registers. This is used to implement the quad pseudo
8074 registers, and for argument passing in case the quad registers are
8075 missing; vectors are passed in quad registers when using the VFP
8076 ABI, even if a NEON unit is not present. REGNUM is the index
8077 of the quad register, in [0, 15]. */
8078
8079 static void
8080 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
8081 int regnum, const gdb_byte *buf)
8082 {
8083 char name_buf[4];
8084 gdb_byte reg_buf[8];
8085 int offset, double_regnum;
8086
8087 sprintf (name_buf, "d%d", regnum << 1);
8088 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8089 strlen (name_buf));
8090
8091 /* d0 is always the least significant half of q0. */
8092 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8093 offset = 8;
8094 else
8095 offset = 0;
8096
8097 regcache_raw_write (regcache, double_regnum, buf + offset);
8098 offset = 8 - offset;
8099 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
8100 }
8101
8102 static void
8103 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
8104 int regnum, const gdb_byte *buf)
8105 {
8106 const int num_regs = gdbarch_num_regs (gdbarch);
8107 char name_buf[4];
8108 gdb_byte reg_buf[8];
8109 int offset, double_regnum;
8110
8111 gdb_assert (regnum >= num_regs);
8112 regnum -= num_regs;
8113
8114 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8115 /* Quad-precision register. */
8116 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
8117 else
8118 {
8119 /* Single-precision register. */
8120 gdb_assert (regnum < 32);
8121
8122 /* s0 is always the least significant half of d0. */
8123 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8124 offset = (regnum & 1) ? 0 : 4;
8125 else
8126 offset = (regnum & 1) ? 4 : 0;
8127
8128 sprintf (name_buf, "d%d", regnum >> 1);
8129 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8130 strlen (name_buf));
8131
8132 regcache_raw_read (regcache, double_regnum, reg_buf);
8133 memcpy (reg_buf + offset, buf, 4);
8134 regcache_raw_write (regcache, double_regnum, reg_buf);
8135 }
8136 }
8137
/* user_reg_add callback: BATON points at the raw register number that
   was registered for this alias.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_ptr = baton;

  return value_of_register (*regnum_ptr, frame);
}
8144 \f
8145 static enum gdb_osabi
8146 arm_elf_osabi_sniffer (bfd *abfd)
8147 {
8148 unsigned int elfosabi;
8149 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
8150
8151 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
8152
8153 if (elfosabi == ELFOSABI_ARM)
8154 /* GNU tools use this value. Check note sections in this case,
8155 as well. */
8156 bfd_map_over_sections (abfd,
8157 generic_elf_osabi_sniff_abi_tag_sections,
8158 &osabi);
8159
8160 /* Anything else will be handled by the generic ELF sniffer. */
8161 return osabi;
8162 }
8163
8164 static int
8165 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8166 struct reggroup *group)
8167 {
8168 /* FPS register's type is INT, but belongs to float_reggroup. Beside
8169 this, FPS register belongs to save_regroup, restore_reggroup, and
8170 all_reggroup, of course. */
8171 if (regnum == ARM_FPS_REGNUM)
8172 return (group == float_reggroup
8173 || group == save_reggroup
8174 || group == restore_reggroup
8175 || group == all_reggroup);
8176 else
8177 return default_register_reggroup_p (gdbarch, regnum, group);
8178 }
8179
8180 \f
8181 /* Initialize the current architecture based on INFO. If possible,
8182 re-use an architecture from ARCHES, which is a list of
8183 architectures already created during this debugging session.
8184
8185 Called e.g. at program startup, when reading a core file, and when
8186 reading a binary file. */
8187
static struct gdbarch *
arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  enum arm_abi_kind arm_abi = arm_abi_global;
  enum arm_float_model fp_model = arm_fp_model;
  struct tdesc_arch_data *tdesc_data = NULL;
  int i, is_m = 0;
  int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
  int have_neon = 0;
  int have_fpa_registers = 1;
  const struct target_desc *tdesc = info.target_desc;

  /* If we have an object to base this architecture on, try to determine
     its ABI.  */

  if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
    {
      int ei_osabi, e_flags;

      switch (bfd_get_flavour (info.abfd))
	{
	case bfd_target_aout_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_coff_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  /* XXX WinCE?  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_elf_flavour:
	  ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
	  e_flags = elf_elfheader (info.abfd)->e_flags;

	  if (ei_osabi == ELFOSABI_ARM)
	    {
	      /* GNU tools used to use this value, but do not for EABI
		 objects.  There's nowhere to tag an EABI version
		 anyway, so assume APCS.  */
	      arm_abi = ARM_ABI_APCS;
	    }
	  else if (ei_osabi == ELFOSABI_NONE)
	    {
	      int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
	      int attr_arch, attr_profile;

	      switch (eabi_ver)
		{
		case EF_ARM_EABI_UNKNOWN:
		  /* Assume GNU tools.  */
		  arm_abi = ARM_ABI_APCS;
		  break;

		case EF_ARM_EABI_VER4:
		case EF_ARM_EABI_VER5:
		  arm_abi = ARM_ABI_AAPCS;
		  /* EABI binaries default to VFP float ordering.
		     They may also contain build attributes that can
		     be used to identify if the VFP argument-passing
		     ABI is in use.  */
		  if (fp_model == ARM_FLOAT_AUTO)
		    {
#ifdef HAVE_ELF
		      /* Tag_ABI_VFP_args distinguishes the base AAPCS
			 (FP values in integer registers) from the VFP
			 variant (FP values in VFP registers).  */
		      switch (bfd_elf_get_obj_attr_int (info.abfd,
							OBJ_ATTR_PROC,
							Tag_ABI_VFP_args))
			{
			case 0:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, base
			     variant".  */
			  fp_model = ARM_FLOAT_SOFT_VFP;
			  break;
			case 1:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, VFP
			     variant".  */
			  fp_model = ARM_FLOAT_VFP;
			  break;
			case 2:
			  /* "The user intended FP parameter/result
			     passing to conform to tool chain-specific
			     conventions" - we don't know any such
			     conventions, so leave it as "auto".  */
			  break;
			default:
			  /* Attribute value not mentioned in the
			     October 2008 ABI, so leave it as
			     "auto".  */
			  break;
			}
#else
		      fp_model = ARM_FLOAT_SOFT_VFP;
#endif
		    }
		  break;

		default:
		  /* Leave it as "auto".  */
		  warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
		  break;
		}

#ifdef HAVE_ELF
	      /* Detect M-profile programs.  This only works if the
		 executable file includes build attributes; GCC does
		 copy them to the executable, but e.g. RealView does
		 not.  */
	      attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
						    Tag_CPU_arch);
	      attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
						       OBJ_ATTR_PROC,
						       Tag_CPU_arch_profile);
	      /* GCC specifies the profile for v6-M; RealView only
		 specifies the profile for architectures starting with
		 V7 (as opposed to architectures with a tag
		 numerically greater than TAG_CPU_ARCH_V7).  */
	      if (!tdesc_has_registers (tdesc)
		  && (attr_arch == TAG_CPU_ARCH_V6_M
		      || attr_arch == TAG_CPU_ARCH_V6S_M
		      || attr_profile == 'M'))
		tdesc = tdesc_arm_with_m;
#endif
	    }

	  if (fp_model == ARM_FLOAT_AUTO)
	    {
	      int e_flags = elf_elfheader (info.abfd)->e_flags;

	      switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
		{
		case 0:
		  /* Leave it as "auto".  Strictly speaking this case
		     means FPA, but almost nobody uses that now, and
		     many toolchains fail to set the appropriate bits
		     for the floating-point model they use.  */
		  break;
		case EF_ARM_SOFT_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_FPA;
		  break;
		case EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_VFP;
		  break;
		case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_VFP;
		  break;
		}
	    }

	  /* BE8 images are big-endian data with little-endian code.  */
	  if (e_flags & EF_ARM_BE8)
	    info.byte_order_for_code = BFD_ENDIAN_LITTLE;

	  break;

	default:
	  /* Leave it as "auto".  */
	  break;
	}
    }

  /* Check any target description for validity.  */
  if (tdesc_has_registers (tdesc))
    {
      /* For most registers we require GDB's default names; but also allow
	 the numeric names for sp / lr / pc, as a convenience.  */
      static const char *const arm_sp_names[] = { "r13", "sp", NULL };
      static const char *const arm_lr_names[] = { "r14", "lr", NULL };
      static const char *const arm_pc_names[] = { "r15", "pc", NULL };

      const struct tdesc_feature *feature;
      int valid_p;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.core");
      if (feature == NULL)
	{
	  /* No core feature; accept an M-profile description instead.  */
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.m-profile");
	  if (feature == NULL)
	    return NULL;
	  else
	    is_m = 1;
	}

      tdesc_data = tdesc_data_alloc ();

      valid_p = 1;
      for (i = 0; i < ARM_SP_REGNUM; i++)
	valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
					    arm_register_names[i]);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_SP_REGNUM,
						  arm_sp_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_LR_REGNUM,
						  arm_lr_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_PC_REGNUM,
						  arm_pc_names);
      if (is_m)
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "xpsr");
      else
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "cpsr");

      if (!valid_p)
	{
	  tdesc_data_cleanup (tdesc_data);
	  return NULL;
	}

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.fpa");
      if (feature != NULL)
	{
	  valid_p = 1;
	  for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
	    valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
						arm_register_names[i]);
	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}
      else
	have_fpa_registers = 0;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.xscale.iwmmxt");
      if (feature != NULL)
	{
	  static const char *const iwmmxt_names[] = {
	    "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
	    "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
	    "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
	    "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
	  };

	  valid_p = 1;
	  for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  /* Check for the control registers, but do not fail if they
	     are missing.  */
	  for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
	    tdesc_numbered_register (feature, tdesc_data, i,
				     iwmmxt_names[i - ARM_WR0_REGNUM]);

	  for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}

      /* If we have a VFP unit, check whether the single precision registers
	 are present.  If not, then we will synthesize them as pseudo
	 registers.  */
      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.vfp");
      if (feature != NULL)
	{
	  static const char *const vfp_double_names[] = {
	    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
	    "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
	    "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
	    "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
	  };

	  /* Require the double precision registers.  There must be either
	     16 or 32.  */
	  valid_p = 1;
	  for (i = 0; i < 32; i++)
	    {
	      valid_p &= tdesc_numbered_register (feature, tdesc_data,
						  ARM_D0_REGNUM + i,
						  vfp_double_names[i]);
	      if (!valid_p)
		break;
	    }
	  /* A description providing exactly d0-d15 is also valid.  */
	  if (!valid_p && i == 16)
	    valid_p = 1;

	  /* Also require FPSCR.  */
	  valid_p &= tdesc_numbered_register (feature, tdesc_data,
					      ARM_FPSCR_REGNUM, "fpscr");
	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }

	  if (tdesc_unnumbered_register (feature, "s0") == 0)
	    have_vfp_pseudos = 1;

	  have_vfp_registers = 1;

	  /* If we have VFP, also check for NEON.  The architecture allows
	     NEON without VFP (integer vector operations only), but GDB
	     does not support that.  */
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.neon");
	  if (feature != NULL)
	    {
	      /* NEON requires 32 double-precision registers.  */
	      if (i != 32)
		{
		  tdesc_data_cleanup (tdesc_data);
		  return NULL;
		}

	      /* If there are quad registers defined by the stub, use
		 their type; otherwise (normally) provide them with
		 the default type.  */
	      if (tdesc_unnumbered_register (feature, "q0") == 0)
		have_neon_pseudos = 1;

	      have_neon = 1;
	    }
	}
    }

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      if (arm_abi != ARM_ABI_AUTO
	  && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
	continue;

      if (fp_model != ARM_FLOAT_AUTO
	  && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
	continue;

      /* There are various other properties in tdep that we do not
	 need to check here: those derived from a target description,
	 since gdbarches with a different target description are
	 automatically disqualified.  */

      /* Do check is_m, though, since it might come from the binary.  */
      if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
	continue;

      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      /* Reusing an existing gdbarch; release the scratch tdesc data.  */
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  /* Ownership of TDEP passes to the new gdbarch.  */
  tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Record additional information about the architecture we are defining.
     These are gdbarch discriminators, like the OSABI.  */
  tdep->arm_abi = arm_abi;
  tdep->fp_model = fp_model;
  tdep->is_m = is_m;
  tdep->have_fpa_registers = have_fpa_registers;
  tdep->have_vfp_registers = have_vfp_registers;
  tdep->have_vfp_pseudos = have_vfp_pseudos;
  tdep->have_neon_pseudos = have_neon_pseudos;
  tdep->have_neon = have_neon;

  /* Breakpoints.  */
  switch (info.byte_order_for_code)
    {
    case BFD_ENDIAN_BIG:
      tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);

      break;

    case BFD_ENDIAN_LITTLE:
      tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);

      break;

    default:
      internal_error (__FILE__, __LINE__,
		      _("arm_gdbarch_init: bad byte order for float format"));
    }

  /* On ARM targets char defaults to unsigned.  */
  set_gdbarch_char_signed (gdbarch, 0);

  /* Note: for displaced stepping, this includes the breakpoint, and one word
     of additional scratch space.  This setting isn't used for anything beside
     displaced stepping at present.  */
  set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;	/* Longjump support not enabled by default.  */

  /* The default, for both APCS and AAPCS, is to return small
     structures in registers.  */
  tdep->struct_return = reg_struct_return;

  set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, arm_frame_align);

  set_gdbarch_write_pc (gdbarch, arm_write_pc);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);

  frame_base_set_default (gdbarch, &arm_normal_base);

  /* Address manipulation.  */
  set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
  set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);

  /* Detect whether PC is in function epilogue.  */
  set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);

  /* Skip trampolines.  */
  set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
  set_gdbarch_remote_breakpoint_from_pc (gdbarch,
					 arm_remote_breakpoint_from_pc);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
  set_gdbarch_register_type (gdbarch, arm_register_type);
  set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);

  /* This "info float" is FPA-specific.  Use the generic version if we
     do not have FPA.  */
  if (gdbarch_tdep (gdbarch)->have_fpa_registers)
    set_gdbarch_print_float_info (gdbarch, arm_print_float_info);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
  set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);

  set_gdbarch_register_name (gdbarch, arm_register_name);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, arm_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);

  /* Minsymbol frobbing.  */
  set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
  set_gdbarch_coff_make_msymbol_special (gdbarch,
					 arm_coff_make_msymbol_special);
  set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);

  /* Thumb-2 IT block support.  */
  set_gdbarch_adjust_breakpoint_address (gdbarch,
					 arm_adjust_breakpoint_address);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
  frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  /* If the ABI is not otherwise marked, assume the old GNU APCS.  EABI
     binaries are always marked.  */
  if (tdep->arm_abi == ARM_ABI_AUTO)
    tdep->arm_abi = ARM_ABI_APCS;

  /* Watchpoints are not steppable.  */
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);

  /* We used to default to FPA for generic ARM, but almost nobody
     uses that now, and we now provide a way for the user to force
     the model.  So default to the most useful variant.  */
  if (tdep->fp_model == ARM_FLOAT_AUTO)
    tdep->fp_model = ARM_FLOAT_SOFT_FPA;

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);

  /* Floating point sizes and format.  */
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
    {
      /* FPA uses mixed-endian doubles on little-endian targets.  */
      set_gdbarch_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
      set_gdbarch_long_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
    }
  else
    {
      set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
      set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
    }

  if (have_vfp_pseudos)
    {
      /* NOTE: These are the only pseudo registers used by
	 the ARM target at the moment.  If more are added, a
	 little more care in numbering will be needed.  */

      int num_pseudos = 32;
      if (have_neon_pseudos)
	num_pseudos += 16;
      set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
      set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
      set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
    }

  if (tdesc_data)
    {
      set_tdesc_pseudo_register_name (gdbarch, arm_register_name);

      tdesc_use_registers (gdbarch, tdesc, tdesc_data);

      /* Override tdesc_register_type to adjust the types of VFP
	 registers for NEON.  */
      set_gdbarch_register_type (gdbarch, arm_register_type);
    }

  /* Add standard register aliases.  We add aliases even for those
     names which are used by the current architecture - it's simpler,
     and does no harm, since nothing ever lists user registers.  */
  for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
    user_reg_add (gdbarch, arm_register_aliases[i].name,
		  value_of_arm_user_reg, &arm_register_aliases[i].regnum);

  return gdbarch;
}
8761
8762 static void
8763 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
8764 {
8765 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8766
8767 if (tdep == NULL)
8768 return;
8769
8770 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
8771 (unsigned long) tdep->lowest_pc);
8772 }
8773
8774 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
8775
8776 void
8777 _initialize_arm_tdep (void)
8778 {
8779 struct ui_file *stb;
8780 long length;
8781 struct cmd_list_element *new_set, *new_show;
8782 const char *setname;
8783 const char *setdesc;
8784 const char *const *regnames;
8785 int numregs, i, j;
8786 static char *helptext;
8787 char regdesc[1024], *rdptr = regdesc;
8788 size_t rest = sizeof (regdesc);
8789
8790 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
8791
8792 arm_objfile_data_key
8793 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
8794
8795 /* Add ourselves to objfile event chain. */
8796 observer_attach_new_objfile (arm_exidx_new_objfile);
8797 arm_exidx_data_key
8798 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
8799
8800 /* Register an ELF OS ABI sniffer for ARM binaries. */
8801 gdbarch_register_osabi_sniffer (bfd_arch_arm,
8802 bfd_target_elf_flavour,
8803 arm_elf_osabi_sniffer);
8804
8805 /* Initialize the standard target descriptions. */
8806 initialize_tdesc_arm_with_m ();
8807 initialize_tdesc_arm_with_iwmmxt ();
8808 initialize_tdesc_arm_with_vfpv2 ();
8809 initialize_tdesc_arm_with_vfpv3 ();
8810 initialize_tdesc_arm_with_neon ();
8811
8812 /* Get the number of possible sets of register names defined in opcodes. */
8813 num_disassembly_options = get_arm_regname_num_options ();
8814
8815 /* Add root prefix command for all "set arm"/"show arm" commands. */
8816 add_prefix_cmd ("arm", no_class, set_arm_command,
8817 _("Various ARM-specific commands."),
8818 &setarmcmdlist, "set arm ", 0, &setlist);
8819
8820 add_prefix_cmd ("arm", no_class, show_arm_command,
8821 _("Various ARM-specific commands."),
8822 &showarmcmdlist, "show arm ", 0, &showlist);
8823
8824 /* Sync the opcode insn printer with our register viewer. */
8825 parse_arm_disassembler_option ("reg-names-std");
8826
8827 /* Initialize the array that will be passed to
8828 add_setshow_enum_cmd(). */
8829 valid_disassembly_styles
8830 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
8831 for (i = 0; i < num_disassembly_options; i++)
8832 {
8833 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
8834 valid_disassembly_styles[i] = setname;
8835 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
8836 rdptr += length;
8837 rest -= length;
8838 /* When we find the default names, tell the disassembler to use
8839 them. */
8840 if (!strcmp (setname, "std"))
8841 {
8842 disassembly_style = setname;
8843 set_arm_regname_option (i);
8844 }
8845 }
8846 /* Mark the end of valid options. */
8847 valid_disassembly_styles[num_disassembly_options] = NULL;
8848
8849 /* Create the help text. */
8850 stb = mem_fileopen ();
8851 fprintf_unfiltered (stb, "%s%s%s",
8852 _("The valid values are:\n"),
8853 regdesc,
8854 _("The default is \"std\"."));
8855 helptext = ui_file_xstrdup (stb, NULL);
8856 ui_file_delete (stb);
8857
8858 add_setshow_enum_cmd("disassembler", no_class,
8859 valid_disassembly_styles, &disassembly_style,
8860 _("Set the disassembly style."),
8861 _("Show the disassembly style."),
8862 helptext,
8863 set_disassembly_style_sfunc,
8864 NULL, /* FIXME: i18n: The disassembly style is
8865 \"%s\". */
8866 &setarmcmdlist, &showarmcmdlist);
8867
8868 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
8869 _("Set usage of ARM 32-bit mode."),
8870 _("Show usage of ARM 32-bit mode."),
8871 _("When off, a 26-bit PC will be used."),
8872 NULL,
8873 NULL, /* FIXME: i18n: Usage of ARM 32-bit
8874 mode is %s. */
8875 &setarmcmdlist, &showarmcmdlist);
8876
8877 /* Add a command to allow the user to force the FPU model. */
8878 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
8879 _("Set the floating point type."),
8880 _("Show the floating point type."),
8881 _("auto - Determine the FP typefrom the OS-ABI.\n\
8882 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
8883 fpa - FPA co-processor (GCC compiled).\n\
8884 softvfp - Software FP with pure-endian doubles.\n\
8885 vfp - VFP co-processor."),
8886 set_fp_model_sfunc, show_fp_model,
8887 &setarmcmdlist, &showarmcmdlist);
8888
8889 /* Add a command to allow the user to force the ABI. */
8890 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
8891 _("Set the ABI."),
8892 _("Show the ABI."),
8893 NULL, arm_set_abi, arm_show_abi,
8894 &setarmcmdlist, &showarmcmdlist);
8895
8896 /* Add two commands to allow the user to force the assumed
8897 execution mode. */
8898 add_setshow_enum_cmd ("fallback-mode", class_support,
8899 arm_mode_strings, &arm_fallback_mode_string,
8900 _("Set the mode assumed when symbols are unavailable."),
8901 _("Show the mode assumed when symbols are unavailable."),
8902 NULL, NULL, arm_show_fallback_mode,
8903 &setarmcmdlist, &showarmcmdlist);
8904 add_setshow_enum_cmd ("force-mode", class_support,
8905 arm_mode_strings, &arm_force_mode_string,
8906 _("Set the mode assumed even when symbols are available."),
8907 _("Show the mode assumed even when symbols are available."),
8908 NULL, NULL, arm_show_force_mode,
8909 &setarmcmdlist, &showarmcmdlist);
8910
8911 /* Debugging flag. */
8912 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
8913 _("Set ARM debugging."),
8914 _("Show ARM debugging."),
8915 _("When on, arm-specific debugging is enabled."),
8916 NULL,
8917 NULL, /* FIXME: i18n: "ARM debugging is %s. */
8918 &setdebuglist, &showdebuglist);
8919 }
This page took 0.204969 seconds and 5 git commands to generate.