4cd11d40906b2a2b37945325ad8afa544c9f193a
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "features/arm-with-m.c"
59 #include "features/arm-with-iwmmxt.c"
60 #include "features/arm-with-vfpv2.c"
61 #include "features/arm-with-vfpv3.c"
62 #include "features/arm-with-neon.c"
63
/* Nonzero emits ARM target-specific debugging output.  NOTE(review):
   the corresponding "set debug arm" command registration is not
   visible in this chunk -- confirm in _initialize_arm_tdep.  */
static int arm_debug;

/* Macros for setting and testing a bit in a minimal symbol that marks
   it as Thumb function.  The MSB of the minimal symbol's "info" field
   is used for this purpose.

   MSYMBOL_SET_SPECIAL	Actually sets the "special" bit.
   MSYMBOL_IS_SPECIAL	Tests the "special" bit in a minimal symbol.  */

#define MSYMBOL_SET_SPECIAL(msym)	\
	MSYMBOL_TARGET_FLAG_1 (msym) = 1

#define MSYMBOL_IS_SPECIAL(msym)	\
	MSYMBOL_TARGET_FLAG_1 (msym)

/* Per-objfile data used for mapping symbols.  */
static const struct objfile_data *arm_objfile_data_key;

/* One mapping symbol, stored section-relative.  */
struct arm_mapping_symbol
{
  /* Offset of the symbol from the start of its section; see
     arm_find_mapping_symbol, which searches using MEMADDR minus the
     section's start address.  */
  bfd_vma value;
  /* Mapping symbol class character; 't' denotes Thumb code (tested in
     arm_pc_is_thumb).  Presumably 'a'/'d' mark ARM code and data per
     the ELF for ARM specification -- TODO confirm.  */
  char type;
};
typedef struct arm_mapping_symbol arm_mapping_symbol_s;
DEF_VEC_O(arm_mapping_symbol_s);

struct arm_per_objfile
{
  /* One vector of mapping symbols per BFD section, indexed by the
     section's index.  Presumably kept sorted by VALUE, as required by
     the VEC_lower_bound search in arm_find_mapping_symbol.  */
  VEC(arm_mapping_symbol_s) **section_maps;
};
94
/* The list of available "set arm ..." and "show arm ..." commands.  */
static struct cmd_list_element *setarmcmdlist = NULL;
static struct cmd_list_element *showarmcmdlist = NULL;

/* The type of floating-point to use.  Keep this in sync with enum
   arm_float_model, and the help string in _initialize_arm_tdep.  */
static const char *fp_model_strings[] =
{
  "auto",
  "softfpa",
  "fpa",
  "softvfp",
  "vfp",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
static const char *current_fp_model = "auto";

/* The ABI to use.  Keep this in sync with arm_abi_kind.  */
static const char *arm_abi_strings[] =
{
  "auto",
  "APCS",
  "AAPCS",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
static const char *arm_abi_string = "auto";

/* The execution mode to assume.  */
static const char *arm_mode_strings[] =
{
  "auto",
  "arm",
  "thumb",
  NULL
};

/* User-configurable ARM/Thumb mode settings; both are consulted by
   arm_pc_is_thumb.  The "force" setting overrides the symbol table,
   while the "fallback" setting applies only when no symbol or mapping
   information is available.  */
static const char *arm_fallback_mode_string = "auto";
static const char *arm_force_mode_string = "auto";

/* Internal override of the execution mode.  -1 means no override,
   0 means override to ARM mode, 1 means override to Thumb mode.
   The effect is the same as if arm_force_mode has been set by the
   user (except the internal override has precedence over a user's
   arm_force_mode override).  */
static int arm_override_mode = -1;
146
/* Number of different reg name sets (options).  */
static int num_disassembly_options;

/* The standard register names, and all the valid aliases for them.  Note
   that `fp', `sp' and `pc' are not added in this alias list, because they
   have been added as builtin user registers in
   std-regs.c:_initialize_frame_reg.  */
static const struct
{
  const char *name;
  int regnum;
} arm_register_aliases[] = {
  /* Basic register numbers.  */
  { "r0", 0 },
  { "r1", 1 },
  { "r2", 2 },
  { "r3", 3 },
  { "r4", 4 },
  { "r5", 5 },
  { "r6", 6 },
  { "r7", 7 },
  { "r8", 8 },
  { "r9", 9 },
  { "r10", 10 },
  { "r11", 11 },
  { "r12", 12 },
  { "r13", 13 },
  { "r14", 14 },
  { "r15", 15 },
  /* Synonyms (argument and variable registers).  */
  { "a1", 0 },
  { "a2", 1 },
  { "a3", 2 },
  { "a4", 3 },
  { "v1", 4 },
  { "v2", 5 },
  { "v3", 6 },
  { "v4", 7 },
  { "v5", 8 },
  { "v6", 9 },
  { "v7", 10 },
  { "v8", 11 },
  /* Other platform-specific names for r9.  */
  { "sb", 9 },
  { "tr", 9 },
  /* Special names.  */
  { "ip", 12 },
  { "lr", 14 },
  /* Names used by GCC (not listed in the ARM EABI).  */
  { "sl", 10 },
  /* A special name from the older ATPCS.  */
  { "wr", 7 },
};

/* GDB's names for the core registers, indexed by register number.  */
static const char *const arm_register_names[] =
{"r0",  "r1",  "r2",  "r3",	/*  0  1  2  3 */
 "r4",  "r5",  "r6",  "r7",	/*  4  5  6  7 */
 "r8",  "r9",  "r10", "r11",	/*  8  9 10 11 */
 "r12", "sp",  "lr",  "pc",	/* 12 13 14 15 */
 "f0",  "f1",  "f2",  "f3",	/* 16 17 18 19 */
 "f4",  "f5",  "f6",  "f7",	/* 20 21 22 23 */
 "fps", "cpsr" };		/* 24 25       */

/* Valid register name styles.  */
static const char **valid_disassembly_styles;

/* Disassembly style to use.  Default to "std" register names.  */
static const char *disassembly_style;

/* This is used to keep the bfd arch_info in sync with the disassembly
   style.  */
static void set_disassembly_style_sfunc(char *, int,
					struct cmd_list_element *);
static void set_disassembly_style (void);

/* Forward declarations for FPA extended-precision conversion helpers;
   definitions are beyond this chunk.  */
static void convert_from_extended (const struct floatformat *, const void *,
				   void *, int);
static void convert_to_extended (const struct floatformat *, void *,
				 const void *, int);

/* Forward declarations for the NEON quad-register pseudo-register
   accessors; definitions are beyond this chunk.  */
static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
						struct regcache *regcache,
						int regnum, gdb_byte *buf);
static void arm_neon_quad_write (struct gdbarch *gdbarch,
				 struct regcache *regcache,
				 int regnum, const gdb_byte *buf);

/* Forward declaration; presumably returns 2 or 4 depending on whether
   INST1 starts a 16-bit or 32-bit Thumb instruction (used that way in
   thumb_analyze_prologue) -- definition is beyond this chunk.  */
static int thumb_insn_size (unsigned short inst1);
235
/* Results of scanning a function prologue, filled in by the prologue
   analyzers in this file (e.g. thumb_analyze_prologue).  */
struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

/* Forward declaration of the ARM-mode prologue analyzer; the
   definition is beyond this chunk.  */
static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
				       CORE_ADDR prologue_start,
				       CORE_ADDR prologue_end,
				       struct arm_prologue_cache *cache);

/* Architecture version for displaced stepping.  This effects the behaviour of
   certain instructions, and really should not be hard-wired.  */

#define DISPLACED_STEPPING_ARCH_VERSION		5
265
/* Addresses for calling Thumb functions have the bit 0 set.
   Here are some macros to test, set, or clear bit 0 of addresses.  */
#define IS_THUMB_ADDR(addr)	((addr) & 1)
#define MAKE_THUMB_ADDR(addr)	((addr) | 1)
#define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)

/* Set to true if the 32-bit mode is in use.  When false,
   arm_addr_bits_remove strips addresses down to the 26-bit range.  */

int arm_apcs_32 = 1;
275
276 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
277
278 int
279 arm_psr_thumb_bit (struct gdbarch *gdbarch)
280 {
281 if (gdbarch_tdep (gdbarch)->is_m)
282 return XPSR_T;
283 else
284 return CPSR_T;
285 }
286
287 /* Determine if FRAME is executing in Thumb mode. */
288
289 int
290 arm_frame_is_thumb (struct frame_info *frame)
291 {
292 CORE_ADDR cpsr;
293 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
294
295 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
296 directly (from a signal frame or dummy frame) or by interpreting
297 the saved LR (from a prologue or DWARF frame). So consult it and
298 trust the unwinders. */
299 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
300
301 return (cpsr & t_bit) != 0;
302 }
303
304 /* Callback for VEC_lower_bound. */
305
306 static inline int
307 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
308 const struct arm_mapping_symbol *rhs)
309 {
310 return lhs->value < rhs->value;
311 }
312
/* Search for the mapping symbol covering MEMADDR.  If one is found,
   return its type.  Otherwise, return 0.  If START is non-NULL,
   set *START to the location of the mapping symbol.  */

static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_per_objfile *data;
      VEC(arm_mapping_symbol_s) *map;
      /* Mapping symbol addresses are stored section-relative, so
	 convert MEMADDR to a section offset before searching.  */
      struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
					    0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_objfile_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_mapping_symbol_s, map))
	    {
	      struct arm_mapping_symbol *map_sym;

	      idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
				     arm_compare_mapping_symbols);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 mapping symbol covers this address.  */
	      if (idx < VEC_length (arm_mapping_symbol_s, map))
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
		  if (map_sym->value == map_key.value)
		    {
		      if (start)
			*start = map_sym->value + obj_section_addr (sec);
		      return map_sym->type;
		    }
		}

	      /* IDX == 0 means MEMADDR lies before the first mapping
		 symbol in this section, so nothing covers it.  */
	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
		  if (start)
		    *start = map_sym->value + obj_section_addr (sec);
		  return map_sym->type;
		}
	    }
	}
    }

  /* No containing section, no per-objfile data, or no covering
     mapping symbol was found.  */
  return 0;
}
371
/* Determine if the program counter specified in MEMADDR is in a Thumb
   function.  This function should be called for addresses unrelated to
   any executing frame; otherwise, prefer arm_frame_is_thumb.

   The checks below are ordered by decreasing authority: explicit
   Thumb bit, internal override, user "force" setting, M-profile,
   mapping symbols, minimal-symbol flag, user "fallback" setting, and
   finally the live CPSR.  */

int
arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
{
  struct obj_section *sec;
  struct minimal_symbol *sym;
  char type;
  struct displaced_step_closure* dsc
    = get_displaced_step_closure_by_addr(memaddr);

  /* If checking the mode of displaced instruction in copy area, the mode
     should be determined by instruction on the original address.  */
  if (dsc)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: check mode of %.8lx instead of %.8lx\n",
			    (unsigned long) dsc->insn_addr,
			    (unsigned long) memaddr);
      memaddr = dsc->insn_addr;
    }

  /* If bit 0 of the address is set, assume this is a Thumb address.  */
  if (IS_THUMB_ADDR (memaddr))
    return 1;

  /* Respect internal mode override if active.  */
  if (arm_override_mode != -1)
    return arm_override_mode;

  /* If the user wants to override the symbol table, let him.  */
  if (strcmp (arm_force_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_force_mode_string, "thumb") == 0)
    return 1;

  /* ARM v6-M and v7-M are always in Thumb mode.  */
  if (gdbarch_tdep (gdbarch)->is_m)
    return 1;

  /* If there are mapping symbols, consult them; type 't' marks the
     start of a Thumb code region.  */
  type = arm_find_mapping_symbol (memaddr, NULL);
  if (type)
    return type == 't';

  /* Thumb functions have a "special" bit set in minimal symbols.  */
  sym = lookup_minimal_symbol_by_pc (memaddr);
  if (sym)
    return (MSYMBOL_IS_SPECIAL (sym));

  /* If the user wants to override the fallback mode, let them.  */
  if (strcmp (arm_fallback_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_fallback_mode_string, "thumb") == 0)
    return 1;

  /* If we couldn't find any symbol, but we're talking to a running
     target, then trust the current value of $cpsr.  This lets
     "display/i $pc" always show the correct mode (though if there is
     a symbol table we will not reach here, so it still may not be
     displayed in the mode it will be executed).  */
  if (target_has_registers)
    return arm_frame_is_thumb (get_current_frame ());

  /* Otherwise we're out of luck; we assume ARM.  */
  return 0;
}
442
443 /* Remove useless bits from addresses in a running program. */
444 static CORE_ADDR
445 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
446 {
447 if (arm_apcs_32)
448 return UNMAKE_THUMB_ADDR (val);
449 else
450 return (val & 0x03fffffc);
451 }
452
453 /* When reading symbols, we need to zap the low bit of the address,
454 which may be set to 1 for Thumb functions. */
455 static CORE_ADDR
456 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
457 {
458 return val & ~1;
459 }
460
/* Return 1 if PC is the start of a compiler helper function which
   can be safely ignored during prologue skipping.  IS_THUMB is true
   if the function is known to be a Thumb function due to the way it
   is being called.  */
static int
skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct minimal_symbol *msym;

  /* Only consider PC if it is exactly the entry point of a named
     minimal symbol.  */
  msym = lookup_minimal_symbol_by_pc (pc);
  if (msym != NULL
      && SYMBOL_VALUE_ADDRESS (msym) == pc
      && SYMBOL_LINKAGE_NAME (msym) != NULL)
    {
      const char *name = SYMBOL_LINKAGE_NAME (msym);

      /* The GNU linker's Thumb call stub to foo is named
	 __foo_from_thumb.  Skip the leading "__" so that the prefix
	 comparisons below match the stub as well as the helper
	 itself.  */
      if (strstr (name, "_from_thumb") != NULL)
	name += 2;

      /* On soft-float targets, __truncdfsf2 is called to convert promoted
	 arguments to their argument types in non-prototyped
	 functions.  */
      if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
	return 1;

      /* Internal functions related to thread-local storage.  */
      if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
	return 1;
    }
  else
    {
      /* If we run against a stripped glibc, we may be unable to identify
	 special functions by name.  Check for one important case,
	 __aeabi_read_tp, by comparing the *code* against the default
	 implementation (this is hand-written ARM assembler in glibc).  */

      if (!is_thumb
	  && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
	     == 0xe3e00a0f /* mov r0, #0xffff0fff */
	  && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
	     == 0xe240f01f) /* sub pc, r0, #31 */
	return 1;
    }

  return 0;
}
514
/* Support routines for instruction parsing.  */

/* Mask covering the low (X + 1) bits.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST through FN (inclusive) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Bits ST through FN of OBJ, sign-extended from bit FN.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* Target of an ARM branch: the signed 24-bit offset in INSTR, shifted
   left two bits and applied relative to ADDR + 8.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))

/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)		\
   | (bits ((insn1), 10, 10) << 11)	\
   | (bits ((insn2), 12, 14) << 8)	\
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12) \
   | bits ((insn), 0, 11))
538
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.
   IMM is the 12-bit modified immediate field; the return value is the
   expanded 32-bit constant.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int rotation = imm >> 7;	/* imm12<11:7>.  */
  unsigned int byte = imm & 0xff;	/* imm12<7:0>.  */

  /* When imm12<11:10> is zero (i.e. rotation < 8), imm12<9:8>
     selects one of four byte-replication patterns.  */
  if (rotation < 8)
    switch (rotation >> 1)
      {
      case 0:
	return byte;
      case 1:
	return byte | (byte << 16);
      case 2:
	return (byte << 8) | (byte << 24);
      case 3:
	return byte | (byte << 8) | (byte << 16) | (byte << 24);
      }

  /* Otherwise the result is 1:imm12<6:0> rotated right by ROTATION
     (which is at least 8).  All eight significant bits shift out of
     the bottom, so the rotate reduces to a left shift.  */
  return (0x80 | (imm & 0x7f)) << (32 - rotation);
}
562
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  return ((inst & 0xff00) == 0xbd00	/* pop {rlist, pc} */
	  || (inst & 0xf000) == 0xd000	/* conditional branch */
	  || (inst & 0xf800) == 0xe000	/* unconditional branch */
	  || (inst & 0xff00) == 0x4700	/* bx REG, blx REG */
	  || (inst & 0xff87) == 0x4687	/* mov pc, REG */
	  || (inst & 0xf500) == 0xb100);	/* CBNZ or CBZ.  */
}
589
/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
   might change control flow, 0 otherwise.  INST1 is the first
   halfword, INST2 the second.  */

static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  The excluded cond field value
	     (inst1<9:7> all set) belongs to the miscellaneous control
	     encodings, which do not branch.  */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  Loads change the PC only when the PC
	 (bit 15 of the register list in INST2) is loaded; RFE always
	 changes it.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR PC: the mask on INST2 guarantees Rt == PC
	 (inst2<15:12> == 15); the cases below distinguish the
	 literal, immediate and register addressing forms.  */
      if (bits (inst1, 0, 3) == 15)
	return 1;
      if (bit (inst1, 7))
	return 1;
      if (bit (inst2, 11))
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)
	return 1;

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  return 0;
}
684
685 /* Analyze a Thumb prologue, looking for a recognizable stack frame
686 and frame pointer. Scan until we encounter a store that could
687 clobber the stack frame unexpectedly, or an unknown instruction.
688 Return the last address which is definitely safe to skip for an
689 initial breakpoint. */
690
691 static CORE_ADDR
692 thumb_analyze_prologue (struct gdbarch *gdbarch,
693 CORE_ADDR start, CORE_ADDR limit,
694 struct arm_prologue_cache *cache)
695 {
696 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
697 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
698 int i;
699 pv_t regs[16];
700 struct pv_area *stack;
701 struct cleanup *back_to;
702 CORE_ADDR offset;
703 CORE_ADDR unrecognized_pc = 0;
704
705 for (i = 0; i < 16; i++)
706 regs[i] = pv_register (i, 0);
707 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
708 back_to = make_cleanup_free_pv_area (stack);
709
710 while (start < limit)
711 {
712 unsigned short insn;
713
714 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
715
716 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
717 {
718 int regno;
719 int mask;
720
721 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
722 break;
723
724 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
725 whether to save LR (R14). */
726 mask = (insn & 0xff) | ((insn & 0x100) << 6);
727
728 /* Calculate offsets of saved R0-R7 and LR. */
729 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
730 if (mask & (1 << regno))
731 {
732 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
733 -4);
734 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
735 }
736 }
737 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
738 sub sp, #simm */
739 {
740 offset = (insn & 0x7f) << 2; /* get scaled offset */
741 if (insn & 0x80) /* Check for SUB. */
742 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
743 -offset);
744 else
745 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
746 offset);
747 }
748 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
749 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
750 (insn & 0xff) << 2);
751 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
752 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
753 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
754 bits (insn, 6, 8));
755 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
756 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
757 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
758 bits (insn, 0, 7));
759 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
760 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
761 && pv_is_constant (regs[bits (insn, 3, 5)]))
762 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
763 regs[bits (insn, 6, 8)]);
764 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
765 && pv_is_constant (regs[bits (insn, 3, 6)]))
766 {
767 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
768 int rm = bits (insn, 3, 6);
769 regs[rd] = pv_add (regs[rd], regs[rm]);
770 }
771 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
772 {
773 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
774 int src_reg = (insn & 0x78) >> 3;
775 regs[dst_reg] = regs[src_reg];
776 }
777 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
778 {
779 /* Handle stores to the stack. Normally pushes are used,
780 but with GCC -mtpcs-frame, there may be other stores
781 in the prologue to create the frame. */
782 int regno = (insn >> 8) & 0x7;
783 pv_t addr;
784
785 offset = (insn & 0xff) << 2;
786 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
787
788 if (pv_area_store_would_trash (stack, addr))
789 break;
790
791 pv_area_store (stack, addr, 4, regs[regno]);
792 }
793 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
794 {
795 int rd = bits (insn, 0, 2);
796 int rn = bits (insn, 3, 5);
797 pv_t addr;
798
799 offset = bits (insn, 6, 10) << 2;
800 addr = pv_add_constant (regs[rn], offset);
801
802 if (pv_area_store_would_trash (stack, addr))
803 break;
804
805 pv_area_store (stack, addr, 4, regs[rd]);
806 }
807 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
808 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
809 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
810 /* Ignore stores of argument registers to the stack. */
811 ;
812 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
813 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
814 /* Ignore block loads from the stack, potentially copying
815 parameters from memory. */
816 ;
817 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
818 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
819 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
820 /* Similarly ignore single loads from the stack. */
821 ;
822 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
823 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
824 /* Skip register copies, i.e. saves to another register
825 instead of the stack. */
826 ;
827 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
828 /* Recognize constant loads; even with small stacks these are necessary
829 on Thumb. */
830 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
831 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
832 {
833 /* Constant pool loads, for the same reason. */
834 unsigned int constant;
835 CORE_ADDR loc;
836
837 loc = start + 4 + bits (insn, 0, 7) * 4;
838 constant = read_memory_unsigned_integer (loc, 4, byte_order);
839 regs[bits (insn, 8, 10)] = pv_constant (constant);
840 }
841 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
842 {
843 unsigned short inst2;
844
845 inst2 = read_memory_unsigned_integer (start + 2, 2,
846 byte_order_for_code);
847
848 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
849 {
850 /* BL, BLX. Allow some special function calls when
851 skipping the prologue; GCC generates these before
852 storing arguments to the stack. */
853 CORE_ADDR nextpc;
854 int j1, j2, imm1, imm2;
855
856 imm1 = sbits (insn, 0, 10);
857 imm2 = bits (inst2, 0, 10);
858 j1 = bit (inst2, 13);
859 j2 = bit (inst2, 11);
860
861 offset = ((imm1 << 12) + (imm2 << 1));
862 offset ^= ((!j2) << 22) | ((!j1) << 23);
863
864 nextpc = start + 4 + offset;
865 /* For BLX make sure to clear the low bits. */
866 if (bit (inst2, 12) == 0)
867 nextpc = nextpc & 0xfffffffc;
868
869 if (!skip_prologue_function (gdbarch, nextpc,
870 bit (inst2, 12) != 0))
871 break;
872 }
873
874 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
875 { registers } */
876 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
877 {
878 pv_t addr = regs[bits (insn, 0, 3)];
879 int regno;
880
881 if (pv_area_store_would_trash (stack, addr))
882 break;
883
884 /* Calculate offsets of saved registers. */
885 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
886 if (inst2 & (1 << regno))
887 {
888 addr = pv_add_constant (addr, -4);
889 pv_area_store (stack, addr, 4, regs[regno]);
890 }
891
892 if (insn & 0x0020)
893 regs[bits (insn, 0, 3)] = addr;
894 }
895
896 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
897 [Rn, #+/-imm]{!} */
898 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
899 {
900 int regno1 = bits (inst2, 12, 15);
901 int regno2 = bits (inst2, 8, 11);
902 pv_t addr = regs[bits (insn, 0, 3)];
903
904 offset = inst2 & 0xff;
905 if (insn & 0x0080)
906 addr = pv_add_constant (addr, offset);
907 else
908 addr = pv_add_constant (addr, -offset);
909
910 if (pv_area_store_would_trash (stack, addr))
911 break;
912
913 pv_area_store (stack, addr, 4, regs[regno1]);
914 pv_area_store (stack, pv_add_constant (addr, 4),
915 4, regs[regno2]);
916
917 if (insn & 0x0020)
918 regs[bits (insn, 0, 3)] = addr;
919 }
920
921 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
922 && (inst2 & 0x0c00) == 0x0c00
923 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
924 {
925 int regno = bits (inst2, 12, 15);
926 pv_t addr = regs[bits (insn, 0, 3)];
927
928 offset = inst2 & 0xff;
929 if (inst2 & 0x0200)
930 addr = pv_add_constant (addr, offset);
931 else
932 addr = pv_add_constant (addr, -offset);
933
934 if (pv_area_store_would_trash (stack, addr))
935 break;
936
937 pv_area_store (stack, addr, 4, regs[regno]);
938
939 if (inst2 & 0x0100)
940 regs[bits (insn, 0, 3)] = addr;
941 }
942
943 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
944 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
945 {
946 int regno = bits (inst2, 12, 15);
947 pv_t addr;
948
949 offset = inst2 & 0xfff;
950 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
951
952 if (pv_area_store_would_trash (stack, addr))
953 break;
954
955 pv_area_store (stack, addr, 4, regs[regno]);
956 }
957
958 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
959 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
960 /* Ignore stores of argument registers to the stack. */
961 ;
962
963 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
964 && (inst2 & 0x0d00) == 0x0c00
965 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
966 /* Ignore stores of argument registers to the stack. */
967 ;
968
969 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
970 { registers } */
971 && (inst2 & 0x8000) == 0x0000
972 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
973 /* Ignore block loads from the stack, potentially copying
974 parameters from memory. */
975 ;
976
977 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
978 [Rn, #+/-imm] */
979 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
980 /* Similarly ignore dual loads from the stack. */
981 ;
982
983 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
984 && (inst2 & 0x0d00) == 0x0c00
985 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
986 /* Similarly ignore single loads from the stack. */
987 ;
988
989 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
990 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
991 /* Similarly ignore single loads from the stack. */
992 ;
993
994 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
995 && (inst2 & 0x8000) == 0x0000)
996 {
997 unsigned int imm = ((bits (insn, 10, 10) << 11)
998 | (bits (inst2, 12, 14) << 8)
999 | bits (inst2, 0, 7));
1000
1001 regs[bits (inst2, 8, 11)]
1002 = pv_add_constant (regs[bits (insn, 0, 3)],
1003 thumb_expand_immediate (imm));
1004 }
1005
1006 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1007 && (inst2 & 0x8000) == 0x0000)
1008 {
1009 unsigned int imm = ((bits (insn, 10, 10) << 11)
1010 | (bits (inst2, 12, 14) << 8)
1011 | bits (inst2, 0, 7));
1012
1013 regs[bits (inst2, 8, 11)]
1014 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1015 }
1016
1017 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1018 && (inst2 & 0x8000) == 0x0000)
1019 {
1020 unsigned int imm = ((bits (insn, 10, 10) << 11)
1021 | (bits (inst2, 12, 14) << 8)
1022 | bits (inst2, 0, 7));
1023
1024 regs[bits (inst2, 8, 11)]
1025 = pv_add_constant (regs[bits (insn, 0, 3)],
1026 - (CORE_ADDR) thumb_expand_immediate (imm));
1027 }
1028
1029 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1030 && (inst2 & 0x8000) == 0x0000)
1031 {
1032 unsigned int imm = ((bits (insn, 10, 10) << 11)
1033 | (bits (inst2, 12, 14) << 8)
1034 | bits (inst2, 0, 7));
1035
1036 regs[bits (inst2, 8, 11)]
1037 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1038 }
1039
1040 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1041 {
1042 unsigned int imm = ((bits (insn, 10, 10) << 11)
1043 | (bits (inst2, 12, 14) << 8)
1044 | bits (inst2, 0, 7));
1045
1046 regs[bits (inst2, 8, 11)]
1047 = pv_constant (thumb_expand_immediate (imm));
1048 }
1049
1050 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1051 {
1052 unsigned int imm
1053 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1054
1055 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1056 }
1057
1058 else if (insn == 0xea5f /* mov.w Rd,Rm */
1059 && (inst2 & 0xf0f0) == 0)
1060 {
1061 int dst_reg = (inst2 & 0x0f00) >> 8;
1062 int src_reg = inst2 & 0xf;
1063 regs[dst_reg] = regs[src_reg];
1064 }
1065
1066 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1067 {
1068 /* Constant pool loads. */
1069 unsigned int constant;
1070 CORE_ADDR loc;
1071
1072 offset = bits (insn, 0, 11);
1073 if (insn & 0x0080)
1074 loc = start + 4 + offset;
1075 else
1076 loc = start + 4 - offset;
1077
1078 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1079 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1080 }
1081
1082 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1083 {
1084 /* Constant pool loads. */
1085 unsigned int constant;
1086 CORE_ADDR loc;
1087
1088 offset = bits (insn, 0, 7) << 2;
1089 if (insn & 0x0080)
1090 loc = start + 4 + offset;
1091 else
1092 loc = start + 4 - offset;
1093
1094 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1095 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1096
1097 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1098 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1099 }
1100
1101 else if (thumb2_instruction_changes_pc (insn, inst2))
1102 {
1103 /* Don't scan past anything that might change control flow. */
1104 break;
1105 }
1106 else
1107 {
1108 /* The optimizer might shove anything into the prologue,
1109 so we just skip what we don't recognize. */
1110 unrecognized_pc = start;
1111 }
1112
1113 start += 2;
1114 }
1115 else if (thumb_instruction_changes_pc (insn))
1116 {
1117 /* Don't scan past anything that might change control flow. */
1118 break;
1119 }
1120 else
1121 {
1122 /* The optimizer might shove anything into the prologue,
1123 so we just skip what we don't recognize. */
1124 unrecognized_pc = start;
1125 }
1126
1127 start += 2;
1128 }
1129
1130 if (arm_debug)
1131 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1132 paddress (gdbarch, start));
1133
1134 if (unrecognized_pc == 0)
1135 unrecognized_pc = start;
1136
1137 if (cache == NULL)
1138 {
1139 do_cleanups (back_to);
1140 return unrecognized_pc;
1141 }
1142
1143 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1144 {
1145 /* Frame pointer is fp. Frame size is constant. */
1146 cache->framereg = ARM_FP_REGNUM;
1147 cache->framesize = -regs[ARM_FP_REGNUM].k;
1148 }
1149 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1150 {
1151 /* Frame pointer is r7. Frame size is constant. */
1152 cache->framereg = THUMB_FP_REGNUM;
1153 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1154 }
1155 else
1156 {
1157 /* Try the stack pointer... this is a bit desperate. */
1158 cache->framereg = ARM_SP_REGNUM;
1159 cache->framesize = -regs[ARM_SP_REGNUM].k;
1160 }
1161
1162 for (i = 0; i < 16; i++)
1163 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1164 cache->saved_regs[i].addr = offset;
1165
1166 do_cleanups (back_to);
1167 return unrecognized_pc;
1168 }
1169
1170
1171 /* Try to analyze the instructions starting from PC, which load symbol
1172 __stack_chk_guard. Return the address of instruction after loading this
   symbol, set the destination register number in *DESTREG, and set the size of
1174 instructions for loading symbol in OFFSET. Return 0 if instructions are
1175 not recognized. */
1176
1177 static CORE_ADDR
1178 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1179 unsigned int *destreg, int *offset)
1180 {
1181 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1182 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1183 unsigned int low, high, address;
1184
1185 address = 0;
1186 if (is_thumb)
1187 {
1188 unsigned short insn1
1189 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1190
1191 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1192 {
1193 *destreg = bits (insn1, 8, 10);
1194 *offset = 2;
1195 address = bits (insn1, 0, 7);
1196 }
1197 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1198 {
1199 unsigned short insn2
1200 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1201
1202 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1203
1204 insn1
1205 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1206 insn2
1207 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1208
1209 /* movt Rd, #const */
1210 if ((insn1 & 0xfbc0) == 0xf2c0)
1211 {
1212 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1213 *destreg = bits (insn2, 8, 11);
1214 *offset = 8;
1215 address = (high << 16 | low);
1216 }
1217 }
1218 }
1219 else
1220 {
1221 unsigned int insn
1222 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1223
1224 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1225 {
1226 address = bits (insn, 0, 11);
1227 *destreg = bits (insn, 12, 15);
1228 *offset = 4;
1229 }
1230 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1231 {
1232 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1233
1234 insn
1235 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1236
1237 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1238 {
1239 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1240 *destreg = bits (insn, 12, 15);
1241 *offset = 8;
1242 address = (high << 16 | low);
1243 }
1244 }
1245 }
1246
1247 return address;
1248 }
1249
1250 /* Try to skip a sequence of instructions used for stack protector. If PC
1251 points to the first instruction of this sequence, return the address of
1252 first instruction after this sequence, otherwise, return original PC.
1253
1254 On arm, this sequence of instructions is composed of mainly three steps,
1255 Step 1: load symbol __stack_chk_guard,
1256 Step 2: load from address of __stack_chk_guard,
1257 Step 3: store it to somewhere else.
1258
1259 Usually, instructions on step 2 and step 3 are the same on various ARM
1260 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1261 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1262 instructions in step 1 vary from different ARM architectures. On ARMv7,
1263 they are,
1264
1265 movw Rn, #:lower16:__stack_chk_guard
1266 movt Rn, #:upper16:__stack_chk_guard
1267
1268 On ARMv5t, it is,
1269
1270 ldr Rn, .Label
1271 ....
    .Label:
1273 .word __stack_chk_guard
1274
1275 Since ldr/str is a very popular instruction, we can't use them as
1276 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1277 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */
1279
1280 static CORE_ADDR
1281 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1282 {
1283 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1284 unsigned int address, basereg;
1285 struct minimal_symbol *stack_chk_guard;
1286 int offset;
1287 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1288 CORE_ADDR addr;
1289
1290 /* Try to parse the instructions in Step 1. */
1291 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1292 &basereg, &offset);
1293 if (!addr)
1294 return pc;
1295
1296 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1297 /* If name of symbol doesn't start with '__stack_chk_guard', this
1298 instruction sequence is not for stack protector. If symbol is
1299 removed, we conservatively think this sequence is for stack protector. */
1300 if (stack_chk_guard
1301 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1302 strlen ("__stack_chk_guard")) != 0)
1303 return pc;
1304
1305 if (is_thumb)
1306 {
1307 unsigned int destreg;
1308 unsigned short insn
1309 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1310
1311 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1312 if ((insn & 0xf800) != 0x6800)
1313 return pc;
1314 if (bits (insn, 3, 5) != basereg)
1315 return pc;
1316 destreg = bits (insn, 0, 2);
1317
1318 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1319 byte_order_for_code);
1320 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1321 if ((insn & 0xf800) != 0x6000)
1322 return pc;
1323 if (destreg != bits (insn, 0, 2))
1324 return pc;
1325 }
1326 else
1327 {
1328 unsigned int destreg;
1329 unsigned int insn
1330 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1331
1332 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1333 if ((insn & 0x0e500000) != 0x04100000)
1334 return pc;
1335 if (bits (insn, 16, 19) != basereg)
1336 return pc;
1337 destreg = bits (insn, 12, 15);
1338 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1339 insn = read_memory_unsigned_integer (pc + offset + 4,
1340 4, byte_order_for_code);
1341 if ((insn & 0x0e500000) != 0x04000000)
1342 return pc;
1343 if (bits (insn, 12, 15) != destreg)
1344 return pc;
1345 }
1346 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1347 on arm. */
1348 if (is_thumb)
1349 return pc + offset + 4;
1350 else
1351 return pc + offset + 8;
1352 }
1353
1354 /* Advance the PC across any function entry prologue instructions to
1355 reach some "real" code.
1356
1357 The APCS (ARM Procedure Call Standard) defines the following
1358 prologue:
1359
1360 mov ip, sp
1361 [stmfd sp!, {a1,a2,a3,a4}]
1362 stmfd sp!, {...,fp,ip,lr,pc}
1363 [stfe f7, [sp, #-12]!]
1364 [stfe f6, [sp, #-12]!]
1365 [stfe f5, [sp, #-12]!]
1366 [stfe f4, [sp, #-12]!]
1367 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1368
1369 static CORE_ADDR
1370 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1371 {
1372 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1373 unsigned long inst;
1374 CORE_ADDR skip_pc;
1375 CORE_ADDR func_addr, limit_pc;
1376 struct symtab_and_line sal;
1377
1378 /* See if we can determine the end of the prologue via the symbol table.
1379 If so, then return either PC, or the PC after the prologue, whichever
1380 is greater. */
1381 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1382 {
1383 CORE_ADDR post_prologue_pc
1384 = skip_prologue_using_sal (gdbarch, func_addr);
1385 struct symtab *s = find_pc_symtab (func_addr);
1386
1387 if (post_prologue_pc)
1388 post_prologue_pc
1389 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1390
1391
1392 /* GCC always emits a line note before the prologue and another
1393 one after, even if the two are at the same address or on the
1394 same line. Take advantage of this so that we do not need to
1395 know every instruction that might appear in the prologue. We
1396 will have producer information for most binaries; if it is
1397 missing (e.g. for -gstabs), assuming the GNU tools. */
1398 if (post_prologue_pc
1399 && (s == NULL
1400 || s->producer == NULL
1401 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1402 return post_prologue_pc;
1403
1404 if (post_prologue_pc != 0)
1405 {
1406 CORE_ADDR analyzed_limit;
1407
1408 /* For non-GCC compilers, make sure the entire line is an
1409 acceptable prologue; GDB will round this function's
1410 return value up to the end of the following line so we
1411 can not skip just part of a line (and we do not want to).
1412
1413 RealView does not treat the prologue specially, but does
1414 associate prologue code with the opening brace; so this
1415 lets us skip the first line if we think it is the opening
1416 brace. */
1417 if (arm_pc_is_thumb (gdbarch, func_addr))
1418 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1419 post_prologue_pc, NULL);
1420 else
1421 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1422 post_prologue_pc, NULL);
1423
1424 if (analyzed_limit != post_prologue_pc)
1425 return func_addr;
1426
1427 return post_prologue_pc;
1428 }
1429 }
1430
1431 /* Can't determine prologue from the symbol table, need to examine
1432 instructions. */
1433
1434 /* Find an upper limit on the function prologue using the debug
1435 information. If the debug information could not be used to provide
1436 that bound, then use an arbitrary large number as the upper bound. */
1437 /* Like arm_scan_prologue, stop no later than pc + 64. */
1438 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1439 if (limit_pc == 0)
1440 limit_pc = pc + 64; /* Magic. */
1441
1442
1443 /* Check if this is Thumb code. */
1444 if (arm_pc_is_thumb (gdbarch, pc))
1445 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1446
1447 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1448 {
1449 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1450
1451 /* "mov ip, sp" is no longer a required part of the prologue. */
1452 if (inst == 0xe1a0c00d) /* mov ip, sp */
1453 continue;
1454
1455 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1456 continue;
1457
1458 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1459 continue;
1460
1461 /* Some prologues begin with "str lr, [sp, #-4]!". */
1462 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1463 continue;
1464
1465 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1466 continue;
1467
1468 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1469 continue;
1470
1471 /* Any insns after this point may float into the code, if it makes
1472 for better instruction scheduling, so we skip them only if we
1473 find them, but still consider the function to be frame-ful. */
1474
1475 /* We may have either one sfmfd instruction here, or several stfe
1476 insns, depending on the version of floating point code we
1477 support. */
1478 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1479 continue;
1480
1481 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1482 continue;
1483
1484 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1485 continue;
1486
1487 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1488 continue;
1489
1490 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1491 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1492 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1493 continue;
1494
1495 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1496 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1497 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1498 continue;
1499
1500 /* Un-recognized instruction; stop scanning. */
1501 break;
1502 }
1503
1504 return skip_pc; /* End of prologue. */
1505 }
1506
1507 /* *INDENT-OFF* */
1508 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1509 This function decodes a Thumb function prologue to determine:
1510 1) the size of the stack frame
1511 2) which registers are saved on it
1512 3) the offsets of saved regs
1513 4) the offset from the stack pointer to the frame pointer
1514
1515 A typical Thumb function prologue would create this stack frame
1516 (offsets relative to FP)
1517 old SP -> 24 stack parameters
1518 20 LR
1519 16 R7
1520 R7 -> 0 local variables (16 bytes)
1521 SP -> -12 additional stack space (12 bytes)
1522 The frame size would thus be 36 bytes, and the frame offset would be
1523 12 bytes. The frame register is R7.
1524
   The comments for thumb_analyze_prologue() describe the algorithm we
   use to detect the end of the prologue.  */
1527 /* *INDENT-ON* */
1528
1529 static void
1530 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1531 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1532 {
1533 CORE_ADDR prologue_start;
1534 CORE_ADDR prologue_end;
1535 CORE_ADDR current_pc;
1536
1537 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1538 &prologue_end))
1539 {
1540 /* See comment in arm_scan_prologue for an explanation of
1541 this heuristics. */
1542 if (prologue_end > prologue_start + 64)
1543 {
1544 prologue_end = prologue_start + 64;
1545 }
1546 }
1547 else
1548 /* We're in the boondocks: we have no idea where the start of the
1549 function is. */
1550 return;
1551
1552 prologue_end = min (prologue_end, prev_pc);
1553
1554 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1555 }
1556
1557 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1558
1559 static int
1560 arm_instruction_changes_pc (uint32_t this_instr)
1561 {
1562 if (bits (this_instr, 28, 31) == INST_NV)
1563 /* Unconditional instructions. */
1564 switch (bits (this_instr, 24, 27))
1565 {
1566 case 0xa:
1567 case 0xb:
1568 /* Branch with Link and change to Thumb. */
1569 return 1;
1570 case 0xc:
1571 case 0xd:
1572 case 0xe:
1573 /* Coprocessor register transfer. */
1574 if (bits (this_instr, 12, 15) == 15)
1575 error (_("Invalid update to pc in instruction"));
1576 return 0;
1577 default:
1578 return 0;
1579 }
1580 else
1581 switch (bits (this_instr, 25, 27))
1582 {
1583 case 0x0:
1584 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1585 {
1586 /* Multiplies and extra load/stores. */
1587 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1588 /* Neither multiplies nor extension load/stores are allowed
1589 to modify PC. */
1590 return 0;
1591
1592 /* Otherwise, miscellaneous instructions. */
1593
1594 /* BX <reg>, BXJ <reg>, BLX <reg> */
1595 if (bits (this_instr, 4, 27) == 0x12fff1
1596 || bits (this_instr, 4, 27) == 0x12fff2
1597 || bits (this_instr, 4, 27) == 0x12fff3)
1598 return 1;
1599
1600 /* Other miscellaneous instructions are unpredictable if they
1601 modify PC. */
1602 return 0;
1603 }
1604 /* Data processing instruction. Fall through. */
1605
1606 case 0x1:
1607 if (bits (this_instr, 12, 15) == 15)
1608 return 1;
1609 else
1610 return 0;
1611
1612 case 0x2:
1613 case 0x3:
1614 /* Media instructions and architecturally undefined instructions. */
1615 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1616 return 0;
1617
1618 /* Stores. */
1619 if (bit (this_instr, 20) == 0)
1620 return 0;
1621
1622 /* Loads. */
1623 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1624 return 1;
1625 else
1626 return 0;
1627
1628 case 0x4:
1629 /* Load/store multiple. */
1630 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1631 return 1;
1632 else
1633 return 0;
1634
1635 case 0x5:
1636 /* Branch and branch with link. */
1637 return 1;
1638
1639 case 0x6:
1640 case 0x7:
1641 /* Coprocessor transfers or SWIs can not affect PC. */
1642 return 0;
1643
1644 default:
1645 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1646 }
1647 }
1648
1649 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1650 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1651 fill it in. Return the first address not recognized as a prologue
1652 instruction.
1653
1654 We recognize all the instructions typically found in ARM prologues,
1655 plus harmless instructions which can be skipped (either for analysis
1656 purposes, or a more restrictive set that can be skipped when finding
1657 the end of the prologue). */
1658
1659 static CORE_ADDR
1660 arm_analyze_prologue (struct gdbarch *gdbarch,
1661 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1662 struct arm_prologue_cache *cache)
1663 {
1664 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1665 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1666 int regno;
1667 CORE_ADDR offset, current_pc;
1668 pv_t regs[ARM_FPS_REGNUM];
1669 struct pv_area *stack;
1670 struct cleanup *back_to;
1671 int framereg, framesize;
1672 CORE_ADDR unrecognized_pc = 0;
1673
1674 /* Search the prologue looking for instructions that set up the
1675 frame pointer, adjust the stack pointer, and save registers.
1676
1677 Be careful, however, and if it doesn't look like a prologue,
1678 don't try to scan it. If, for instance, a frameless function
1679 begins with stmfd sp!, then we will tell ourselves there is
1680 a frame, which will confuse stack traceback, as well as "finish"
1681 and other operations that rely on a knowledge of the stack
1682 traceback. */
1683
1684 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1685 regs[regno] = pv_register (regno, 0);
1686 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1687 back_to = make_cleanup_free_pv_area (stack);
1688
1689 for (current_pc = prologue_start;
1690 current_pc < prologue_end;
1691 current_pc += 4)
1692 {
1693 unsigned int insn
1694 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1695
1696 if (insn == 0xe1a0c00d) /* mov ip, sp */
1697 {
1698 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1699 continue;
1700 }
1701 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1702 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1703 {
1704 unsigned imm = insn & 0xff; /* immediate value */
1705 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1706 int rd = bits (insn, 12, 15);
1707 imm = (imm >> rot) | (imm << (32 - rot));
1708 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1709 continue;
1710 }
1711 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1712 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1713 {
1714 unsigned imm = insn & 0xff; /* immediate value */
1715 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1716 int rd = bits (insn, 12, 15);
1717 imm = (imm >> rot) | (imm << (32 - rot));
1718 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1719 continue;
1720 }
1721 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1722 [sp, #-4]! */
1723 {
1724 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1725 break;
1726 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1727 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1728 regs[bits (insn, 12, 15)]);
1729 continue;
1730 }
1731 else if ((insn & 0xffff0000) == 0xe92d0000)
1732 /* stmfd sp!, {..., fp, ip, lr, pc}
1733 or
1734 stmfd sp!, {a1, a2, a3, a4} */
1735 {
1736 int mask = insn & 0xffff;
1737
1738 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1739 break;
1740
1741 /* Calculate offsets of saved registers. */
1742 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1743 if (mask & (1 << regno))
1744 {
1745 regs[ARM_SP_REGNUM]
1746 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1747 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1748 }
1749 }
1750 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1751 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1752 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1753 {
1754 /* No need to add this to saved_regs -- it's just an arg reg. */
1755 continue;
1756 }
1757 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1758 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1759 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1760 {
1761 /* No need to add this to saved_regs -- it's just an arg reg. */
1762 continue;
1763 }
1764 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1765 { registers } */
1766 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1767 {
1768 /* No need to add this to saved_regs -- it's just arg regs. */
1769 continue;
1770 }
1771 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1772 {
1773 unsigned imm = insn & 0xff; /* immediate value */
1774 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1775 imm = (imm >> rot) | (imm << (32 - rot));
1776 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1777 }
1778 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1779 {
1780 unsigned imm = insn & 0xff; /* immediate value */
1781 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1782 imm = (imm >> rot) | (imm << (32 - rot));
1783 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1784 }
1785 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1786 [sp, -#c]! */
1787 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1788 {
1789 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1790 break;
1791
1792 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1793 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1794 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1795 }
1796 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1797 [sp!] */
1798 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1799 {
1800 int n_saved_fp_regs;
1801 unsigned int fp_start_reg, fp_bound_reg;
1802
1803 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1804 break;
1805
1806 if ((insn & 0x800) == 0x800) /* N0 is set */
1807 {
1808 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1809 n_saved_fp_regs = 3;
1810 else
1811 n_saved_fp_regs = 1;
1812 }
1813 else
1814 {
1815 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1816 n_saved_fp_regs = 2;
1817 else
1818 n_saved_fp_regs = 4;
1819 }
1820
1821 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1822 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1823 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1824 {
1825 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1826 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1827 regs[fp_start_reg++]);
1828 }
1829 }
1830 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1831 {
1832 /* Allow some special function calls when skipping the
1833 prologue; GCC generates these before storing arguments to
1834 the stack. */
1835 CORE_ADDR dest = BranchDest (current_pc, insn);
1836
1837 if (skip_prologue_function (gdbarch, dest, 0))
1838 continue;
1839 else
1840 break;
1841 }
1842 else if ((insn & 0xf0000000) != 0xe0000000)
1843 break; /* Condition not true, exit early. */
1844 else if (arm_instruction_changes_pc (insn))
1845 /* Don't scan past anything that might change control flow. */
1846 break;
1847 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1848 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1849 /* Ignore block loads from the stack, potentially copying
1850 parameters from memory. */
1851 continue;
1852 else if ((insn & 0xfc500000) == 0xe4100000
1853 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1854 /* Similarly ignore single loads from the stack. */
1855 continue;
1856 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1857 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1858 register instead of the stack. */
1859 continue;
1860 else
1861 {
1862 /* The optimizer might shove anything into the prologue,
1863 so we just skip what we don't recognize. */
1864 unrecognized_pc = current_pc;
1865 continue;
1866 }
1867 }
1868
1869 if (unrecognized_pc == 0)
1870 unrecognized_pc = current_pc;
1871
1872 /* The frame size is just the distance from the frame register
1873 to the original stack pointer. */
1874 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1875 {
1876 /* Frame pointer is fp. */
1877 framereg = ARM_FP_REGNUM;
1878 framesize = -regs[ARM_FP_REGNUM].k;
1879 }
1880 else
1881 {
1882 /* Try the stack pointer... this is a bit desperate. */
1883 framereg = ARM_SP_REGNUM;
1884 framesize = -regs[ARM_SP_REGNUM].k;
1885 }
1886
1887 if (cache)
1888 {
1889 cache->framereg = framereg;
1890 cache->framesize = framesize;
1891
1892 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1893 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1894 cache->saved_regs[regno].addr = offset;
1895 }
1896
1897 if (arm_debug)
1898 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1899 paddress (gdbarch, unrecognized_pc));
1900
1901 do_cleanups (back_to);
1902 return unrecognized_pc;
1903 }
1904
1905 static void
1906 arm_scan_prologue (struct frame_info *this_frame,
1907 struct arm_prologue_cache *cache)
1908 {
1909 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1910 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1911 int regno;
1912 CORE_ADDR prologue_start, prologue_end, current_pc;
1913 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1914 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1915 pv_t regs[ARM_FPS_REGNUM];
1916 struct pv_area *stack;
1917 struct cleanup *back_to;
1918 CORE_ADDR offset;
1919
1920 /* Assume there is no frame until proven otherwise. */
1921 cache->framereg = ARM_SP_REGNUM;
1922 cache->framesize = 0;
1923
1924 /* Check for Thumb prologue. */
1925 if (arm_frame_is_thumb (this_frame))
1926 {
1927 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1928 return;
1929 }
1930
1931 /* Find the function prologue. If we can't find the function in
1932 the symbol table, peek in the stack frame to find the PC. */
1933 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1934 &prologue_end))
1935 {
1936 /* One way to find the end of the prologue (which works well
1937 for unoptimized code) is to do the following:
1938
1939 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1940
1941 if (sal.line == 0)
1942 prologue_end = prev_pc;
1943 else if (sal.end < prologue_end)
1944 prologue_end = sal.end;
1945
1946 This mechanism is very accurate so long as the optimizer
1947 doesn't move any instructions from the function body into the
1948 prologue. If this happens, sal.end will be the last
1949 instruction in the first hunk of prologue code just before
1950 the first instruction that the scheduler has moved from
1951 the body to the prologue.
1952
1953 In order to make sure that we scan all of the prologue
1954 instructions, we use a slightly less accurate mechanism which
1955 may scan more than necessary. To help compensate for this
1956 lack of accuracy, the prologue scanning loop below contains
1957 several clauses which'll cause the loop to terminate early if
1958 an implausible prologue instruction is encountered.
1959
1960 The expression
1961
1962 prologue_start + 64
1963
1964 is a suitable endpoint since it accounts for the largest
1965 possible prologue plus up to five instructions inserted by
1966 the scheduler. */
1967
1968 if (prologue_end > prologue_start + 64)
1969 {
1970 prologue_end = prologue_start + 64; /* See above. */
1971 }
1972 }
1973 else
1974 {
1975 /* We have no symbol information. Our only option is to assume this
1976 function has a standard stack frame and the normal frame register.
1977 Then, we can find the value of our frame pointer on entrance to
1978 the callee (or at the present moment if this is the innermost frame).
1979 The value stored there should be the address of the stmfd + 8. */
1980 CORE_ADDR frame_loc;
1981 LONGEST return_value;
1982
1983 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1984 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1985 return;
1986 else
1987 {
1988 prologue_start = gdbarch_addr_bits_remove
1989 (gdbarch, return_value) - 8;
1990 prologue_end = prologue_start + 64; /* See above. */
1991 }
1992 }
1993
1994 if (prev_pc < prologue_end)
1995 prologue_end = prev_pc;
1996
1997 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1998 }
1999
2000 static struct arm_prologue_cache *
2001 arm_make_prologue_cache (struct frame_info *this_frame)
2002 {
2003 int reg;
2004 struct arm_prologue_cache *cache;
2005 CORE_ADDR unwound_fp;
2006
2007 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2008 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2009
2010 arm_scan_prologue (this_frame, cache);
2011
2012 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2013 if (unwound_fp == 0)
2014 return cache;
2015
2016 cache->prev_sp = unwound_fp + cache->framesize;
2017
2018 /* Calculate actual addresses of saved registers using offsets
2019 determined by arm_scan_prologue. */
2020 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2021 if (trad_frame_addr_p (cache->saved_regs, reg))
2022 cache->saved_regs[reg].addr += cache->prev_sp;
2023
2024 return cache;
2025 }
2026
2027 /* Our frame ID for a normal frame is the current function's starting PC
2028 and the caller's SP when we were called. */
2029
2030 static void
2031 arm_prologue_this_id (struct frame_info *this_frame,
2032 void **this_cache,
2033 struct frame_id *this_id)
2034 {
2035 struct arm_prologue_cache *cache;
2036 struct frame_id id;
2037 CORE_ADDR pc, func;
2038
2039 if (*this_cache == NULL)
2040 *this_cache = arm_make_prologue_cache (this_frame);
2041 cache = *this_cache;
2042
2043 /* This is meant to halt the backtrace at "_start". */
2044 pc = get_frame_pc (this_frame);
2045 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2046 return;
2047
2048 /* If we've hit a wall, stop. */
2049 if (cache->prev_sp == 0)
2050 return;
2051
2052 /* Use function start address as part of the frame ID. If we cannot
2053 identify the start address (due to missing symbol information),
2054 fall back to just using the current PC. */
2055 func = get_frame_func (this_frame);
2056 if (!func)
2057 func = pc;
2058
2059 id = frame_id_build (cache->prev_sp, func);
2060 *this_id = id;
2061 }
2062
2063 static struct value *
2064 arm_prologue_prev_register (struct frame_info *this_frame,
2065 void **this_cache,
2066 int prev_regnum)
2067 {
2068 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2069 struct arm_prologue_cache *cache;
2070
2071 if (*this_cache == NULL)
2072 *this_cache = arm_make_prologue_cache (this_frame);
2073 cache = *this_cache;
2074
2075 /* If we are asked to unwind the PC, then we need to return the LR
2076 instead. The prologue may save PC, but it will point into this
2077 frame's prologue, not the next frame's resume location. Also
2078 strip the saved T bit. A valid LR may have the low bit set, but
2079 a valid PC never does. */
2080 if (prev_regnum == ARM_PC_REGNUM)
2081 {
2082 CORE_ADDR lr;
2083
2084 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2085 return frame_unwind_got_constant (this_frame, prev_regnum,
2086 arm_addr_bits_remove (gdbarch, lr));
2087 }
2088
2089 /* SP is generally not saved to the stack, but this frame is
2090 identified by the next frame's stack pointer at the time of the call.
2091 The value was already reconstructed into PREV_SP. */
2092 if (prev_regnum == ARM_SP_REGNUM)
2093 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2094
2095 /* The CPSR may have been changed by the call instruction and by the
2096 called function. The only bit we can reconstruct is the T bit,
2097 by checking the low bit of LR as of the call. This is a reliable
2098 indicator of Thumb-ness except for some ARM v4T pre-interworking
2099 Thumb code, which could get away with a clear low bit as long as
2100 the called function did not use bx. Guess that all other
2101 bits are unchanged; the condition flags are presumably lost,
2102 but the processor status is likely valid. */
2103 if (prev_regnum == ARM_PS_REGNUM)
2104 {
2105 CORE_ADDR lr, cpsr;
2106 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2107
2108 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2109 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2110 if (IS_THUMB_ADDR (lr))
2111 cpsr |= t_bit;
2112 else
2113 cpsr &= ~t_bit;
2114 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2115 }
2116
2117 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2118 prev_regnum);
2119 }
2120
/* Prologue-analysis based unwinder for normal ARM frames.  This is the
   fallback unwinder used when neither DWARF CFI nor ARM exception table
   information applies; the sniffer always accepts.  */
struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2129
2130 /* Maintain a list of ARM exception table entries per objfile, similar to the
2131 list of mapping symbols. We only cache entries for standard ARM-defined
2132 personality routines; the cache will contain only the frame unwinding
2133 instructions associated with the entry (not the descriptors). */
2134
/* Per-objfile key under which the exception table cache is stored.  */
static const struct objfile_data *arm_exidx_data_key;

/* One cached exception index entry: ADDR is the section-relative start
   address of the code region it covers, and ENTRY points to the
   normalized unwind instruction list (allocated on the objfile
   obstack), or is NULL when no unwind instructions apply
   (e.g. EXIDX_CANTUNWIND).  */
struct arm_exidx_entry
{
  bfd_vma addr;
  gdb_byte *entry;
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

/* The cache itself: one entry vector per BFD section of the objfile,
   indexed by section index.  */
struct arm_exidx_data
{
  VEC(arm_exidx_entry_s) **section_maps;
};
2149
2150 static void
2151 arm_exidx_data_free (struct objfile *objfile, void *arg)
2152 {
2153 struct arm_exidx_data *data = arg;
2154 unsigned int i;
2155
2156 for (i = 0; i < objfile->obfd->section_count; i++)
2157 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2158 }
2159
2160 static inline int
2161 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2162 const struct arm_exidx_entry *rhs)
2163 {
2164 return lhs->addr < rhs->addr;
2165 }
2166
2167 static struct obj_section *
2168 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2169 {
2170 struct obj_section *osect;
2171
2172 ALL_OBJFILE_OSECTIONS (objfile, osect)
2173 if (bfd_get_section_flags (objfile->obfd,
2174 osect->the_bfd_section) & SEC_ALLOC)
2175 {
2176 bfd_vma start, size;
2177 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2178 size = bfd_get_section_size (osect->the_bfd_section);
2179
2180 if (start <= vma && vma < start + size)
2181 return osect;
2182 }
2183
2184 return NULL;
2185 }
2186
2187 /* Parse contents of exception table and exception index sections
2188 of OBJFILE, and fill in the exception table entry cache.
2189
2190 For each entry that refers to a standard ARM-defined personality
2191 routine, extract the frame unwinding instructions (from either
2192 the index or the table section). The unwinding instructions
2193 are normalized by:
2194 - extracting them from the rest of the table data
2195 - converting to host endianness
2196 - appending the implicit 0xb0 ("Finish") code
2197
2198 The extracted and normalized instructions are stored for later
2199 retrieval by the arm_find_exidx_entry routine. */
2200
static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct cleanup *cleanups;
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  bfd_size_type exidx_size = 0, extab_size = 0;
  gdb_byte *exidx_data = NULL, *extab_data = NULL;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;
  cleanups = make_cleanup (null_cleanup, NULL);

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_size = bfd_get_section_size (exidx);
      exidx_data = xmalloc (exidx_size);
      make_cleanup (xfree, exidx_data);

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data, 0, exidx_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_size = bfd_get_section_size (extab);
      extab_data = xmalloc (extab_size);
      make_cleanup (xfree, extab_data);

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data, 0, extab_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  /* Allocate exception table data structure.  The cache and its
     section-map array live on the objfile obstack; only the per-section
     vectors need an explicit destructor (arm_exidx_data_free).  */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index record is 8 bytes: a prel31
     offset to the function start, and either an inline entry, an offset
     to the table entry, or the EXIDX_CANTUNWIND marker.  */
  for (i = 0; i < exidx_size / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function.  The XOR/subtract pair
	 sign-extends the low 31 bits (a "prel31" offset), which is then
	 made absolute by adding the record's own address.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  VAL is again a prel31
	     offset, relative to the second word of the index record.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  /* Ignore entries pointing outside the extab section (this also
	     covers the case where .ARM.extab is absent entirely, since
	     then extab_vma and extab_size are both zero).  */
	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  The first word is a
		     prel31 offset to the routine itself.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_size)
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   extab_data + addr - extab_vma);
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address.  */
      if (n_words)
	if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  /* One extra byte is reserved for the trailing "Finish" code
	     appended below.  */
	  gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
					       n_bytes + n_words * 4 + 1);

	  /* Copy the valid bytes of WORD, most significant first
	     (N_BYTES has already been decremented when the shift count
	     is computed).  */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  /* Then copy the remaining words, each big-endian within the
	     instruction stream.  */
	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }

  do_cleanups (cleanups);
}
2409
2410 /* Search for the exception table entry covering MEMADDR. If one is found,
2411 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2412 set *START to the start of the region covered by this entry. */
2413
2414 static gdb_byte *
2415 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2416 {
2417 struct obj_section *sec;
2418
2419 sec = find_pc_section (memaddr);
2420 if (sec != NULL)
2421 {
2422 struct arm_exidx_data *data;
2423 VEC(arm_exidx_entry_s) *map;
2424 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2425 unsigned int idx;
2426
2427 data = objfile_data (sec->objfile, arm_exidx_data_key);
2428 if (data != NULL)
2429 {
2430 map = data->section_maps[sec->the_bfd_section->index];
2431 if (!VEC_empty (arm_exidx_entry_s, map))
2432 {
2433 struct arm_exidx_entry *map_sym;
2434
2435 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2436 arm_compare_exidx_entries);
2437
2438 /* VEC_lower_bound finds the earliest ordered insertion
2439 point. If the following symbol starts at this exact
2440 address, we use that; otherwise, the preceding
2441 exception table entry covers this address. */
2442 if (idx < VEC_length (arm_exidx_entry_s, map))
2443 {
2444 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2445 if (map_sym->addr == map_key.addr)
2446 {
2447 if (start)
2448 *start = map_sym->addr + obj_section_addr (sec);
2449 return map_sym->entry;
2450 }
2451 }
2452
2453 if (idx > 0)
2454 {
2455 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2456 if (start)
2457 *start = map_sym->addr + obj_section_addr (sec);
2458 return map_sym->entry;
2459 }
2460 }
2461 }
2462 }
2463
2464 return NULL;
2465 }
2466
2467 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2468 instruction list from the ARM exception table entry ENTRY, allocate and
2469 return a prologue cache structure describing how to unwind this frame.
2470
2471 Return NULL if the unwinding instruction list contains a "spare",
2472 "reserved" or "refuse to unwind" instruction as defined in section
2473 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2474 for the ARM Architecture" document. */
2475
static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  /* VSP is the "virtual stack pointer" tracked while interpreting the
     unwind instructions; it starts at the frame's SP value and ends up
     as the caller's SP.  */
  CORE_ADDR vsp = 0;
  int vsp_valid = 0;

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.  */
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  */
      insn = *entry++;

      if ((insn & 0xc0) == 0)
	{
	  /* 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      else if ((insn & 0xc0) == 0x40)
	{
	  /* 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4.  */
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  /* 1000iiii iiiiiiii: pop under a 12-bit mask.  */
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  /* 1001nnnn: set vsp = r[nnnn].  */
	  int reg = insn & 0xf;

	  /* Reserved cases.  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  /* 1010xnnn: pop r4..r[4+nnn], plus LR if x is set.  */
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* 10110000: "Finish" -- end of the instruction list.

	     We could only have updated PC by popping into it; if so, it
	     will show up as address.  Otherwise, copy LR into PC.  */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  /* 10110001 0000iiii: pop r0..r3 under mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2).
	     The operand is a ULEB128-encoded offset.  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  /* 10110011 sssscccc: pop VFP registers using FSTMFDX.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  /* 10111nnn: pop VFP registers D[8]..D[8+nnn] (FSTMFDX).  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if (insn == 0xc6)
	{
	  /* 11000110 sssscccc: pop iWMMXt registers.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  /* 11000111 0000iiii: pop iWMMXt control registers.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  /* 11000nnn: pop iWMMXt registers WR[10]..WR[10+nnn].  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  /* 11001000 sssscccc: pop high VFP registers (D16 and up).  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  /* 11001001 sssscccc: pop VFP registers using FSTMFDD.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  /* 11010nnn: pop VFP registers D[8]..D[8+nnn] (FSTMFDD).  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}
2765
2766 /* Unwinding via ARM exception table entries. Note that the sniffer
2767 already computes a filled-in prologue cache, which is then used
2768 with the same arm_prologue_this_id and arm_prologue_prev_register
2769 routines also used for prologue-parsing based unwinding. */
2770
/* Sniffer for the exception-table unwinder.  Return 1 (and fill in
   *THIS_PROLOGUE_CACHE) if THIS_FRAME can be unwound using the ARM
   exception table, 0 to let another unwinder handle it.  */

static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.  Detect
	 this by checking whether the instruction just before the PC is
	 an svc (in either Thumb or ARM encoding).  */
      if (arm_frame_is_thumb (this_frame))
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
					byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
					byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2860
/* Exception-table based unwinder.  The sniffer fills in a complete
   prologue cache, so the generic arm_prologue_this_id and
   arm_prologue_prev_register routines can be reused unchanged.  */
struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2869
2870 static struct arm_prologue_cache *
2871 arm_make_stub_cache (struct frame_info *this_frame)
2872 {
2873 struct arm_prologue_cache *cache;
2874
2875 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2876 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2877
2878 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2879
2880 return cache;
2881 }
2882
2883 /* Our frame ID for a stub frame is the current SP and LR. */
2884
2885 static void
2886 arm_stub_this_id (struct frame_info *this_frame,
2887 void **this_cache,
2888 struct frame_id *this_id)
2889 {
2890 struct arm_prologue_cache *cache;
2891
2892 if (*this_cache == NULL)
2893 *this_cache = arm_make_stub_cache (this_frame);
2894 cache = *this_cache;
2895
2896 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2897 }
2898
2899 static int
2900 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2901 struct frame_info *this_frame,
2902 void **this_prologue_cache)
2903 {
2904 CORE_ADDR addr_in_block;
2905 char dummy[4];
2906
2907 addr_in_block = get_frame_address_in_block (this_frame);
2908 if (in_plt_section (addr_in_block, NULL)
2909 /* We also use the stub winder if the target memory is unreadable
2910 to avoid having the prologue unwinder trying to read it. */
2911 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2912 return 1;
2913
2914 return 0;
2915 }
2916
/* Unwinder for PLT stubs and frames whose code is unreadable.  Uses the
   trivial stub frame ID (current SP and PC) but shares the register
   unwinding logic with the prologue unwinder.  */
struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2925
2926 static CORE_ADDR
2927 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2928 {
2929 struct arm_prologue_cache *cache;
2930
2931 if (*this_cache == NULL)
2932 *this_cache = arm_make_prologue_cache (this_frame);
2933 cache = *this_cache;
2934
2935 return cache->prev_sp - cache->framesize;
2936 }
2937
/* Frame base handlers for normal frames: base, locals, and arguments
   are all addressed relative to the same frame base computed by
   arm_normal_frame_base.  */
struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,
  arm_normal_frame_base,
  arm_normal_frame_base
};
2944
2945 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2946 dummy frame. The frame ID's base needs to match the TOS value
2947 saved by save_dummy_frame_tos() and returned from
2948 arm_push_dummy_call, and the PC needs to match the dummy frame's
2949 breakpoint. */
2950
2951 static struct frame_id
2952 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2953 {
2954 return frame_id_build (get_frame_register_unsigned (this_frame,
2955 ARM_SP_REGNUM),
2956 get_frame_pc (this_frame));
2957 }
2958
2959 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2960 be used to construct the previous frame's ID, after looking up the
2961 containing function). */
2962
2963 static CORE_ADDR
2964 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2965 {
2966 CORE_ADDR pc;
2967 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2968 return arm_addr_bits_remove (gdbarch, pc);
2969 }
2970
2971 static CORE_ADDR
2972 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2973 {
2974 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2975 }
2976
2977 static struct value *
2978 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2979 int regnum)
2980 {
2981 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2982 CORE_ADDR lr, cpsr;
2983 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2984
2985 switch (regnum)
2986 {
2987 case ARM_PC_REGNUM:
2988 /* The PC is normally copied from the return column, which
2989 describes saves of LR. However, that version may have an
2990 extra bit set to indicate Thumb state. The bit is not
2991 part of the PC. */
2992 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2993 return frame_unwind_got_constant (this_frame, regnum,
2994 arm_addr_bits_remove (gdbarch, lr));
2995
2996 case ARM_PS_REGNUM:
2997 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2998 cpsr = get_frame_register_unsigned (this_frame, regnum);
2999 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3000 if (IS_THUMB_ADDR (lr))
3001 cpsr |= t_bit;
3002 else
3003 cpsr &= ~t_bit;
3004 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3005
3006 default:
3007 internal_error (__FILE__, __LINE__,
3008 _("Unexpected register %d"), regnum);
3009 }
3010 }
3011
3012 static void
3013 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3014 struct dwarf2_frame_state_reg *reg,
3015 struct frame_info *this_frame)
3016 {
3017 switch (regnum)
3018 {
3019 case ARM_PC_REGNUM:
3020 case ARM_PS_REGNUM:
3021 reg->how = DWARF2_FRAME_REG_FN;
3022 reg->loc.fn = arm_dwarf2_prev_register;
3023 break;
3024 case ARM_SP_REGNUM:
3025 reg->how = DWARF2_FRAME_REG_CFA;
3026 break;
3027 }
3028 }
3029
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.  PC is
   assumed to point at Thumb code.  The result is a heuristic: we
   require both a reachable return instruction ahead of PC and some
   stack-adjusting instruction, either ahead of PC or immediately
   before it.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without function bounds we cannot scan; assume not in epilogue.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  /* Forward scan: every instruction from PC to the return must be one
     of the recognized epilogue instructions, or we bail out.  */
  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (insn == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn & 0xfe00) == 0xbc00)  /* pop <registers> */
	{
	  found_stack_adjust = 1;
	  if (insn & 0x0100)  /* <registers> include PC.  */
	    found_return = 1;
	}
      else if (thumb_insn_size (insn) == 4)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit encoding.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    /* Some other 32-bit instruction: not an epilogue.  */
	    break;
	}
      else
	/* Some other 16-bit instruction: not an epilogue.  */
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* INSN/INSN2 hold the two halfwords preceding PC.  A 16-bit
	 predecessor sits in INSN2; a 32-bit one spans both.  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (insn2 == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xbc00)  /* pop <registers> without PC */
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}
3154
3155 /* Return true if we are in the function's epilogue, i.e. after the
3156 instruction that destroyed the function's stack frame. */
3157
3158 static int
3159 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3160 {
3161 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3162 unsigned int insn;
3163 int found_return, found_stack_adjust;
3164 CORE_ADDR func_start, func_end;
3165
3166 if (arm_pc_is_thumb (gdbarch, pc))
3167 return thumb_in_function_epilogue_p (gdbarch, pc);
3168
3169 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3170 return 0;
3171
3172 /* We are in the epilogue if the previous instruction was a stack
3173 adjustment and the next instruction is a possible return (bx, mov
3174 pc, or pop). We could have to scan backwards to find the stack
3175 adjustment, or forwards to find the return, but this is a decent
3176 approximation. First scan forwards. */
3177
3178 found_return = 0;
3179 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3180 if (bits (insn, 28, 31) != INST_NV)
3181 {
3182 if ((insn & 0x0ffffff0) == 0x012fff10)
3183 /* BX. */
3184 found_return = 1;
3185 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3186 /* MOV PC. */
3187 found_return = 1;
3188 else if ((insn & 0x0fff0000) == 0x08bd0000
3189 && (insn & 0x0000c000) != 0)
3190 /* POP (LDMIA), including PC or LR. */
3191 found_return = 1;
3192 }
3193
3194 if (!found_return)
3195 return 0;
3196
3197 /* Scan backwards. This is just a heuristic, so do not worry about
3198 false positives from mode changes. */
3199
3200 if (pc < func_start + 4)
3201 return 0;
3202
3203 found_stack_adjust = 0;
3204 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3205 if (bits (insn, 28, 31) != INST_NV)
3206 {
3207 if ((insn & 0x0df0f000) == 0x0080d000)
3208 /* ADD SP (register or immediate). */
3209 found_stack_adjust = 1;
3210 else if ((insn & 0x0df0f000) == 0x0040d000)
3211 /* SUB SP (register or immediate). */
3212 found_stack_adjust = 1;
3213 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3214 /* MOV SP. */
3215 found_stack_adjust = 1;
3216 else if ((insn & 0x0fff0000) == 0x08bd0000)
3217 /* POP (LDMIA). */
3218 found_stack_adjust = 1;
3219 }
3220
3221 if (found_stack_adjust)
3222 return 1;
3223
3224 return 0;
3225 }
3226
3227
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.
   Items are allocated with xmalloc by push_stack_item and released by
   pop_stack_item.  */

struct stack_item
{
  int len;			/* Size of DATA in bytes.  */
  struct stack_item *prev;	/* Next-older item, or NULL at the bottom.  */
  void *data;			/* Owned copy of the pushed bytes.  */
};
3237
3238 static struct stack_item *
3239 push_stack_item (struct stack_item *prev, const void *contents, int len)
3240 {
3241 struct stack_item *si;
3242 si = xmalloc (sizeof (struct stack_item));
3243 si->data = xmalloc (len);
3244 si->len = len;
3245 si->prev = prev;
3246 memcpy (si->data, contents, len);
3247 return si;
3248 }
3249
3250 static struct stack_item *
3251 pop_stack_item (struct stack_item *si)
3252 {
3253 struct stack_item *dead = si;
3254 si = si->prev;
3255 xfree (dead->data);
3256 xfree (dead);
3257 return si;
3258 }
3259
3260
/* Return the alignment (in bytes) of the given type T, following the
   AAPCS rules: scalars are aligned to their size, arrays and complex
   types to their element type, and aggregates to their most-aligned
   member.  */

static int
arm_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      /* Scalars align to their own size.  */
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      /* TODO: What about vector types?  */
      return arm_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* An aggregate aligns to its most strictly aligned member.  */
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}
3307
/* Possible base types for a candidate for passing and returning in
   VFP registers ("co-processor register candidate", AAPCS VFP
   variant).  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,		/* Not yet classified.  */
  VFP_CPRC_SINGLE,		/* 4-byte float, passed in sN.  */
  VFP_CPRC_DOUBLE,		/* 8-byte float, passed in dN.  */
  VFP_CPRC_VEC64,		/* 64-bit vector, passed in dN.  */
  VFP_CPRC_VEC128		/* 128-bit vector, passed in qN.  */
};
3319
3320 /* The length of one element of base type B. */
3321
3322 static unsigned
3323 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3324 {
3325 switch (b)
3326 {
3327 case VFP_CPRC_SINGLE:
3328 return 4;
3329 case VFP_CPRC_DOUBLE:
3330 return 8;
3331 case VFP_CPRC_VEC64:
3332 return 8;
3333 case VFP_CPRC_VEC128:
3334 return 16;
3335 default:
3336 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3337 (int) b);
3338 }
3339 }
3340
3341 /* The character ('s', 'd' or 'q') for the type of VFP register used
3342 for passing base type B. */
3343
3344 static int
3345 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3346 {
3347 switch (b)
3348 {
3349 case VFP_CPRC_SINGLE:
3350 return 's';
3351 case VFP_CPRC_DOUBLE:
3352 return 'd';
3353 case VFP_CPRC_VEC64:
3354 return 'd';
3355 case VFP_CPRC_VEC128:
3356 return 'q';
3357 default:
3358 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3359 (int) b);
3360 }
3361 }
3362
/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.  If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vectors and complex types are not currently supported,
   matching the generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* A bare float classifies by its size; it must agree with any
	 classification already recorded in *BASE_TYPE.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  /* Other float sizes (e.g. extended) are not CPRCs.  */
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	int count;
	unsigned unitlen;

	/* Classify the element type once; the element count follows
	   from the array's total length.  */
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	int count = 0;
	unsigned unitlen;
	int i;

	/* Sum the element counts of all fields; any mismatch in
	   classification propagates up as -1.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  /* Padding or non-CPRC data present: not a candidate.  */
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	int count = 0;
	unsigned unitlen;
	int i;

	/* For a union, the element count is the maximum over its
	   members, since they overlap.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  /* Any other type code is not permitted in a CPRC.  */
  return -1;
}
3483
3484 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3485 if passed to or returned from a non-variadic function with the VFP
3486 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3487 *BASE_TYPE to the base type for T and *COUNT to the number of
3488 elements of that base type before returning. */
3489
3490 static int
3491 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3492 int *count)
3493 {
3494 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3495 int c = arm_vfp_cprc_sub_candidate (t, &b);
3496 if (c <= 0 || c > 4)
3497 return 0;
3498 *base_type = b;
3499 *count = c;
3500 return 1;
3501 }
3502
3503 /* Return 1 if the VFP ABI should be used for passing arguments to and
3504 returning values from a function of type FUNC_TYPE, 0
3505 otherwise. */
3506
3507 static int
3508 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3509 {
3510 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3511 /* Variadic functions always use the base ABI. Assume that functions
3512 without debug info are not variadic. */
3513 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3514 return 0;
3515 /* The VFP ABI is only supported as a variant of AAPCS. */
3516 if (tdep->arm_abi != ARM_ABI_AAPCS)
3517 return 0;
3518 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3519 }
3520
/* We currently only support passing parameters in integer registers, which
   conforms with GCC's default model, and VFP argument passing following
   the VFP variant of AAPCS.  Several other variants exist and
   we should probably support some of them based on the selected ABI.

   Set up REGCACHE and the inferior stack for calling FUNCTION with
   NARGS arguments ARGS, placing the return breakpoint at BP_ADDR and,
   if STRUCT_RETURN, passing STRUCT_ADDR as the hidden result pointer.
   Returns the final (decremented, 8-byte-considered) stack pointer,
   which is also written to the SP register.  */

static CORE_ADDR
arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp, int struct_return,
		     CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int argnum;
  int argreg;
  int nstack;
  struct stack_item *si = NULL;
  int use_vfp_abi;
  struct type *ftype;
  /* Bitmask of free single-precision VFP registers s0..s15.  */
  unsigned vfp_regs_free = (1 << 16) - 1;

  /* Determine the type of this function and whether the VFP ABI
     applies.  */
  ftype = check_typedef (value_type (function));
  if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
    ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
  use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);

  /* Set the return address.  For the ARM, the return breakpoint is
     always at BP_ADDR.  */
  if (arm_pc_is_thumb (gdbarch, bp_addr))
    bp_addr |= 1;
  regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);

  /* Walk through the list of args and determine how large a temporary
     stack is required.  Need to take care here as structs may be
     passed on the stack, and we have to push them.  */
  nstack = 0;

  argreg = ARM_A1_REGNUM;
  /* NOTE(review): NSTACK is already zero from the assignment above;
     this duplicate initialization is redundant.  */
  nstack = 0;

  /* The struct_return pointer occupies the first parameter
     passing register.  */
  if (struct_return)
    {
      if (arm_debug)
	fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
			    gdbarch_register_name (gdbarch, argreg),
			    paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
      argreg++;
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      int len;
      struct type *arg_type;
      struct type *target_type;
      enum type_code typecode;
      const bfd_byte *val;
      int align;
      enum arm_vfp_cprc_base_type vfp_base_type;
      int vfp_base_count;
      int may_use_core_reg = 1;

      arg_type = check_typedef (value_type (args[argnum]));
      len = TYPE_LENGTH (arg_type);
      target_type = TYPE_TARGET_TYPE (arg_type);
      typecode = TYPE_CODE (arg_type);
      val = value_contents (args[argnum]);

      align = arm_type_align (arg_type);
      /* Round alignment up to a whole number of words.  */
      align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
      /* Different ABIs have different maximum alignments.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
	{
	  /* The APCS ABI only requires word alignment.  */
	  align = INT_REGISTER_SIZE;
	}
      else
	{
	  /* The AAPCS requires at most doubleword alignment.  */
	  if (align > INT_REGISTER_SIZE * 2)
	    align = INT_REGISTER_SIZE * 2;
	}

      if (use_vfp_abi
	  && arm_vfp_call_candidate (arg_type, &vfp_base_type,
				     &vfp_base_count))
	{
	  int regno;
	  int unit_length;
	  int shift;
	  unsigned mask;

	  /* Because this is a CPRC it cannot go in a core register or
	     cause a core register to be skipped for alignment.
	     Either it goes in VFP registers and the rest of this loop
	     iteration is skipped for this argument, or it goes on the
	     stack (and the stack alignment code is correct for this
	     case).  */
	  may_use_core_reg = 0;

	  /* Find a contiguous run of free VFP registers large enough
	     for all VFP_BASE_COUNT elements, suitably aligned.  */
	  unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
	  shift = unit_length / 4;
	  mask = (1 << (shift * vfp_base_count)) - 1;
	  for (regno = 0; regno < 16; regno += shift)
	    if (((vfp_regs_free >> regno) & mask) == mask)
	      break;

	  if (regno < 16)
	    {
	      int reg_char;
	      int reg_scaled;
	      int i;

	      /* Mark the registers as used and write each element
		 into the corresponding s/d/q register.  */
	      vfp_regs_free &= ~(mask << regno);
	      reg_scaled = regno / shift;
	      reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
	      for (i = 0; i < vfp_base_count; i++)
		{
		  char name_buf[4];
		  int regnum;
		  if (reg_char == 'q')
		    arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
					 val + i * unit_length);
		  else
		    {
		      sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
		      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
							    strlen (name_buf));
		      regcache_cooked_write (regcache, regnum,
					     val + i * unit_length);
		    }
		}
	      continue;
	    }
	  else
	    {
	      /* This CPRC could not go in VFP registers, so all VFP
		 registers are now marked as used.  */
	      vfp_regs_free = 0;
	    }
	}

      /* Push stack padding for doubleword alignment.  */
      if (nstack & (align - 1))
	{
	  si = push_stack_item (si, val, INT_REGISTER_SIZE);
	  nstack += INT_REGISTER_SIZE;
	}
      
      /* Doubleword aligned quantities must go in even register pairs.  */
      if (may_use_core_reg
	  && argreg <= ARM_LAST_ARG_REGNUM
	  && align > INT_REGISTER_SIZE
	  && argreg & 1)
	argreg++;

      /* If the argument is a pointer to a function, and it is a
	 Thumb function, create a LOCAL copy of the value and set
	 the THUMB bit in it.  */
      if (TYPE_CODE_PTR == typecode
	  && target_type != NULL
	  && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
	{
	  CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
	  if (arm_pc_is_thumb (gdbarch, regval))
	    {
	      bfd_byte *copy = alloca (len);
	      store_unsigned_integer (copy, len, byte_order,
				      MAKE_THUMB_ADDR (regval));
	      val = copy;
	    }
	}

      /* Copy the argument to general registers or the stack in
	 register-sized pieces.  Large arguments are split between
	 registers and stack.  */
      while (len > 0)
	{
	  int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;

	  if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
	    {
	      /* The argument is being passed in a general purpose
		 register.  */
	      CORE_ADDR regval
		= extract_unsigned_integer (val, partial_len, byte_order);
	      if (byte_order == BFD_ENDIAN_BIG)
		regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
				    argnum,
				    gdbarch_register_name
				      (gdbarch, argreg),
				    phex (regval, INT_REGISTER_SIZE));
	      regcache_cooked_write_unsigned (regcache, argreg, regval);
	      argreg++;
	    }
	  else
	    {
	      /* Push the arguments onto the stack.  */
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
				    argnum, nstack);
	      si = push_stack_item (si, val, INT_REGISTER_SIZE);
	      nstack += INT_REGISTER_SIZE;
	    }
	      
	  len -= partial_len;
	  val += partial_len;
	}
    }
  /* If we have an odd number of words to push, then decrement the stack
     by one word now, so first stack argument will be dword aligned.  */
  if (nstack & 4)
    sp -= 4;

  /* Pop the items pushed above onto the real inferior stack, in
     reverse order so the first argument ends up lowest.  */
  while (si)
    {
      sp -= si->len;
      write_memory (sp, si->data, si->len);
      si = pop_stack_item (si);
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);

  return sp;
}
3753
3754
3755 /* Always align the frame to an 8-byte boundary. This is required on
3756 some platforms and harmless on the rest. */
3757
3758 static CORE_ADDR
3759 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3760 {
3761 /* Align the stack to eight bytes. */
3762 return sp & ~ (CORE_ADDR) 7;
3763 }
3764
/* Print the FPA exception-flag mnemonics corresponding to the low
   five bits of FLAGS, followed by a newline.  */

static void
print_fpu_flags (int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int i;

  for (i = 0; i < 5; i++)
    if (flags & (1 << i))
      fputs (flag_names[i], stdout);
  putchar ('\n');
}
3780
3781 /* Print interesting information about the floating point processor
3782 (if present) or emulator. */
3783 static void
3784 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3785 struct frame_info *frame, const char *args)
3786 {
3787 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3788 int type;
3789
3790 type = (status >> 24) & 127;
3791 if (status & (1 << 31))
3792 printf (_("Hardware FPU type %d\n"), type);
3793 else
3794 printf (_("Software FPU type %d\n"), type);
3795 /* i18n: [floating point unit] mask */
3796 fputs (_("mask: "), stdout);
3797 print_fpu_flags (status >> 16);
3798 /* i18n: [floating point unit] flags */
3799 fputs (_("flags: "), stdout);
3800 print_fpu_flags (status);
3801 }
3802
3803 /* Construct the ARM extended floating point type. */
3804 static struct type *
3805 arm_ext_type (struct gdbarch *gdbarch)
3806 {
3807 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3808
3809 if (!tdep->arm_ext_type)
3810 tdep->arm_ext_type
3811 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3812 floatformats_arm_ext);
3813
3814 return tdep->arm_ext_type;
3815 }
3816
3817 static struct type *
3818 arm_neon_double_type (struct gdbarch *gdbarch)
3819 {
3820 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3821
3822 if (tdep->neon_double_type == NULL)
3823 {
3824 struct type *t, *elem;
3825
3826 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3827 TYPE_CODE_UNION);
3828 elem = builtin_type (gdbarch)->builtin_uint8;
3829 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3830 elem = builtin_type (gdbarch)->builtin_uint16;
3831 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3832 elem = builtin_type (gdbarch)->builtin_uint32;
3833 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3834 elem = builtin_type (gdbarch)->builtin_uint64;
3835 append_composite_type_field (t, "u64", elem);
3836 elem = builtin_type (gdbarch)->builtin_float;
3837 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3838 elem = builtin_type (gdbarch)->builtin_double;
3839 append_composite_type_field (t, "f64", elem);
3840
3841 TYPE_VECTOR (t) = 1;
3842 TYPE_NAME (t) = "neon_d";
3843 tdep->neon_double_type = t;
3844 }
3845
3846 return tdep->neon_double_type;
3847 }
3848
3849 /* FIXME: The vector types are not correctly ordered on big-endian
3850 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3851 bits of d0 - regardless of what unit size is being held in d0. So
3852 the offset of the first uint8 in d0 is 7, but the offset of the
3853 first float is 4. This code works as-is for little-endian
3854 targets. */
3855
3856 static struct type *
3857 arm_neon_quad_type (struct gdbarch *gdbarch)
3858 {
3859 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3860
3861 if (tdep->neon_quad_type == NULL)
3862 {
3863 struct type *t, *elem;
3864
3865 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3866 TYPE_CODE_UNION);
3867 elem = builtin_type (gdbarch)->builtin_uint8;
3868 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3869 elem = builtin_type (gdbarch)->builtin_uint16;
3870 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3871 elem = builtin_type (gdbarch)->builtin_uint32;
3872 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3873 elem = builtin_type (gdbarch)->builtin_uint64;
3874 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3875 elem = builtin_type (gdbarch)->builtin_float;
3876 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3877 elem = builtin_type (gdbarch)->builtin_double;
3878 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3879
3880 TYPE_VECTOR (t) = 1;
3881 TYPE_NAME (t) = "neon_q";
3882 tdep->neon_quad_type = t;
3883 }
3884
3885 return tdep->neon_quad_type;
3886 }
3887
/* Return the GDB type object for the "standard" data type of data in
   register N.  The branch order below matters: pseudo registers are
   checked first, then target-description-provided registers, then the
   built-in fallbacks.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* VFP single-precision pseudo registers sN follow the raw set.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* NEON quad pseudo registers qN follow the sN pseudos.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      /* Without FPA hardware the fN slots carry no meaningful data.  */
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
3937
/* Map a DWARF register number REG onto the appropriate GDB register
   number, following the ARM DWARF register number assignments.
   Returns -1 for numbers with no GDB equivalent.  */

static int
arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  /* Core integer regs.  */
  if (reg >= 0 && reg <= 15)
    return reg;

  /* Legacy FPA encoding.  These were once used in a way which
     overlapped with VFP register numbering, so their use is
     discouraged, but GDB doesn't support the ARM toolchain
     which used them for VFP.  */
  if (reg >= 16 && reg <= 23)
    return ARM_F0_REGNUM + reg - 16;

  /* New assignments for the FPA registers.  */
  if (reg >= 96 && reg <= 103)
    return ARM_F0_REGNUM + reg - 96;

  /* WMMX register assignments.  */
  if (reg >= 104 && reg <= 111)
    return ARM_WCGR0_REGNUM + reg - 104;

  if (reg >= 112 && reg <= 127)
    return ARM_WR0_REGNUM + reg - 112;

  if (reg >= 192 && reg <= 199)
    return ARM_WC0_REGNUM + reg - 192;

  /* VFP v2 registers.  A double precision value is actually
     in d1 rather than s2, but the ABI only defines numbering
     for the single precision registers.  This will "just work"
     in GDB for little endian targets (we'll read eight bytes,
     starting in s0 and then progressing to s1), but will be
     reversed on big endian targets with VFP.  This won't
     be a problem for the new Neon quad registers; you're supposed
     to use DW_OP_piece for those.  */
  if (reg >= 64 && reg <= 95)
    {
      /* Largest name is "s31": 3 chars + NUL fits in 4 bytes.  */
      char name_buf[4];

      sprintf (name_buf, "s%d", reg - 64);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  /* VFP v3 / Neon registers.  This range is also used for VFP v2
     registers, except that it now describes d0 instead of s0.  */
  if (reg >= 256 && reg <= 287)
    {
      /* Largest name is "d31": 3 chars + NUL fits in 4 bytes.  */
      char name_buf[4];

      sprintf (name_buf, "d%d", reg - 256);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  /* Unknown DWARF register number.  */
  return -1;
}
3999
/* Map GDB internal REGNUM onto the Arm simulator register numbers.
   iWMMXt registers are remapped to the simulator's coprocessor
   banks; the remaining raw registers are mapped in order onto the
   general, FPA and status register blocks.  */
static int
arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
{
  int reg = regnum;
  gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));

  if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
    return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;

  if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
    return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;

  if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
    return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;

  /* Successively subtract each block's size to find which block
     REGNUM falls into.  */
  if (reg < NUM_GREGS)
    return SIM_ARM_R0_REGNUM + reg;
  reg -= NUM_GREGS;

  if (reg < NUM_FREGS)
    return SIM_ARM_FP0_REGNUM + reg;
  reg -= NUM_FREGS;

  if (reg < NUM_SREGS)
    return SIM_ARM_FPS_REGNUM + reg;
  reg -= NUM_SREGS;

  internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
}
4030
4031 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4032 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4033 It is thought that this is is the floating-point register format on
4034 little-endian systems. */
4035
4036 static void
4037 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4038 void *dbl, int endianess)
4039 {
4040 DOUBLEST d;
4041
4042 if (endianess == BFD_ENDIAN_BIG)
4043 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4044 else
4045 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4046 ptr, &d);
4047 floatformat_from_doublest (fmt, &d, dbl);
4048 }
4049
4050 static void
4051 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4052 int endianess)
4053 {
4054 DOUBLEST d;
4055
4056 floatformat_to_doublest (fmt, ptr, &d);
4057 if (endianess == BFD_ENDIAN_BIG)
4058 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4059 else
4060 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4061 &d, dbl);
4062 }
4063
/* Return 1 if the ARM condition code COND is satisfied by the CPSR
   flags in STATUS_REG, 0 otherwise.  AL and NV always report true
   (NV is treated as unconditional for the purposes of stepping).  */

static int
condition_true (unsigned long cond, unsigned long status_reg)
{
  if (cond == INST_AL || cond == INST_NV)
    return 1;

  switch (cond)
    {
    case INST_EQ:		/* Z set.  */
      return ((status_reg & FLAG_Z) != 0);
    case INST_NE:		/* Z clear.  */
      return ((status_reg & FLAG_Z) == 0);
    case INST_CS:		/* C set.  */
      return ((status_reg & FLAG_C) != 0);
    case INST_CC:		/* C clear.  */
      return ((status_reg & FLAG_C) == 0);
    case INST_MI:		/* N set.  */
      return ((status_reg & FLAG_N) != 0);
    case INST_PL:		/* N clear.  */
      return ((status_reg & FLAG_N) == 0);
    case INST_VS:		/* V set.  */
      return ((status_reg & FLAG_V) != 0);
    case INST_VC:		/* V clear.  */
      return ((status_reg & FLAG_V) == 0);
    case INST_HI:		/* C set and Z clear.  */
      return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
    case INST_LS:		/* C clear or Z set.  */
      return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
    case INST_GE:		/* N == V.  */
      return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
    case INST_LT:		/* N != V.  */
      return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
    case INST_GT:		/* Z clear and N == V.  */
      return (((status_reg & FLAG_Z) == 0)
	      && (((status_reg & FLAG_N) == 0)
		  == ((status_reg & FLAG_V) == 0)));
    case INST_LE:		/* Z set or N != V.  */
      return (((status_reg & FLAG_Z) != 0)
	      || (((status_reg & FLAG_N) == 0)
		  != ((status_reg & FLAG_V) == 0)));
    }
  /* Unrecognized condition: conservatively treat as true.  */
  return 1;
}
4107
/* Decode the register-shift ("operand 2") field of the ARM instruction
   INST, reading any source registers from FRAME.  CARRY is the current
   carry flag, used only for the RRX form; PC_VAL is the value the PC
   reads as for this instruction.  STATUS_REG is not used by this
   function.  Returns the resulting 32-bit operand value.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift: the amount is the low byte of Rs.
	 Rs == PC reads as the instruction address + 8.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount in bits [11:7].  */
    shift = bits (inst, 7, 11);

  /* Rm == PC reads as the instruction address plus 8, or plus 12 for
     the register-shift form.  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Shifts of 32 or more fill the result with the sign bit.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	/* ROR #0 encodes RRX: rotate right one bit through carry.  */
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  return res & 0xffffffff;
}
4157
4158 /* Return number of 1-bits in VAL. */
4159
static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Kernighan's method: each iteration clears the least significant
     set bit, so the loop runs once per 1-bit.  */
  while (val != 0)
    {
      val &= val - 1;
      count++;
    }

  return count;
}
4168
4169 /* Return the size in bytes of the complete Thumb instruction whose
4170 first halfword is INST1. */
4171
static int
thumb_insn_size (unsigned short inst1)
{
  /* A first halfword of the form 0b11101..., 0b11110... or 0b11111...
     introduces a 32-bit Thumb-2 instruction; anything else (including
     0b11100..., the 16-bit unconditional branch) is 16 bits.  */
  int is_32bit = ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0);

  return is_32bit ? 4 : 2;
}
4180
/* Advance the Thumb-2 ITSTATE value past one instruction, returning the
   new value (0 once the IT block has finished).  */

static int
thumb_advance_itstate (unsigned int itstate)
{
  unsigned int next;

  /* Keep the base condition in bits [7:5] and shift the remaining
     condition/count bits in [4:0] left by one.  */
  next = (itstate & 0xe0) | ((itstate << 1) & 0x1f);

  /* A zero low nibble means the marker bit has been shifted out:
     the IT block is complete.  */
  return (next & 0x0f) == 0 ? 0 : next;
}
4194
4195 /* Find the next PC after the current instruction executes. In some
4196 cases we can not statically determine the answer (see the IT state
4197 handling in this function); in that case, a breakpoint may be
4198 inserted in addition to the returned PC, which will be used to set
4199 another breakpoint by our caller. */
4200
static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;		/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  /* ITSTATE is split across CPSR bits [15:10] and [26:25].  */
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  /* Skip over any instructions whose condition fails.  */
	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      arm_insert_single_step_breakpoint (gdbarch, aspace,
						 MAKE_THUMB_ADDR (pc));
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	/* Advance to the next instruction.  All the 32-bit
	   instructions share a common prefix.  */
	return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));

      /* Otherwise, handle the instruction normally.  */
    }

  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
	 all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)  /* 0x0f = SWI */
	{
	  /* SWI/SVC: let the OS-specific hook predict the syscall
	     return address if one is installed.  */
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      /* The J1/J2 bits are XORed with the (inverted) sign bit
		 to form offset bits 23 and 22.  */
	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    load_pc = 0;

	  if (load_pc)
	    {
	      /* Read the location where the PC will be loaded from.  */
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  Compute the effective address from the base
	     register and the addressing mode encoded in inst2.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* Literal load: base is the word-aligned PC.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      /* Pre-indexed addressing with an 8-bit offset.  */
	      if (bit (inst2, 10))
		{
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      /* Register offset, optionally shifted left.  */
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  Branch by a byte-sized table entry (doubled).  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  Branch by a halfword-sized table entry (doubled).  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      /* Note: no MAKE_THUMB_ADDR here — BX/BLX may switch to ARM state,
	 so the register's own bit 0 determines the execution state.  */
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }
  return nextpc;
}
4566
4567 /* Get the raw next address. PC is the current program counter, in
4568 FRAME, which is assumed to be executing in ARM mode.
4569
4570 The value returned has the execution state of the next instruction
4571 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4572 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4573 address. */
4574
static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  /* The NV condition space holds unconditional instructions (BLX
     immediate, coprocessor ops) on ARMv5 and later.  */
  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  */
	  nextpc = BranchDest (pc, this_instr);
	  /* The H bit (bit 24) supplies bit 1 of the target address.  */
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:		/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only instructions with Rd == PC can change the flow of
	       control.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		return nextpc;
	      }

	    /* Otherwise this is a data-processing instruction writing
	       the PC; fetch both operands and emulate the ALU op.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    operand1 = ((rn == ARM_PC_REGNUM)
			? (pc_val + 8)
			: get_frame_register_unsigned (frame, rn));

	    if (bit (this_instr, 25))
	      {
		/* Immediate operand: 8-bit value rotated right.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* Comparison ops only set flags; PC is not written.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  /* Byte loads (LDRB) into the PC are rejected.  */
		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* Compute the base address for the load.  */
		  rn = bits (this_instr, 16, 19);
		  base = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
		      (bit (this_instr, 25)
		       ? shifted_reg_val (frame, this_instr, c, pc_val, status)
		       : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
							      4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;
		  unsigned long rn_val
		    = get_frame_register_unsigned (frame,
						   bits (this_instr, 16, 19));

		  if (bit (this_instr, 23))
		    {
		      /* up */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
							      (rn_val + offset),
							      4, byte_order);
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    /* Let the OS-specific hook predict the syscall return
	       address if one is installed.  */
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}
4836
4837 /* Determine next PC after current instruction executes. Will call either
4838 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4839 loop is detected. */
4840
4841 CORE_ADDR
4842 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4843 {
4844 CORE_ADDR nextpc;
4845
4846 if (arm_frame_is_thumb (frame))
4847 {
4848 nextpc = thumb_get_next_pc_raw (frame, pc);
4849 if (nextpc == MAKE_THUMB_ADDR (pc))
4850 error (_("Infinite loop detected"));
4851 }
4852 else
4853 {
4854 nextpc = arm_get_next_pc_raw (frame, pc);
4855 if (nextpc == pc)
4856 error (_("Infinite loop detected"));
4857 }
4858
4859 return nextpc;
4860 }
4861
4862 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4863 of the appropriate mode (as encoded in the PC value), even if this
4864 differs from what would be expected according to the symbol tables. */
4865
4866 void
4867 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
4868 struct address_space *aspace,
4869 CORE_ADDR pc)
4870 {
4871 struct cleanup *old_chain
4872 = make_cleanup_restore_integer (&arm_override_mode);
4873
4874 arm_override_mode = IS_THUMB_ADDR (pc);
4875 pc = gdbarch_addr_bits_remove (gdbarch, pc);
4876
4877 insert_single_step_breakpoint (gdbarch, aspace, pc);
4878
4879 do_cleanups (old_chain);
4880 }
4881
4882 /* single_step() is called just before we want to resume the inferior,
4883 if we want to single-step it but there is no hardware or kernel
4884 single-step support. We find the target of the coming instruction
4885 and breakpoint it. */
4886
4887 int
4888 arm_software_single_step (struct frame_info *frame)
4889 {
4890 struct gdbarch *gdbarch = get_frame_arch (frame);
4891 struct address_space *aspace = get_frame_address_space (frame);
4892 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4893
4894 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4895
4896 return 1;
4897 }
4898
4899 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4900 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4901 NULL if an error occurs. BUF is freed. */
4902
4903 static gdb_byte *
4904 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4905 int old_len, int new_len)
4906 {
4907 gdb_byte *new_buf, *middle;
4908 int bytes_to_read = new_len - old_len;
4909
4910 new_buf = xmalloc (new_len);
4911 memcpy (new_buf + bytes_to_read, buf, old_len);
4912 xfree (buf);
4913 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4914 {
4915 xfree (new_buf);
4916 return NULL;
4917 }
4918 return new_buf;
4919 }
4920
4921 /* An IT block is at most the 2-byte IT instruction followed by
4922 four 4-byte instructions. The furthest back we must search to
4923 find an IT block that affects the current instruction is thus
4924 2 + 3 * 4 == 14 bytes. */
4925 #define MAX_IT_BLOCK_PREFIX 14
4926
4927 /* Use a quick scan if there are more than this many bytes of
4928 code. */
4929 #define IT_SCAN_THRESHOLD 32
4930
4931 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4932 A breakpoint in an IT block may not be hit, depending on the
4933 condition flags. */
4934 static CORE_ADDR
4935 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4936 {
4937 gdb_byte *buf;
4938 char map_type;
4939 CORE_ADDR boundary, func_start;
4940 int buf_len, buf2_len;
4941 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4942 int i, any, last_it, last_it_count;
4943
4944 /* If we are using BKPT breakpoints, none of this is necessary. */
4945 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4946 return bpaddr;
4947
4948 /* ARM mode does not have this problem. */
4949 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4950 return bpaddr;
4951
4952 /* We are setting a breakpoint in Thumb code that could potentially
4953 contain an IT block. The first step is to find how much Thumb
4954 code there is; we do not need to read outside of known Thumb
4955 sequences. */
4956 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4957 if (map_type == 0)
4958 /* Thumb-2 code must have mapping symbols to have a chance. */
4959 return bpaddr;
4960
4961 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4962
4963 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4964 && func_start > boundary)
4965 boundary = func_start;
4966
4967 /* Search for a candidate IT instruction. We have to do some fancy
4968 footwork to distinguish a real IT instruction from the second
4969 half of a 32-bit instruction, but there is no need for that if
4970 there's no candidate. */
4971 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4972 if (buf_len == 0)
4973 /* No room for an IT instruction. */
4974 return bpaddr;
4975
4976 buf = xmalloc (buf_len);
4977 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4978 return bpaddr;
4979 any = 0;
4980 for (i = 0; i < buf_len; i += 2)
4981 {
4982 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4983 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4984 {
4985 any = 1;
4986 break;
4987 }
4988 }
4989 if (any == 0)
4990 {
4991 xfree (buf);
4992 return bpaddr;
4993 }
4994
4995 /* OK, the code bytes before this instruction contain at least one
4996 halfword which resembles an IT instruction. We know that it's
4997 Thumb code, but there are still two possibilities. Either the
4998 halfword really is an IT instruction, or it is the second half of
4999 a 32-bit Thumb instruction. The only way we can tell is to
5000 scan forwards from a known instruction boundary. */
5001 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5002 {
5003 int definite;
5004
5005 /* There's a lot of code before this instruction. Start with an
5006 optimistic search; it's easy to recognize halfwords that can
5007 not be the start of a 32-bit instruction, and use that to
5008 lock on to the instruction boundaries. */
5009 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5010 if (buf == NULL)
5011 return bpaddr;
5012 buf_len = IT_SCAN_THRESHOLD;
5013
5014 definite = 0;
5015 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5016 {
5017 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5018 if (thumb_insn_size (inst1) == 2)
5019 {
5020 definite = 1;
5021 break;
5022 }
5023 }
5024
5025 /* At this point, if DEFINITE, BUF[I] is the first place we
5026 are sure that we know the instruction boundaries, and it is far
5027 enough from BPADDR that we could not miss an IT instruction
5028 affecting BPADDR. If ! DEFINITE, give up - start from a
5029 known boundary. */
5030 if (! definite)
5031 {
5032 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5033 bpaddr - boundary);
5034 if (buf == NULL)
5035 return bpaddr;
5036 buf_len = bpaddr - boundary;
5037 i = 0;
5038 }
5039 }
5040 else
5041 {
5042 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5043 if (buf == NULL)
5044 return bpaddr;
5045 buf_len = bpaddr - boundary;
5046 i = 0;
5047 }
5048
5049 /* Scan forwards. Find the last IT instruction before BPADDR. */
5050 last_it = -1;
5051 last_it_count = 0;
5052 while (i < buf_len)
5053 {
5054 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5055 last_it_count--;
5056 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5057 {
5058 last_it = i;
5059 if (inst1 & 0x0001)
5060 last_it_count = 4;
5061 else if (inst1 & 0x0002)
5062 last_it_count = 3;
5063 else if (inst1 & 0x0004)
5064 last_it_count = 2;
5065 else
5066 last_it_count = 1;
5067 }
5068 i += thumb_insn_size (inst1);
5069 }
5070
5071 xfree (buf);
5072
5073 if (last_it == -1)
5074 /* There wasn't really an IT instruction after all. */
5075 return bpaddr;
5076
5077 if (last_it_count < 1)
5078 /* It was too far away. */
5079 return bpaddr;
5080
5081 /* This really is a trouble spot. Move the breakpoint to the IT
5082 instruction. */
5083 return bpaddr - buf_len + last_it;
5084 }
5085
5086 /* ARM displaced stepping support.
5087
5088 Generally ARM displaced stepping works as follows:
5089
5090 1. When an instruction is to be single-stepped, it is first decoded by
5091 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5092 Depending on the type of instruction, it is then copied to a scratch
5093 location, possibly in a modified form. The copy_* set of functions
5094 performs such modification, as necessary. A breakpoint is placed after
5095 the modified instruction in the scratch space to return control to GDB.
5096 Note in particular that instructions which modify the PC will no longer
5097 do so after modification.
5098
5099 2. The instruction is single-stepped, by setting the PC to the scratch
5100 location address, and resuming. Control returns to GDB when the
5101 breakpoint is hit.
5102
5103 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5104 function used for the current instruction. This function's job is to
5105 put the CPU/memory state back to what it would have been if the
5106 instruction had been executed unmodified in its original location. */
5107
5108 /* NOP instruction (mov r0, r0). */
5109 #define ARM_NOP 0xe1a00000
5110 #define THUMB_NOP 0x4600
5111
5112 /* Helper for register reads for displaced stepping. In particular, this
5113 returns the PC as it would be seen by the instruction at its original
5114 location. */
5115
5116 ULONGEST
5117 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5118 int regno)
5119 {
5120 ULONGEST ret;
5121 CORE_ADDR from = dsc->insn_addr;
5122
5123 if (regno == ARM_PC_REGNUM)
5124 {
5125 /* Compute pipeline offset:
5126 - When executing an ARM instruction, PC reads as the address of the
5127 current instruction plus 8.
5128 - When executing a Thumb instruction, PC reads as the address of the
5129 current instruction plus 4. */
5130
5131 if (!dsc->is_thumb)
5132 from += 8;
5133 else
5134 from += 4;
5135
5136 if (debug_displaced)
5137 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5138 (unsigned long) from);
5139 return (ULONGEST) from;
5140 }
5141 else
5142 {
5143 regcache_cooked_read_unsigned (regs, regno, &ret);
5144 if (debug_displaced)
5145 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5146 regno, (unsigned long) ret);
5147 return ret;
5148 }
5149 }
5150
5151 static int
5152 displaced_in_arm_mode (struct regcache *regs)
5153 {
5154 ULONGEST ps;
5155 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5156
5157 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5158
5159 return (ps & t_bit) == 0;
5160 }
5161
5162 /* Write to the PC as from a branch instruction. */
5163
5164 static void
5165 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5166 ULONGEST val)
5167 {
5168 if (!dsc->is_thumb)
5169 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5170 architecture versions < 6. */
5171 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5172 val & ~(ULONGEST) 0x3);
5173 else
5174 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5175 val & ~(ULONGEST) 0x1);
5176 }
5177
5178 /* Write to the PC as from a branch-exchange instruction. */
5179
5180 static void
5181 bx_write_pc (struct regcache *regs, ULONGEST val)
5182 {
5183 ULONGEST ps;
5184 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5185
5186 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5187
5188 if ((val & 1) == 1)
5189 {
5190 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5191 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5192 }
5193 else if ((val & 2) == 0)
5194 {
5195 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5196 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5197 }
5198 else
5199 {
5200 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5201 mode, align dest to 4 bytes). */
5202 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5203 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5204 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5205 }
5206 }
5207
5208 /* Write to the PC as if from a load instruction. */
5209
5210 static void
5211 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5212 ULONGEST val)
5213 {
5214 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5215 bx_write_pc (regs, val);
5216 else
5217 branch_write_pc (regs, dsc, val);
5218 }
5219
5220 /* Write to the PC as if from an ALU instruction. */
5221
5222 static void
5223 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5224 ULONGEST val)
5225 {
5226 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5227 bx_write_pc (regs, val);
5228 else
5229 branch_write_pc (regs, dsc, val);
5230 }
5231
5232 /* Helper for writing to registers for displaced stepping. Writing to the PC
5233 has a varying effects depending on the instruction which does the write:
5234 this is controlled by the WRITE_PC argument. */
5235
5236 void
5237 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5238 int regno, ULONGEST val, enum pc_write_style write_pc)
5239 {
5240 if (regno == ARM_PC_REGNUM)
5241 {
5242 if (debug_displaced)
5243 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5244 (unsigned long) val);
5245 switch (write_pc)
5246 {
5247 case BRANCH_WRITE_PC:
5248 branch_write_pc (regs, dsc, val);
5249 break;
5250
5251 case BX_WRITE_PC:
5252 bx_write_pc (regs, val);
5253 break;
5254
5255 case LOAD_WRITE_PC:
5256 load_write_pc (regs, dsc, val);
5257 break;
5258
5259 case ALU_WRITE_PC:
5260 alu_write_pc (regs, dsc, val);
5261 break;
5262
5263 case CANNOT_WRITE_PC:
5264 warning (_("Instruction wrote to PC in an unexpected way when "
5265 "single-stepping"));
5266 break;
5267
5268 default:
5269 internal_error (__FILE__, __LINE__,
5270 _("Invalid argument to displaced_write_reg"));
5271 }
5272
5273 dsc->wrote_to_pc = 1;
5274 }
5275 else
5276 {
5277 if (debug_displaced)
5278 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5279 regno, (unsigned long) val);
5280 regcache_cooked_write_unsigned (regs, regno, val);
5281 }
5282 }
5283
/* Return 1 if any of the 4-bit register fields of INSN selected by BITMASK
   holds the value 0b1111 (i.e. references the PC, r15), else return 0.
   Each register field of interest must have all four of its bits set in
   BITMASK.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t lowbit = 1;

  while (bitmask != 0)
    {
      uint32_t field_mask;

      /* Advance to the lowest remaining set bit of the mask.  */
      while (lowbit != 0 && (bitmask & lowbit) == 0)
	lowbit <<= 1;

      if (lowbit == 0)
	break;

      /* Widen that bit into a mask covering the whole 4-bit field.  */
      field_mask = lowbit * 0xf;

      /* All-ones in the field means the field names r15.  */
      if ((insn & field_mask) == field_mask)
	return 1;

      bitmask &= ~field_mask;
    }

  return 0;
}
5315
5316 /* The simplest copy function. Many instructions have the same effect no
5317 matter what address they are executed at: in those cases, use this. */
5318
5319 static int
5320 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5321 const char *iname, struct displaced_step_closure *dsc)
5322 {
5323 if (debug_displaced)
5324 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5325 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5326 iname);
5327
5328 dsc->modinsn[0] = insn;
5329
5330 return 0;
5331 }
5332
5333 static int
5334 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
5335 uint16_t insn2, const char *iname,
5336 struct displaced_step_closure *dsc)
5337 {
5338 if (debug_displaced)
5339 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
5340 "opcode/class '%s' unmodified\n", insn1, insn2,
5341 iname);
5342
5343 dsc->modinsn[0] = insn1;
5344 dsc->modinsn[1] = insn2;
5345 dsc->numinsns = 2;
5346
5347 return 0;
5348 }
5349
5350 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
5351 modification. */
5352 static int
5353 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
5354 const char *iname,
5355 struct displaced_step_closure *dsc)
5356 {
5357 if (debug_displaced)
5358 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
5359 "opcode/class '%s' unmodified\n", insn,
5360 iname);
5361
5362 dsc->modinsn[0] = insn;
5363
5364 return 0;
5365 }
5366
5367 /* Preload instructions with immediate offset. */
5368
5369 static void
5370 cleanup_preload (struct gdbarch *gdbarch,
5371 struct regcache *regs, struct displaced_step_closure *dsc)
5372 {
5373 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5374 if (!dsc->u.preload.immed)
5375 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5376 }
5377
5378 static void
5379 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5380 struct displaced_step_closure *dsc, unsigned int rn)
5381 {
5382 ULONGEST rn_val;
5383 /* Preload instructions:
5384
5385 {pli/pld} [rn, #+/-imm]
5386 ->
5387 {pli/pld} [r0, #+/-imm]. */
5388
5389 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5390 rn_val = displaced_read_reg (regs, dsc, rn);
5391 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5392 dsc->u.preload.immed = 1;
5393
5394 dsc->cleanup = &cleanup_preload;
5395 }
5396
5397 static int
5398 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5399 struct displaced_step_closure *dsc)
5400 {
5401 unsigned int rn = bits (insn, 16, 19);
5402
5403 if (!insn_references_pc (insn, 0x000f0000ul))
5404 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5405
5406 if (debug_displaced)
5407 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5408 (unsigned long) insn);
5409
5410 dsc->modinsn[0] = insn & 0xfff0ffff;
5411
5412 install_preload (gdbarch, regs, dsc, rn);
5413
5414 return 0;
5415 }
5416
/* Copy a 32-bit Thumb PLD/PLI for displaced stepping.  Only the literal
   (PC-relative) forms are rewritten; everything else is executed
   unmodified.  */

static int
thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
		     struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int u_bit = bit (insn1, 7);	/* U bit: add (1) / subtract (0).  */
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  if (rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);

  /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
     PLD (literal) Encoding T1.  */
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
			imm12);

  /* Fold the U bit into the offset to get a signed displacement.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction {pli/pld} PC imm12 into:
     Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12

     {pli/pld} [r0, r1]

     Cleanup: r0 <- tmp[0], r1 <- tmp[1].  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);

  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
  /* Register-offset form, so cleanup_preload restores r1 as well.  */
  dsc->u.preload.immed = 0;

  /* {pli/pld} [r0, r1] */
  dsc->modinsn[0] = insn1 & 0xfff0;
  dsc->modinsn[1] = 0xf001;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_preload;
  return 0;
}
5464
5465 /* Preload instructions with register offset. */
5466
5467 static void
5468 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
5469 struct displaced_step_closure *dsc, unsigned int rn,
5470 unsigned int rm)
5471 {
5472 ULONGEST rn_val, rm_val;
5473
5474 /* Preload register-offset instructions:
5475
5476 {pli/pld} [rn, rm {, shift}]
5477 ->
5478 {pli/pld} [r0, r1 {, shift}]. */
5479
5480 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5481 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5482 rn_val = displaced_read_reg (regs, dsc, rn);
5483 rm_val = displaced_read_reg (regs, dsc, rm);
5484 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5485 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5486 dsc->u.preload.immed = 0;
5487
5488 dsc->cleanup = &cleanup_preload;
5489 }
5490
5491 static int
5492 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5493 struct regcache *regs,
5494 struct displaced_step_closure *dsc)
5495 {
5496 unsigned int rn = bits (insn, 16, 19);
5497 unsigned int rm = bits (insn, 0, 3);
5498
5499
5500 if (!insn_references_pc (insn, 0x000f000ful))
5501 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5502
5503 if (debug_displaced)
5504 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5505 (unsigned long) insn);
5506
5507 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5508
5509 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5510 return 0;
5511 }
5512
5513 /* Copy/cleanup coprocessor load and store instructions. */
5514
5515 static void
5516 cleanup_copro_load_store (struct gdbarch *gdbarch,
5517 struct regcache *regs,
5518 struct displaced_step_closure *dsc)
5519 {
5520 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5521
5522 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5523
5524 if (dsc->u.ldst.writeback)
5525 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5526 }
5527
5528 static void
5529 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5530 struct displaced_step_closure *dsc,
5531 int writeback, unsigned int rn)
5532 {
5533 ULONGEST rn_val;
5534
5535 /* Coprocessor load/store instructions:
5536
5537 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5538 ->
5539 {stc/stc2} [r0, #+/-imm].
5540
5541 ldc/ldc2 are handled identically. */
5542
5543 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5544 rn_val = displaced_read_reg (regs, dsc, rn);
5545 /* PC should be 4-byte aligned. */
5546 rn_val = rn_val & 0xfffffffc;
5547 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5548
5549 dsc->u.ldst.writeback = writeback;
5550 dsc->u.ldst.rn = rn;
5551
5552 dsc->cleanup = &cleanup_copro_load_store;
5553 }
5554
5555 static int
5556 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5557 struct regcache *regs,
5558 struct displaced_step_closure *dsc)
5559 {
5560 unsigned int rn = bits (insn, 16, 19);
5561
5562 if (!insn_references_pc (insn, 0x000f0000ul))
5563 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5564
5565 if (debug_displaced)
5566 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5567 "load/store insn %.8lx\n", (unsigned long) insn);
5568
5569 dsc->modinsn[0] = insn & 0xfff0ffff;
5570
5571 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5572
5573 return 0;
5574 }
5575
5576 static int
5577 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
5578 uint16_t insn2, struct regcache *regs,
5579 struct displaced_step_closure *dsc)
5580 {
5581 unsigned int rn = bits (insn1, 0, 3);
5582
5583 if (rn != ARM_PC_REGNUM)
5584 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
5585 "copro load/store", dsc);
5586
5587 if (debug_displaced)
5588 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5589 "load/store insn %.4x%.4x\n", insn1, insn2);
5590
5591 dsc->modinsn[0] = insn1 & 0xfff0;
5592 dsc->modinsn[1] = insn2;
5593 dsc->numinsns = 2;
5594
5595 /* This function is called for copying instruction LDC/LDC2/VLDR, which
5596 doesn't support writeback, so pass 0. */
5597 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
5598
5599 return 0;
5600 }
5601
5602 /* Clean up branch instructions (actually perform the branch, by setting
5603 PC). */
5604
5605 static void
5606 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5607 struct displaced_step_closure *dsc)
5608 {
5609 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5610 int branch_taken = condition_true (dsc->u.branch.cond, status);
5611 enum pc_write_style write_pc = dsc->u.branch.exchange
5612 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5613
5614 if (!branch_taken)
5615 return;
5616
5617 if (dsc->u.branch.link)
5618 {
5619 /* The value of LR should be the next insn of current one. In order
5620 not to confuse logic hanlding later insn `bx lr', if current insn mode
5621 is Thumb, the bit 0 of LR value should be set to 1. */
5622 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
5623
5624 if (dsc->is_thumb)
5625 next_insn_addr |= 0x1;
5626
5627 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
5628 CANNOT_WRITE_PC);
5629 }
5630
5631 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5632 }
5633
5634 /* Copy B/BL/BLX instructions with immediate destinations. */
5635
5636 static void
5637 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
5638 struct displaced_step_closure *dsc,
5639 unsigned int cond, int exchange, int link, long offset)
5640 {
5641 /* Implement "BL<cond> <label>" as:
5642
5643 Preparation: cond <- instruction condition
5644 Insn: mov r0, r0 (nop)
5645 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5646
5647 B<cond> similar, but don't set r14 in cleanup. */
5648
5649 dsc->u.branch.cond = cond;
5650 dsc->u.branch.link = link;
5651 dsc->u.branch.exchange = exchange;
5652
5653 dsc->u.branch.dest = dsc->insn_addr;
5654 if (link && exchange)
5655 /* For BLX, offset is computed from the Align (PC, 4). */
5656 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
5657
5658 if (dsc->is_thumb)
5659 dsc->u.branch.dest += 4 + offset;
5660 else
5661 dsc->u.branch.dest += 8 + offset;
5662
5663 dsc->cleanup = &cleanup_branch;
5664 }

/* Copy an ARM B/BL/BLX immediate instruction: a NOP is executed in the
   scratch area and cleanup_branch performs the actual branch.  */

static int
arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
		   struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  /* The "unconditional" condition field value 0xf encodes BLX, which
     always links.  */
  int exchange = (cond == 0xf);
  int link = exchange || bit (insn, 24);
  long offset;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
			(unsigned long) insn);
  if (exchange)
    /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
       then arrange the switch into Thumb mode.  */
    offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
  else
    offset = bits (insn, 0, 23) << 2;

  /* After the shift above, bit 25 of OFFSET holds the sign of the 26-bit
     branch displacement; sign-extend it.  */
  if (bit (offset, 25))
    offset = offset | ~0x3ffffff;

  dsc->modinsn[0] = ARM_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
5693
/* Copy a 32-bit Thumb B/BL/BLX instruction: a NOP is executed in the
   scratch area and cleanup_branch performs the branch using the offset
   decoded here.  */

static int
thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
		      uint16_t insn2, struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  int link = bit (insn2, 14);
  int exchange = link && !bit (insn2, 12);
  int cond = INST_AL;
  long offset = 0;
  int j1 = bit (insn2, 13);
  int j2 = bit (insn2, 11);
  /* S is the sign of the branch offset; sbits yields 0 or -1.  NOTE:
     `s << N' below relies on a two's-complement left shift of a negative
     value to propagate the sign into the high bits of OFFSET.  */
  int s = sbits (insn1, 10, 10);
  /* I1 = NOT (J1 XOR S) and I2 = NOT (J2 XOR S), as in the ARM ARM
     description of the Thumb-2 branch immediates.  */
  int i1 = !(j1 ^ bit (insn1, 10));
  int i2 = !(j2 ^ bit (insn1, 10));

  if (!link && !exchange) /* B */
    {
      offset = (bits (insn2, 0, 10) << 1);
      if (bit (insn2, 12)) /* Encoding T4 */
	{
	  offset |= (bits (insn1, 0, 9) << 12)
	    | (i2 << 22)
	    | (i1 << 23)
	    | (s << 24);
	  cond = INST_AL;
	}
      else /* Encoding T3 */
	{
	  /* T3 is the conditional form: a smaller immediate plus a
	     condition field.  */
	  offset |= (bits (insn1, 0, 5) << 12)
	    | (j1 << 18)
	    | (j2 << 19)
	    | (s << 20);
	  cond = bits (insn1, 6, 9);
	}
    }
  else
    {
      /* BL or BLX.  For BLX the low part is imm10H:'00' (the target is
	 word-aligned); for BL it is imm11:'0'.  */
      offset = (bits (insn1, 0, 9) << 12);
      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
      offset |= exchange ?
	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
			"%.4x %.4x with offset %.8lx\n",
			link ? (exchange) ? "blx" : "bl" : "b",
			insn1, insn2, offset);

  dsc->modinsn[0] = THUMB_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
5748
5749 /* Copy B Thumb instructions. */
5750 static int
5751 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
5752 struct displaced_step_closure *dsc)
5753 {
5754 unsigned int cond = 0;
5755 int offset = 0;
5756 unsigned short bit_12_15 = bits (insn, 12, 15);
5757 CORE_ADDR from = dsc->insn_addr;
5758
5759 if (bit_12_15 == 0xd)
5760 {
5761 /* offset = SignExtend (imm8:0, 32) */
5762 offset = sbits ((insn << 1), 0, 8);
5763 cond = bits (insn, 8, 11);
5764 }
5765 else if (bit_12_15 == 0xe) /* Encoding T2 */
5766 {
5767 offset = sbits ((insn << 1), 0, 11);
5768 cond = INST_AL;
5769 }
5770
5771 if (debug_displaced)
5772 fprintf_unfiltered (gdb_stdlog,
5773 "displaced: copying b immediate insn %.4x "
5774 "with offset %d\n", insn, offset);
5775
5776 dsc->u.branch.cond = cond;
5777 dsc->u.branch.link = 0;
5778 dsc->u.branch.exchange = 0;
5779 dsc->u.branch.dest = from + 4 + offset;
5780
5781 dsc->modinsn[0] = THUMB_NOP;
5782
5783 dsc->cleanup = &cleanup_branch;
5784
5785 return 0;
5786 }
5787
5788 /* Copy BX/BLX with register-specified destinations. */
5789
5790 static void
5791 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
5792 struct displaced_step_closure *dsc, int link,
5793 unsigned int cond, unsigned int rm)
5794 {
5795 /* Implement {BX,BLX}<cond> <reg>" as:
5796
5797 Preparation: cond <- instruction condition
5798 Insn: mov r0, r0 (nop)
5799 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5800
5801 Don't set r14 in cleanup for BX. */
5802
5803 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5804
5805 dsc->u.branch.cond = cond;
5806 dsc->u.branch.link = link;
5807
5808 dsc->u.branch.exchange = 1;
5809
5810 dsc->cleanup = &cleanup_branch;
5811 }
5812
5813 static int
5814 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5815 struct regcache *regs, struct displaced_step_closure *dsc)
5816 {
5817 unsigned int cond = bits (insn, 28, 31);
5818 /* BX: x12xxx1x
5819 BLX: x12xxx3x. */
5820 int link = bit (insn, 5);
5821 unsigned int rm = bits (insn, 0, 3);
5822
5823 if (debug_displaced)
5824 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
5825 (unsigned long) insn);
5826
5827 dsc->modinsn[0] = ARM_NOP;
5828
5829 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
5830 return 0;
5831 }
5832
5833 static int
5834 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
5835 struct regcache *regs,
5836 struct displaced_step_closure *dsc)
5837 {
5838 int link = bit (insn, 7);
5839 unsigned int rm = bits (insn, 3, 6);
5840
5841 if (debug_displaced)
5842 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
5843 (unsigned short) insn);
5844
5845 dsc->modinsn[0] = THUMB_NOP;
5846
5847 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
5848
5849 return 0;
5850 }
5851
5852
5853 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5854
5855 static void
5856 cleanup_alu_imm (struct gdbarch *gdbarch,
5857 struct regcache *regs, struct displaced_step_closure *dsc)
5858 {
5859 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5860 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5861 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5862 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5863 }
5864
5865 static int
5866 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5867 struct displaced_step_closure *dsc)
5868 {
5869 unsigned int rn = bits (insn, 16, 19);
5870 unsigned int rd = bits (insn, 12, 15);
5871 unsigned int op = bits (insn, 21, 24);
5872 int is_mov = (op == 0xd);
5873 ULONGEST rd_val, rn_val;
5874
5875 if (!insn_references_pc (insn, 0x000ff000ul))
5876 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5877
5878 if (debug_displaced)
5879 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5880 "%.8lx\n", is_mov ? "move" : "ALU",
5881 (unsigned long) insn);
5882
5883 /* Instruction is of form:
5884
5885 <op><cond> rd, [rn,] #imm
5886
5887 Rewrite as:
5888
5889 Preparation: tmp1, tmp2 <- r0, r1;
5890 r0, r1 <- rd, rn
5891 Insn: <op><cond> r0, r1, #imm
5892 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5893 */
5894
5895 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5896 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5897 rn_val = displaced_read_reg (regs, dsc, rn);
5898 rd_val = displaced_read_reg (regs, dsc, rd);
5899 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5900 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5901 dsc->rd = rd;
5902
5903 if (is_mov)
5904 dsc->modinsn[0] = insn & 0xfff00fff;
5905 else
5906 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5907
5908 dsc->cleanup = &cleanup_alu_imm;
5909
5910 return 0;
5911 }
5912
/* Copy a 32-bit Thumb ALU immediate instruction for displaced stepping.
   Only the MOV form reaches this routine (see the assertion below).  */

static int
thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
		     uint16_t insn2, struct regcache *regs,
		     struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn, rm, rd;
  ULONGEST rd_val, rn_val;

  rn = bits (insn1, 0, 3); /* Rn */
  rm = bits (insn2, 0, 3); /* Rm */
  rd = bits (insn2, 8, 11); /* Rd */

  /* This routine is only called for instruction MOV.  */
  gdb_assert (op == 0x2 && rn == 0xf);

  if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
			"ALU", insn1, insn2);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  /* Save the scratch registers and load them with the operand values.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Clear the Rd field (bits 8-11 of the second halfword) so the copy
     targets r0, and set the low nibble to 1.  */
  dsc->modinsn[0] = insn1;
  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
5964
5965 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5966
5967 static void
5968 cleanup_alu_reg (struct gdbarch *gdbarch,
5969 struct regcache *regs, struct displaced_step_closure *dsc)
5970 {
5971 ULONGEST rd_val;
5972 int i;
5973
5974 rd_val = displaced_read_reg (regs, dsc, 0);
5975
5976 for (i = 0; i < 3; i++)
5977 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5978
5979 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5980 }
5981
5982 static void
5983 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
5984 struct displaced_step_closure *dsc,
5985 unsigned int rd, unsigned int rn, unsigned int rm)
5986 {
5987 ULONGEST rd_val, rn_val, rm_val;
5988
5989 /* Instruction is of form:
5990
5991 <op><cond> rd, [rn,] rm [, <shift>]
5992
5993 Rewrite as:
5994
5995 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5996 r0, r1, r2 <- rd, rn, rm
5997 Insn: <op><cond> r0, r1, r2 [, <shift>]
5998 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5999 */
6000
6001 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6002 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6003 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6004 rd_val = displaced_read_reg (regs, dsc, rd);
6005 rn_val = displaced_read_reg (regs, dsc, rn);
6006 rm_val = displaced_read_reg (regs, dsc, rm);
6007 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6008 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6009 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6010 dsc->rd = rd;
6011
6012 dsc->cleanup = &cleanup_alu_reg;
6013 }
6014
6015 static int
6016 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6017 struct displaced_step_closure *dsc)
6018 {
6019 unsigned int op = bits (insn, 21, 24);
6020 int is_mov = (op == 0xd);
6021
6022 if (!insn_references_pc (insn, 0x000ff00ful))
6023 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
6024
6025 if (debug_displaced)
6026 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
6027 is_mov ? "move" : "ALU", (unsigned long) insn);
6028
6029 if (is_mov)
6030 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
6031 else
6032 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
6033
6034 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
6035 bits (insn, 0, 3));
6036 return 0;
6037 }
6038
6039 static int
6040 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
6041 struct regcache *regs,
6042 struct displaced_step_closure *dsc)
6043 {
6044 unsigned rn, rm, rd;
6045
6046 rd = bits (insn, 3, 6);
6047 rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
6048 rm = 2;
6049
6050 if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6051 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
6052
6053 if (debug_displaced)
6054 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
6055 "ALU", (unsigned short) insn);
6056
6057 dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
6058
6059 install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
6060
6061 return 0;
6062 }
6063
6064 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6065
6066 static void
6067 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
6068 struct regcache *regs,
6069 struct displaced_step_closure *dsc)
6070 {
6071 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6072 int i;
6073
6074 for (i = 0; i < 4; i++)
6075 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6076
6077 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6078 }
6079
6080 static void
6081 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
6082 struct displaced_step_closure *dsc,
6083 unsigned int rd, unsigned int rn, unsigned int rm,
6084 unsigned rs)
6085 {
6086 int i;
6087 ULONGEST rd_val, rn_val, rm_val, rs_val;
6088
6089 /* Instruction is of form:
6090
6091 <op><cond> rd, [rn,] rm, <shift> rs
6092
6093 Rewrite as:
6094
6095 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
6096 r0, r1, r2, r3 <- rd, rn, rm, rs
6097 Insn: <op><cond> r0, r1, r2, <shift> r3
6098 Cleanup: tmp5 <- r0
6099 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
6100 rd <- tmp5
6101 */
6102
6103 for (i = 0; i < 4; i++)
6104 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6105
6106 rd_val = displaced_read_reg (regs, dsc, rd);
6107 rn_val = displaced_read_reg (regs, dsc, rn);
6108 rm_val = displaced_read_reg (regs, dsc, rm);
6109 rs_val = displaced_read_reg (regs, dsc, rs);
6110 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6111 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6112 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6113 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
6114 dsc->rd = rd;
6115 dsc->cleanup = &cleanup_alu_shifted_reg;
6116 }
6117
6118 static int
6119 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
6120 struct regcache *regs,
6121 struct displaced_step_closure *dsc)
6122 {
6123 unsigned int op = bits (insn, 21, 24);
6124 int is_mov = (op == 0xd);
6125 unsigned int rd, rn, rm, rs;
6126
6127 if (!insn_references_pc (insn, 0x000fff0ful))
6128 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
6129
6130 if (debug_displaced)
6131 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
6132 "%.8lx\n", is_mov ? "move" : "ALU",
6133 (unsigned long) insn);
6134
6135 rn = bits (insn, 16, 19);
6136 rm = bits (insn, 0, 3);
6137 rs = bits (insn, 8, 11);
6138 rd = bits (insn, 12, 15);
6139
6140 if (is_mov)
6141 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
6142 else
6143 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
6144
6145 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
6146
6147 return 0;
6148 }
6149
6150 /* Clean up load instructions. */
6151
6152 static void
6153 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
6154 struct displaced_step_closure *dsc)
6155 {
6156 ULONGEST rt_val, rt_val2 = 0, rn_val;
6157
6158 rt_val = displaced_read_reg (regs, dsc, 0);
6159 if (dsc->u.ldst.xfersize == 8)
6160 rt_val2 = displaced_read_reg (regs, dsc, 1);
6161 rn_val = displaced_read_reg (regs, dsc, 2);
6162
6163 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6164 if (dsc->u.ldst.xfersize > 4)
6165 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6166 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
6167 if (!dsc->u.ldst.immed)
6168 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
6169
6170 /* Handle register writeback. */
6171 if (dsc->u.ldst.writeback)
6172 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
6173 /* Put result in right place. */
6174 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
6175 if (dsc->u.ldst.xfersize == 8)
6176 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
6177 }
6178
6179 /* Clean up store instructions. */
6180
6181 static void
6182 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
6183 struct displaced_step_closure *dsc)
6184 {
6185 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
6186
6187 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6188 if (dsc->u.ldst.xfersize > 4)
6189 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6190 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
6191 if (!dsc->u.ldst.immed)
6192 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
6193 if (!dsc->u.ldst.restore_r4)
6194 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
6195
6196 /* Writeback. */
6197 if (dsc->u.ldst.writeback)
6198 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
6199 }
6200
6201 /* Copy "extra" load/store instructions. These are halfword/doubleword
6202 transfers, which have a different encoding to byte/word transfers. */
6203
6204 static int
6205 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
6206 struct regcache *regs, struct displaced_step_closure *dsc)
6207 {
6208 unsigned int op1 = bits (insn, 20, 24);
6209 unsigned int op2 = bits (insn, 5, 6);
6210 unsigned int rt = bits (insn, 12, 15);
6211 unsigned int rn = bits (insn, 16, 19);
6212 unsigned int rm = bits (insn, 0, 3);
6213 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6214 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6215 int immed = (op1 & 0x4) != 0;
6216 int opcode;
6217 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
6218
6219 if (!insn_references_pc (insn, 0x000ff00ful))
6220 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
6221
6222 if (debug_displaced)
6223 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
6224 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
6225 (unsigned long) insn);
6226
6227 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
6228
6229 if (opcode < 0)
6230 internal_error (__FILE__, __LINE__,
6231 _("copy_extra_ld_st: instruction decode error"));
6232
6233 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6234 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6235 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6236 if (!immed)
6237 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6238
6239 rt_val = displaced_read_reg (regs, dsc, rt);
6240 if (bytesize[opcode] == 8)
6241 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
6242 rn_val = displaced_read_reg (regs, dsc, rn);
6243 if (!immed)
6244 rm_val = displaced_read_reg (regs, dsc, rm);
6245
6246 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6247 if (bytesize[opcode] == 8)
6248 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
6249 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6250 if (!immed)
6251 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6252
6253 dsc->rd = rt;
6254 dsc->u.ldst.xfersize = bytesize[opcode];
6255 dsc->u.ldst.rn = rn;
6256 dsc->u.ldst.immed = immed;
6257 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
6258 dsc->u.ldst.restore_r4 = 0;
6259
6260 if (immed)
6261 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6262 ->
6263 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6264 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6265 else
6266 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6267 ->
6268 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6269 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6270
6271 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
6272
6273 return 0;
6274 }
6275
/* Copy byte/half word/word loads and stores.  Common worker: save the
   scratch registers, substitute r0 for Rt, r2 for Rn and (for register
   offsets) r3 for Rm, and record everything cleanup_load/cleanup_store
   need to undo the substitution.  The caller fills in dsc->modinsn.  */

static void
install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
		    struct displaced_step_closure *dsc, int load,
		    int immed, int writeback, int size, int usermode,
		    int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Save the scratch registers we are about to overwrite.  tmp[4] is only
     needed for stores, which may use r4 in the PC-store sequence below.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  /* Stage the original operand values into the scratch registers.  */
  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rt;
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from displaced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
     = addr(Insn1) + offset - addr(Insn3) - 8
     = offset - 16
     Insn4: add r4, r4, #8 r4 = offset - 8
     Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
     = from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
6331
6332
6333 static int
6334 thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
6335 uint16_t insn2, struct regcache *regs,
6336 struct displaced_step_closure *dsc, int size)
6337 {
6338 unsigned int u_bit = bit (insn1, 7);
6339 unsigned int rt = bits (insn2, 12, 15);
6340 int imm12 = bits (insn2, 0, 11);
6341 ULONGEST pc_val;
6342
6343 if (debug_displaced)
6344 fprintf_unfiltered (gdb_stdlog,
6345 "displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
6346 (unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
6347 imm12);
6348
6349 if (!u_bit)
6350 imm12 = -1 * imm12;
6351
6352 /* Rewrite instruction LDR Rt imm12 into:
6353
6354 Prepare: tmp[0] <- r0, tmp[1] <- r2, tmp[2] <- r3, r2 <- pc, r3 <- imm12
6355
6356 LDR R0, R2, R3,
6357
6358 Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[1], r3 <- tmp[2]. */
6359
6360
6361 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6362 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6363 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6364
6365 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
6366
6367 pc_val = pc_val & 0xfffffffc;
6368
6369 displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
6370 displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);
6371
6372 dsc->rd = rt;
6373
6374 dsc->u.ldst.xfersize = size;
6375 dsc->u.ldst.immed = 0;
6376 dsc->u.ldst.writeback = 0;
6377 dsc->u.ldst.restore_r4 = 0;
6378
6379 /* LDR R0, R2, R3 */
6380 dsc->modinsn[0] = 0xf852;
6381 dsc->modinsn[1] = 0x3;
6382 dsc->numinsns = 2;
6383
6384 dsc->cleanup = &cleanup_load;
6385
6386 return 0;
6387 }
6388
6389 static int
6390 thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
6391 uint16_t insn2, struct regcache *regs,
6392 struct displaced_step_closure *dsc,
6393 int writeback, int immed)
6394 {
6395 unsigned int rt = bits (insn2, 12, 15);
6396 unsigned int rn = bits (insn1, 0, 3);
6397 unsigned int rm = bits (insn2, 0, 3); /* Only valid if !immed. */
6398 /* In LDR (register), there is also a register Rm, which is not allowed to
6399 be PC, so we don't have to check it. */
6400
6401 if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6402 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
6403 dsc);
6404
6405 if (debug_displaced)
6406 fprintf_unfiltered (gdb_stdlog,
6407 "displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
6408 rt, rn, insn1, insn2);
6409
6410 install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
6411 0, rt, rm, rn);
6412
6413 dsc->u.ldst.restore_r4 = 0;
6414
6415 if (immed)
6416 /* ldr[b]<cond> rt, [rn, #imm], etc.
6417 ->
6418 ldr[b]<cond> r0, [r2, #imm]. */
6419 {
6420 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6421 dsc->modinsn[1] = insn2 & 0x0fff;
6422 }
6423 else
6424 /* ldr[b]<cond> rt, [rn, rm], etc.
6425 ->
6426 ldr[b]<cond> r0, [r2, r3]. */
6427 {
6428 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6429 dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
6430 }
6431
6432 dsc->numinsns = 2;
6433
6434 return 0;
6435 }
6436
6437
/* Copy an ARM byte/word load or store (ldr/ldrb/str/strb, optionally the
   unprivileged "t" forms).  Instructions not mentioning PC run unmodified;
   otherwise operands are remapped onto scratch registers, and storing PC
   uses a 6-instruction sequence to materialize the correct PC value (see
   the comment in install_load_store).  */

static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    struct displaced_step_closure *dsc,
			    int load, int size, int usermode)
{
  int immed = !bit (insn, 25);
  /* Writeback happens for post-indexed (P == 0) or pre-indexed with
     W set.  */
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
			load ? (size == 1 ? "ldrb" : "ldr")
			     : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
			rt, rn,
			(unsigned long) insn);

  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
		      usermode, rt, rm, rn);

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* Storing PC: compute the correct (architecture-dependent) PC value
	 into r0 via r4, then perform the store.  See install_load_store
	 for a walkthrough of this sequence.  */
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
6502
6503 /* Cleanup LDM instructions with fully-populated register list. This is an
6504 unfortunate corner case: it's impossible to implement correctly by modifying
6505 the instruction. The issue is as follows: we have an instruction,
6506
6507 ldm rN, {r0-r15}
6508
6509 which we must rewrite to avoid loading PC. A possible solution would be to
6510 do the load in two halves, something like (with suitable cleanup
6511 afterwards):
6512
6513 mov r8, rN
6514 ldm[id][ab] r8!, {r0-r7}
6515 str r7, <temp>
6516 ldm[id][ab] r8, {r7-r14}
6517 <bkpt>
6518
6519 but at present there's no suitable place for <temp>, since the scratch space
6520 is overwritten before the cleanup routine is called. For now, we simply
6521 emulate the instruction. */
6522
/* Emulate an LDM with a fully-populated register list (see the long
   comment above): read each word from memory and write it into the
   corresponding register, honoring increment/decrement and before/after
   addressing, then perform writeback if requested.  */

static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  /* Word-sized address bump applied before/after each transfer, per the
     addressing mode (ia/ib/da/db).  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Registers are transferred in ascending address order: lowest register
     first when incrementing, highest first when decrementing.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* Nothing to emulate if the condition failed.  */
  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  while (regmask)
    {
      uint32_t memword;

      /* Advance REGNO to the next register present in the mask.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Emulate base register writeback with the final transfer address.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
6581
6582 /* Clean up an STM which included the PC in the register list. */
6583
6584 static void
6585 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6586 struct displaced_step_closure *dsc)
6587 {
6588 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6589 int store_executed = condition_true (dsc->u.block.cond, status);
6590 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6591 CORE_ADDR stm_insn_addr;
6592 uint32_t pc_val;
6593 long offset;
6594 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6595
6596 /* If condition code fails, there's nothing else to do. */
6597 if (!store_executed)
6598 return;
6599
6600 if (dsc->u.block.increment)
6601 {
6602 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6603
6604 if (dsc->u.block.before)
6605 pc_stored_at += 4;
6606 }
6607 else
6608 {
6609 pc_stored_at = dsc->u.block.xfer_addr;
6610
6611 if (dsc->u.block.before)
6612 pc_stored_at -= 4;
6613 }
6614
6615 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6616 stm_insn_addr = dsc->scratch_base;
6617 offset = pc_val - stm_insn_addr;
6618
6619 if (debug_displaced)
6620 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6621 "STM instruction\n", offset);
6622
6623 /* Rewrite the stored PC to the proper value for the non-displaced original
6624 instruction. */
6625 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6626 dsc->insn_addr + offset);
6627 }
6628
6629 /* Clean up an LDM which includes the PC in the register list. We clumped all
6630 the registers in the transferred list into a contiguous range r0...rX (to
6631 avoid loading PC directly and losing control of the debugged program), so we
6632 must undo that here. */
6633
6634 static void
6635 cleanup_block_load_pc (struct gdbarch *gdbarch,
6636 struct regcache *regs,
6637 struct displaced_step_closure *dsc)
6638 {
6639 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6640 int load_executed = condition_true (dsc->u.block.cond, status), i;
6641 unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
6642 unsigned int regs_loaded = bitcount (mask);
6643 unsigned int num_to_shuffle = regs_loaded, clobbered;
6644
6645 /* The method employed here will fail if the register list is fully populated
6646 (we need to avoid loading PC directly). */
6647 gdb_assert (num_to_shuffle < 16);
6648
6649 if (!load_executed)
6650 return;
6651
6652 clobbered = (1 << num_to_shuffle) - 1;
6653
6654 while (num_to_shuffle > 0)
6655 {
6656 if ((mask & (1 << write_reg)) != 0)
6657 {
6658 unsigned int read_reg = num_to_shuffle - 1;
6659
6660 if (read_reg != write_reg)
6661 {
6662 ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
6663 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6664 if (debug_displaced)
6665 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6666 "loaded register r%d to r%d\n"), read_reg,
6667 write_reg);
6668 }
6669 else if (debug_displaced)
6670 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6671 "r%d already in the right place\n"),
6672 write_reg);
6673
6674 clobbered &= ~(1 << write_reg);
6675
6676 num_to_shuffle--;
6677 }
6678
6679 write_reg--;
6680 }
6681
6682 /* Restore any registers we scribbled over. */
6683 for (write_reg = 0; clobbered != 0; write_reg++)
6684 {
6685 if ((clobbered & (1 << write_reg)) != 0)
6686 {
6687 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6688 CANNOT_WRITE_PC);
6689 if (debug_displaced)
6690 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6691 "clobbered register r%d\n"), write_reg);
6692 clobbered &= ~(1 << write_reg);
6693 }
6694 }
6695
6696 /* Perform register writeback manually. */
6697 if (dsc->u.block.writeback)
6698 {
6699 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6700
6701 if (dsc->u.block.increment)
6702 new_rn_val += regs_loaded * 4;
6703 else
6704 new_rn_val -= regs_loaded * 4;
6705
6706 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6707 CANNOT_WRITE_PC);
6708 }
6709 }
6710
6711 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6712 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6713
6714 static int
6715 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
6716 struct regcache *regs,
6717 struct displaced_step_closure *dsc)
6718 {
6719 int load = bit (insn, 20);
6720 int user = bit (insn, 22);
6721 int increment = bit (insn, 23);
6722 int before = bit (insn, 24);
6723 int writeback = bit (insn, 21);
6724 int rn = bits (insn, 16, 19);
6725
6726 /* Block transfers which don't mention PC can be run directly
6727 out-of-line. */
6728 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
6729 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6730
6731 if (rn == ARM_PC_REGNUM)
6732 {
6733 warning (_("displaced: Unpredictable LDM or STM with "
6734 "base register r15"));
6735 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6736 }
6737
6738 if (debug_displaced)
6739 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6740 "%.8lx\n", (unsigned long) insn);
6741
6742 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6743 dsc->u.block.rn = rn;
6744
6745 dsc->u.block.load = load;
6746 dsc->u.block.user = user;
6747 dsc->u.block.increment = increment;
6748 dsc->u.block.before = before;
6749 dsc->u.block.writeback = writeback;
6750 dsc->u.block.cond = bits (insn, 28, 31);
6751
6752 dsc->u.block.regmask = insn & 0xffff;
6753
6754 if (load)
6755 {
6756 if ((insn & 0xffff) == 0xffff)
6757 {
6758 /* LDM with a fully-populated register list. This case is
6759 particularly tricky. Implement for now by fully emulating the
6760 instruction (which might not behave perfectly in all cases, but
6761 these instructions should be rare enough for that not to matter
6762 too much). */
6763 dsc->modinsn[0] = ARM_NOP;
6764
6765 dsc->cleanup = &cleanup_block_load_all;
6766 }
6767 else
6768 {
6769 /* LDM of a list of registers which includes PC. Implement by
6770 rewriting the list of registers to be transferred into a
6771 contiguous chunk r0...rX before doing the transfer, then shuffling
6772 registers into the correct places in the cleanup routine. */
6773 unsigned int regmask = insn & 0xffff;
6774 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6775 unsigned int to = 0, from = 0, i, new_rn;
6776
6777 for (i = 0; i < num_in_list; i++)
6778 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6779
6780 /* Writeback makes things complicated. We need to avoid clobbering
6781 the base register with one of the registers in our modified
6782 register list, but just using a different register can't work in
6783 all cases, e.g.:
6784
6785 ldm r14!, {r0-r13,pc}
6786
6787 which would need to be rewritten as:
6788
6789 ldm rN!, {r0-r14}
6790
6791 but that can't work, because there's no free register for N.
6792
6793 Solve this by turning off the writeback bit, and emulating
6794 writeback manually in the cleanup routine. */
6795
6796 if (writeback)
6797 insn &= ~(1 << 21);
6798
6799 new_regmask = (1 << num_in_list) - 1;
6800
6801 if (debug_displaced)
6802 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6803 "{..., pc}: original reg list %.4x, modified "
6804 "list %.4x\n"), rn, writeback ? "!" : "",
6805 (int) insn & 0xffff, new_regmask);
6806
6807 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6808
6809 dsc->cleanup = &cleanup_block_load_pc;
6810 }
6811 }
6812 else
6813 {
6814 /* STM of a list of registers which includes PC. Run the instruction
6815 as-is, but out of line: this will store the wrong value for the PC,
6816 so we must manually fix up the memory in the cleanup routine.
6817 Doing things this way has the advantage that we can auto-detect
6818 the offset of the PC write (which is architecture-dependent) in
6819 the cleanup routine. */
6820 dsc->modinsn[0] = insn;
6821
6822 dsc->cleanup = &cleanup_block_store_pc;
6823 }
6824
6825 return 0;
6826 }
6827
6828 static int
6829 thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
6830 struct regcache *regs,
6831 struct displaced_step_closure *dsc)
6832 {
6833 int rn = bits (insn1, 0, 3);
6834 int load = bit (insn1, 4);
6835 int writeback = bit (insn1, 5);
6836
6837 /* Block transfers which don't mention PC can be run directly
6838 out-of-line. */
6839 if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
6840 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
6841
6842 if (rn == ARM_PC_REGNUM)
6843 {
6844 warning (_("displaced: Unpredictable LDM or STM with "
6845 "base register r15"));
6846 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
6847 "unpredictable ldm/stm", dsc);
6848 }
6849
6850 if (debug_displaced)
6851 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6852 "%.4x%.4x\n", insn1, insn2);
6853
6854 /* Clear bit 13, since it should be always zero. */
6855 dsc->u.block.regmask = (insn2 & 0xdfff);
6856 dsc->u.block.rn = rn;
6857
6858 dsc->u.block.load = load;
6859 dsc->u.block.user = 0;
6860 dsc->u.block.increment = bit (insn1, 7);
6861 dsc->u.block.before = bit (insn1, 8);
6862 dsc->u.block.writeback = writeback;
6863 dsc->u.block.cond = INST_AL;
6864 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6865
6866 if (load)
6867 {
6868 if (dsc->u.block.regmask == 0xffff)
6869 {
6870 /* This branch is impossible to happen. */
6871 gdb_assert (0);
6872 }
6873 else
6874 {
6875 unsigned int regmask = dsc->u.block.regmask;
6876 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6877 unsigned int to = 0, from = 0, i, new_rn;
6878
6879 for (i = 0; i < num_in_list; i++)
6880 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6881
6882 if (writeback)
6883 insn1 &= ~(1 << 5);
6884
6885 new_regmask = (1 << num_in_list) - 1;
6886
6887 if (debug_displaced)
6888 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6889 "{..., pc}: original reg list %.4x, modified "
6890 "list %.4x\n"), rn, writeback ? "!" : "",
6891 (int) dsc->u.block.regmask, new_regmask);
6892
6893 dsc->modinsn[0] = insn1;
6894 dsc->modinsn[1] = (new_regmask & 0xffff);
6895 dsc->numinsns = 2;
6896
6897 dsc->cleanup = &cleanup_block_load_pc;
6898 }
6899 }
6900 else
6901 {
6902 dsc->modinsn[0] = insn1;
6903 dsc->modinsn[1] = insn2;
6904 dsc->numinsns = 2;
6905 dsc->cleanup = &cleanup_block_store_pc;
6906 }
6907 return 0;
6908 }
6909
6910 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6911 for Linux, where some SVC instructions must be treated specially. */
6912
6913 static void
6914 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6915 struct displaced_step_closure *dsc)
6916 {
6917 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
6918
6919 if (debug_displaced)
6920 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6921 "%.8lx\n", (unsigned long) resume_addr);
6922
6923 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6924 }
6925
6926
6927 /* Common copy routine for svc instruciton. */
6928
6929 static int
6930 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
6931 struct displaced_step_closure *dsc)
6932 {
6933 /* Preparation: none.
6934 Insn: unmodified svc.
6935 Cleanup: pc <- insn_addr + insn_size. */
6936
6937 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6938 instruction. */
6939 dsc->wrote_to_pc = 1;
6940
6941 /* Allow OS-specific code to override SVC handling. */
6942 if (dsc->u.svc.copy_svc_os)
6943 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
6944 else
6945 {
6946 dsc->cleanup = &cleanup_svc;
6947 return 0;
6948 }
6949 }
6950
6951 static int
6952 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
6953 struct regcache *regs, struct displaced_step_closure *dsc)
6954 {
6955
6956 if (debug_displaced)
6957 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6958 (unsigned long) insn);
6959
6960 dsc->modinsn[0] = insn;
6961
6962 return install_svc (gdbarch, regs, dsc);
6963 }
6964
6965 static int
6966 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
6967 struct regcache *regs, struct displaced_step_closure *dsc)
6968 {
6969
6970 if (debug_displaced)
6971 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
6972 insn);
6973
6974 dsc->modinsn[0] = insn;
6975
6976 return install_svc (gdbarch, regs, dsc);
6977 }
6978
6979 /* Copy undefined instructions. */
6980
6981 static int
6982 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6983 struct displaced_step_closure *dsc)
6984 {
6985 if (debug_displaced)
6986 fprintf_unfiltered (gdb_stdlog,
6987 "displaced: copying undefined insn %.8lx\n",
6988 (unsigned long) insn);
6989
6990 dsc->modinsn[0] = insn;
6991
6992 return 0;
6993 }
6994
6995 static int
6996 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
6997 struct displaced_step_closure *dsc)
6998 {
6999
7000 if (debug_displaced)
7001 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
7002 "%.4x %.4x\n", (unsigned short) insn1,
7003 (unsigned short) insn2);
7004
7005 dsc->modinsn[0] = insn1;
7006 dsc->modinsn[1] = insn2;
7007 dsc->numinsns = 2;
7008
7009 return 0;
7010 }
7011
7012 /* Copy unpredictable instructions. */
7013
7014 static int
7015 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
7016 struct displaced_step_closure *dsc)
7017 {
7018 if (debug_displaced)
7019 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
7020 "%.8lx\n", (unsigned long) insn);
7021
7022 dsc->modinsn[0] = insn;
7023
7024 return 0;
7025 }
7026
7027 /* The decode_* functions are instruction decoding helpers. They mostly follow
7028 the presentation in the ARM ARM. */
7029
/* Decode the miscellaneous / memory hint / advanced SIMD space (ARM ARM
   table for unconditional instructions with bit 27 clear): dispatch on
   op1 (bits 20-26) and op2 (bits 4-7) to the appropriate copy routine.
   Most of these never reference PC and are copied unmodified; only the
   preload (pld/pli) forms need operand rewriting.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      /* pld/pldw with immediate: literal form (rn == pc) would read from
	 the scratch area, but rn == 0xf here is unpredictable.  */
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    /* Memory barriers: no operands, copy unmodified.  */
    switch (op2)
      {
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
        /* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
7089
/* Decode the ARM unconditional (cond == 0b1111) instruction space:
   misc/memhint/NEON when bit 27 is clear, otherwise srs/rfe, blx,
   coprocessor load/store, and mcr/mrc/cdp variants.  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      /* blx with immediate offset (always references PC).  */
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* Literal (PC-relative) forms are distinguished by rn == pc.  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7172
7173 /* Decode miscellaneous instructions in dp/misc encoding space. */
7174
/* Decode the miscellaneous-instruction region of the data-processing
   space (op2 in bits 4-6, op in bits 21-22): mrs/msr, bx/blx/bxj, clz,
   saturating add/sub, bkpt and smc.  Only bx/blx need PC-aware copying;
   the rest are copied unmodified.  */

static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				    regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      /* Other op values fall through to the undefined case.  */
      /* fallthrough */

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7225
/* Decode data-processing and miscellaneous instructions (op bits 27:25
   of INSN are 00x) and dispatch to the appropriate displaced-stepping
   copy routine.  Returns non-zero on decode failure.  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    /* Data-processing (immediate) space.  */
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	/* Data-processing (register).  */
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	/* Data-processing (register-shifted register).  */
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7271
/* Decode load/store word and unsigned byte instructions (op bits 27:25
   of INSN are 01x) for displaced stepping.  The last three arguments to
   arm_copy_ldr_str_ldrb_strb are (load, size, usermode): load is 1 for
   loads and 0 for stores, size is the transfer size in bytes, and
   usermode marks the "T" (unprivileged) variants.  Returns non-zero on
   decode failure.  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    /* str.  */
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    /* strt.  */
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    /* ldr.  */
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    /* ldrt.  */
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    /* strb.  */
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    /* strbt.  */
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    /* ldrb.  */
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    /* ldrbt.  */
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
7309
/* Decode media instructions (op bits 27:25 == 011, bit 4 set) for
   displaced stepping.  Everything here is either copied unmodified or
   flagged as undefined; no PC rewriting is required.  Returns non-zero
   on decode failure.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
				  "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  /* Rd == 0xf distinguishes usad8 from usada8.  */
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  /* Rn == 0xf distinguishes bfc from bfi.  */
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7365
/* Decode branch, branch-with-link and block data transfer instructions
   (op bits 27:25 == 10x) for displaced stepping.  Note: INSN is
   uint32_t for consistency with the other decode routines; the previous
   int32_t declaration risked an implementation-defined conversion for
   instructions with bit 31 set.  Returns non-zero on decode failure.  */

static int
arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, uint32_t insn,
			struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  /* Bit 25 distinguishes b/bl/blx (1) from ldm/stm (0).  */
  if (bit (insn, 25))
    return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
  else
    return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
}
7376
/* Decode VFP/Neon extension register load/store instructions for
   displaced stepping.  Returns non-zero on decode failure.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn, 20, 24);

  /* VFP/Neon mrrc/mcrr.  */
  if (op == 0x04 || op == 0x05)
    return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

  /* vstm/vpush.  */
  if (op == 0x08 || op == 0x0a || op == 0x0c || op == 0x0e
      || op == 0x12 || op == 0x16)
    return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

  /* vldm/vpop.  */
  if (op == 0x09 || op == 0x0b || op == 0x0d || op == 0x0f
      || op == 0x13 || op == 0x17)
    return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

  /* vstr/vldr.  Note: no writeback for these instructions.  Bit 25
     will always be zero though (via caller), so the following works
     OK.  */
  if (op == 0x10 || op == 0x14 || op == 0x18 || op == 0x1c
      || op == 0x11 || op == 0x15 || op == 0x19 || op == 0x1d)
    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

  /* Should be unreachable.  */
  return 1;
}
7407
/* Decode Thumb-2 data-processing (shifted register) instructions for
   displaced stepping.  */

static int
thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
			    uint16_t insn2, struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  /* In this encoding space only MOV (op == 0x2 with Rn == PC) may
     reference the PC; everything else can be copied unmodified.  */
  unsigned int opcode = bits (insn1, 5, 8);
  unsigned int first_operand = bits (insn1, 0, 3);

  if (opcode != 0x2 || first_operand != 0xf)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"dp (shift reg)", dsc);

  /* MOV.  */
  return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
}
7426
7427
/* Decode extension register load/store.  Exactly the same as
   arm_decode_ext_reg_ld_st.  */

static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
			     uint16_t insn2, struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn1, 4, 8);

  /* VFP/Neon register transfers.  */
  if (op == 0x04 || op == 0x05)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"vfp/neon vmov", dsc);

  /* Opcode patterns 01x00, 01x10 and 10x10.  */
  if (op == 0x08 || op == 0x0a || op == 0x0c || op == 0x0e
      || op == 0x12 || op == 0x16)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"vfp/neon vstm/vpush", dsc);

  /* Opcode patterns 01x01, 01x11 and 10x11.  */
  if (op == 0x09 || op == 0x0b || op == 0x0d || op == 0x0f
      || op == 0x13 || op == 0x17)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"vfp/neon vldm/vpop", dsc);

  /* vstr.  */
  if (op == 0x10 || op == 0x14 || op == 0x18 || op == 0x1c)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"vstr", dsc);

  /* vldr: may be PC-relative, so goes through the copro copy path.  */
  if (op == 0x11 || op == 0x15 || op == 0x19 || op == 0x1d)
    return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);

  /* Should be unreachable.  */
  return 1;
}
7466
7467 static int
7468 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
7469 struct regcache *regs, struct displaced_step_closure *dsc)
7470 {
7471 unsigned int op1 = bits (insn, 20, 25);
7472 int op = bit (insn, 4);
7473 unsigned int coproc = bits (insn, 8, 11);
7474 unsigned int rn = bits (insn, 16, 19);
7475
7476 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
7477 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
7478 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
7479 && (coproc & 0xe) != 0xa)
7480 /* stc/stc2. */
7481 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7482 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
7483 && (coproc & 0xe) != 0xa)
7484 /* ldc/ldc2 imm/lit. */
7485 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7486 else if ((op1 & 0x3e) == 0x00)
7487 return arm_copy_undef (gdbarch, insn, dsc);
7488 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
7489 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
7490 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
7491 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7492 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
7493 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7494 else if ((op1 & 0x30) == 0x20 && !op)
7495 {
7496 if ((coproc & 0xe) == 0xa)
7497 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
7498 else
7499 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7500 }
7501 else if ((op1 & 0x30) == 0x20 && op)
7502 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
7503 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
7504 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7505 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
7506 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7507 else if ((op1 & 0x30) == 0x30)
7508 return arm_copy_svc (gdbarch, insn, regs, dsc);
7509 else
7510 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
7511 }
7512
/* Decode Thumb-2 coprocessor, SIMD and supervisor-call instructions for
   displaced stepping.  Returns non-zero on decode failure.  */

static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
			 uint16_t insn2, struct regcache *regs,
			 struct displaced_step_closure *dsc)
{
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
					    dsc);
      else if (bit_5_8 == 0)  /* UNDEFINED.  */
	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
	{
	  /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
	  if ((coproc & 0xe) == 0xa)
	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
						dsc);
	  else /* coproc is not 101x.  */
	    {
	      if (bit_4 == 0)  /* STC/STC2.  */
		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						    "stc/stc2", dsc);
	      else  /* LDC/LDC2 {literal, immediate}.  */
		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
						     regs, dsc);
	    }
	}
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  return 0;
}
7555
7556 static void
7557 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
7558 struct displaced_step_closure *dsc, int rd)
7559 {
7560 /* ADR Rd, #imm
7561
7562 Rewrite as:
7563
7564 Preparation: Rd <- PC
7565 Insn: ADD Rd, #imm
7566 Cleanup: Null.
7567 */
7568
7569 /* Rd <- PC */
7570 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7571 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
7572 }
7573
7574 static int
7575 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
7576 struct displaced_step_closure *dsc,
7577 int rd, unsigned int imm)
7578 {
7579
7580 /* Encoding T2: ADDS Rd, #imm */
7581 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
7582
7583 install_pc_relative (gdbarch, regs, dsc, rd);
7584
7585 return 0;
7586 }
7587
7588 static int
7589 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
7590 struct regcache *regs,
7591 struct displaced_step_closure *dsc)
7592 {
7593 unsigned int rd = bits (insn, 8, 10);
7594 unsigned int imm8 = bits (insn, 0, 7);
7595
7596 if (debug_displaced)
7597 fprintf_unfiltered (gdb_stdlog,
7598 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
7599 rd, imm8, insn);
7600
7601 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
7602 }
7603
/* Copy a 32-bit Thumb ADR instruction, substituting an ADD or SUB
   immediate (both encoding T3) with Rd pre-loaded with the PC value by
   install_pc_relative.  */

static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
			      uint16_t insn2, struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn2, 8, 11);
  /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
     extract raw immediate encoding rather than computing immediate.  When
     generating ADD or SUB instruction, we can simply perform OR operation to
     set immediate into ADD.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;  /* imm3 (bits 14:12) and imm8.  */
  unsigned int imm_i = insn1 & 0x0400;  /* Clear all bits except bit 10 (i).  */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
			rd, imm_i, imm_3_8, insn1, insn2);

  if (bit (insn1, 7)) /* ADR encoding T2 (the SUB form).  */
    {
      /* Substitute "SUB Rd, Rd, #imm" (SUB immediate, encoding T3),
	 OR-ing the raw immediate fields in unchanged.  */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else /* ADR encoding T3 (the ADD form).  */
    {
      /* Substitute "ADD Rd, Rd, #imm" (ADD immediate, encoding T3).  */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  dsc->numinsns = 2;

  /* Preparation step: Rd <- PC.  */
  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
7640
7641 static int
7642 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
7643 struct regcache *regs,
7644 struct displaced_step_closure *dsc)
7645 {
7646 unsigned int rt = bits (insn1, 8, 10);
7647 unsigned int pc;
7648 int imm8 = (bits (insn1, 0, 7) << 2);
7649 CORE_ADDR from = dsc->insn_addr;
7650
7651 /* LDR Rd, #imm8
7652
7653 Rwrite as:
7654
7655 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
7656
7657 Insn: LDR R0, [R2, R3];
7658 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
7659
7660 if (debug_displaced)
7661 fprintf_unfiltered (gdb_stdlog,
7662 "displaced: copying thumb ldr r%d [pc #%d]\n"
7663 , rt, imm8);
7664
7665 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
7666 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
7667 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
7668 pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7669 /* The assembler calculates the required value of the offset from the
7670 Align(PC,4) value of this instruction to the label. */
7671 pc = pc & 0xfffffffc;
7672
7673 displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
7674 displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
7675
7676 dsc->rd = rt;
7677 dsc->u.ldst.xfersize = 4;
7678 dsc->u.ldst.rn = 0;
7679 dsc->u.ldst.immed = 0;
7680 dsc->u.ldst.writeback = 0;
7681 dsc->u.ldst.restore_r4 = 0;
7682
7683 dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
7684
7685 dsc->cleanup = &cleanup_load;
7686
7687 return 0;
7688 }
7689
7690 /* Copy Thumb cbnz/cbz insruction. */
7691
7692 static int
7693 thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
7694 struct regcache *regs,
7695 struct displaced_step_closure *dsc)
7696 {
7697 int non_zero = bit (insn1, 11);
7698 unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
7699 CORE_ADDR from = dsc->insn_addr;
7700 int rn = bits (insn1, 0, 2);
7701 int rn_val = displaced_read_reg (regs, dsc, rn);
7702
7703 dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
7704 /* CBNZ and CBZ do not affect the condition flags. If condition is true,
7705 set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
7706 condition is false, let it be, cleanup_branch will do nothing. */
7707 if (dsc->u.branch.cond)
7708 {
7709 dsc->u.branch.cond = INST_AL;
7710 dsc->u.branch.dest = from + 4 + imm5;
7711 }
7712 else
7713 dsc->u.branch.dest = from + 2;
7714
7715 dsc->u.branch.link = 0;
7716 dsc->u.branch.exchange = 0;
7717
7718 if (debug_displaced)
7719 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
7720 " insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
7721 rn, rn_val, insn1, dsc->u.branch.dest);
7722
7723 dsc->modinsn[0] = THUMB_NOP;
7724
7725 dsc->cleanup = &cleanup_branch;
7726 return 0;
7727 }
7728
7729 /* Copy Table Branch Byte/Halfword */
7730 static int
7731 thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
7732 uint16_t insn2, struct regcache *regs,
7733 struct displaced_step_closure *dsc)
7734 {
7735 ULONGEST rn_val, rm_val;
7736 int is_tbh = bit (insn2, 4);
7737 CORE_ADDR halfwords = 0;
7738 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7739
7740 rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
7741 rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));
7742
7743 if (is_tbh)
7744 {
7745 gdb_byte buf[2];
7746
7747 target_read_memory (rn_val + 2 * rm_val, buf, 2);
7748 halfwords = extract_unsigned_integer (buf, 2, byte_order);
7749 }
7750 else
7751 {
7752 gdb_byte buf[1];
7753
7754 target_read_memory (rn_val + rm_val, buf, 1);
7755 halfwords = extract_unsigned_integer (buf, 1, byte_order);
7756 }
7757
7758 if (debug_displaced)
7759 fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
7760 " offset 0x%x\n", is_tbh ? "tbh" : "tbb",
7761 (unsigned int) rn_val, (unsigned int) rm_val,
7762 (unsigned int) halfwords);
7763
7764 dsc->u.branch.cond = INST_AL;
7765 dsc->u.branch.link = 0;
7766 dsc->u.branch.exchange = 0;
7767 dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;
7768
7769 dsc->cleanup = &cleanup_branch;
7770
7771 return 0;
7772 }
7773
7774 static void
7775 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
7776 struct displaced_step_closure *dsc)
7777 {
7778 /* PC <- r7 */
7779 int val = displaced_read_reg (regs, dsc, 7);
7780 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
7781
7782 /* r7 <- r8 */
7783 val = displaced_read_reg (regs, dsc, 8);
7784 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
7785
7786 /* r8 <- tmp[0] */
7787 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
7788
7789 }
7790
7791 static int
7792 thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
7793 struct regcache *regs,
7794 struct displaced_step_closure *dsc)
7795 {
7796 dsc->u.block.regmask = insn1 & 0x00ff;
7797
7798 /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
7799 to :
7800
7801 (1) register list is full, that is, r0-r7 are used.
7802 Prepare: tmp[0] <- r8
7803
7804 POP {r0, r1, ...., r6, r7}; remove PC from reglist
7805 MOV r8, r7; Move value of r7 to r8;
7806 POP {r7}; Store PC value into r7.
7807
7808 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
7809
7810 (2) register list is not full, supposing there are N registers in
7811 register list (except PC, 0 <= N <= 7).
7812 Prepare: for each i, 0 - N, tmp[i] <- ri.
7813
7814 POP {r0, r1, ...., rN};
7815
7816 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
7817 from tmp[] properly.
7818 */
7819 if (debug_displaced)
7820 fprintf_unfiltered (gdb_stdlog,
7821 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
7822 dsc->u.block.regmask, insn1);
7823
7824 if (dsc->u.block.regmask == 0xff)
7825 {
7826 dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
7827
7828 dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
7829 dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
7830 dsc->modinsn[2] = 0xbc80; /* POP {r7} */
7831
7832 dsc->numinsns = 3;
7833 dsc->cleanup = &cleanup_pop_pc_16bit_all;
7834 }
7835 else
7836 {
7837 unsigned int num_in_list = bitcount (dsc->u.block.regmask);
7838 unsigned int new_regmask, bit = 1;
7839 unsigned int to = 0, from = 0, i, new_rn;
7840
7841 for (i = 0; i < num_in_list + 1; i++)
7842 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7843
7844 new_regmask = (1 << (num_in_list + 1)) - 1;
7845
7846 if (debug_displaced)
7847 fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
7848 "{..., pc}: original reg list %.4x,"
7849 " modified list %.4x\n"),
7850 (int) dsc->u.block.regmask, new_regmask);
7851
7852 dsc->u.block.regmask |= 0x8000;
7853 dsc->u.block.writeback = 0;
7854 dsc->u.block.cond = INST_AL;
7855
7856 dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
7857
7858 dsc->cleanup = &cleanup_block_load_pc;
7859 }
7860
7861 return 0;
7862 }
7863
/* Decode a 16-bit Thumb instruction and copy it for displaced stepping.
   Calls internal_error if the instruction cannot be decoded.  */

static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions.  */
  switch (op_bit_12_15)
    {
      /* Shift (immediate), add, subtract, move and compare.  */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0: /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1: /* Special data instructions and branch and exchange.  */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7) /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default: /* LDR (literal) */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3: case 9: case 11: /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13: /* POP */
	    if (bit (insn1, 8)) /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15: /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction.  */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
7966
/* Decode 32-bit Thumb load and memory-hint instructions (PLD/PLI,
   LDRB/LDRH/LDR and their variants) for displaced stepping.  Returns
   non-zero on decode failure.  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
				 uint16_t insn1, uint16_t insn2,
				 struct regcache *regs,
				 struct displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);
  int rn = bits (insn1, 0, 3);
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
	{
	  if (rn == 0xf)
	    /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"pli/pld", dsc);
	}
      else
	{
	  if (rn == 0xf) /* LDRB/LDRSB (literal) */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     1);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrb{reg, immediate}/ldrbt",
						dsc);
	}

    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "pld/unalloc memhint", dsc);
      else
	{
	  if (rn == 0xf)
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     2);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrh/ldrht", dsc);
	}

    case 2: /* Load word */
      {
	int insn2_bit_8_11 = bits (insn2, 8, 11);

	if (rn == 0xf)
	  return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
	else if (op1 == 0x1) /* Encoding T3 */
	  return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
					   0, 1);
	else /* op1 == 0x0 */
	  {
	    if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
	      /* LDR (immediate) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, bit (insn2, 8), 1);
	    else if (insn2_bit_8_11 == 0xe) /* LDRT */
	      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						  "ldrt", dsc);
	    else
	      /* LDR (register) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, 0, 0);
	  }
      }
    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
    }
  return 0;
}
8047
/* Decode a 32-bit Thumb instruction (INSN1 is the first halfword, INSN2
   the second) and copy it for displaced stepping.  Calls internal_error
   if the instruction cannot be decoded.  */

static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
	switch (bits (insn1, 9, 10))
	  {
	  case 0:
	    if (bit (insn1, 6))
	      {
		/* Load/store {dual, exclusive}, table branch.  */
		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
		    && bits (insn2, 5, 7) == 0)
		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
						  dsc);
		else
		  /* PC is not allowed to be used in load/store {dual,
		     exclusive} instructions.  */
		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						     "load/store dual/ex", dsc);
	      }
	    else /* load/store multiple */
	      {
		switch (bits (insn1, 7, 8))
		  {
		  case 0: case 3: /* SRS, RFE */
		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						       "srs/rfe", dsc);
		    break;
		  case 1: case 2: /* LDM/STM/PUSH/POP */
		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
		    break;
		  }
	      }
	    break;

	  case 1:
	    /* Data-processing (shift register).  */
	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
					      dsc);
	    break;
	  default: /* Coprocessor instructions.  */
	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	    break;
	  }
	break;
      }
    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
	{
	  if (bit (insn2, 14)  /* BLX/BL */
	      || bit (insn2, 12)  /* Unconditional branch */
	      || (bits (insn1, 7, 9) != 0x7))  /* Conditional branch */
	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
	  else
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "misc ctrl", dsc);
	}
      else
	{
	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
	    {
	      int op = bits (insn1, 4, 8);
	      int rn = bits (insn1, 0, 3);
	      /* ADR (op 0 is ADD form, op 0xa is SUB form) needs PC
		 rewriting; the rest cannot reference the PC.  */
	      if ((op == 0 || op == 0xa) && rn == 0xf)
		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
						    regs, dsc);
	      else
		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						   "dp/pb", dsc);
	    }
	  else /* Data processing (modified immediate) */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "dp/mi", dsc);
	}
      break;
    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
	{
	case 0:
	  if (bit (insn1, 4))
	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
						   regs, dsc);
	  else /* NEON Load/Store and Store single data item */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "neon elt/struct load/store",
					       dsc);
	  break;
	case 1: /* op1 = 3, bits (9, 10) == 1 */
	  switch (bits (insn1, 7, 8))
	    {
	    case 0: case 1: /* Data processing (register) */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "dp(reg)", dsc);
	      break;
	    case 2: /* Multiply and absolute difference */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "mul/mua/diff", dsc);
	      break;
	    case 3: /* Long multiply and divide */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "lmul/lmua", dsc);
	      break;
	    }
	  break;
	default: /* Coprocessor instructions */
	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	  break;
	}
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));

}
8175
8176 static void
8177 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8178 CORE_ADDR to, struct regcache *regs,
8179 struct displaced_step_closure *dsc)
8180 {
8181 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8182 uint16_t insn1
8183 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
8184
8185 if (debug_displaced)
8186 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
8187 "at %.8lx\n", insn1, (unsigned long) from);
8188
8189 dsc->is_thumb = 1;
8190 dsc->insn_size = thumb_insn_size (insn1);
8191 if (thumb_insn_size (insn1) == 4)
8192 {
8193 uint16_t insn2
8194 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
8195 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
8196 }
8197 else
8198 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
8199 }
8200
8201 void
8202 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8203 CORE_ADDR to, struct regcache *regs,
8204 struct displaced_step_closure *dsc)
8205 {
8206 int err = 0;
8207 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8208 uint32_t insn;
8209
8210 /* Most displaced instructions use a 1-instruction scratch space, so set this
8211 here and override below if/when necessary. */
8212 dsc->numinsns = 1;
8213 dsc->insn_addr = from;
8214 dsc->scratch_base = to;
8215 dsc->cleanup = NULL;
8216 dsc->wrote_to_pc = 0;
8217
8218 if (!displaced_in_arm_mode (regs))
8219 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
8220
8221 dsc->is_thumb = 0;
8222 dsc->insn_size = 4;
8223 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
8224 if (debug_displaced)
8225 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
8226 "at %.8lx\n", (unsigned long) insn,
8227 (unsigned long) from);
8228
8229 if ((insn & 0xf0000000) == 0xf0000000)
8230 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
8231 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
8232 {
8233 case 0x0: case 0x1: case 0x2: case 0x3:
8234 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
8235 break;
8236
8237 case 0x4: case 0x5: case 0x6:
8238 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
8239 break;
8240
8241 case 0x7:
8242 err = arm_decode_media (gdbarch, insn, dsc);
8243 break;
8244
8245 case 0x8: case 0x9: case 0xa: case 0xb:
8246 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
8247 break;
8248
8249 case 0xc: case 0xd: case 0xe: case 0xf:
8250 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
8251 break;
8252 }
8253
8254 if (err)
8255 internal_error (__FILE__, __LINE__,
8256 _("arm_process_displaced_insn: Instruction decode error"));
8257 }
8258
8259 /* Actually set up the scratch space for a displaced instruction. */
8260
8261 void
8262 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
8263 CORE_ADDR to, struct displaced_step_closure *dsc)
8264 {
8265 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8266 unsigned int i, len, offset;
8267 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8268 int size = dsc->is_thumb? 2 : 4;
8269 const unsigned char *bkp_insn;
8270
8271 offset = 0;
8272 /* Poke modified instruction(s). */
8273 for (i = 0; i < dsc->numinsns; i++)
8274 {
8275 if (debug_displaced)
8276 {
8277 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
8278 if (size == 4)
8279 fprintf_unfiltered (gdb_stdlog, "%.8lx",
8280 dsc->modinsn[i]);
8281 else if (size == 2)
8282 fprintf_unfiltered (gdb_stdlog, "%.4x",
8283 (unsigned short)dsc->modinsn[i]);
8284
8285 fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
8286 (unsigned long) to + offset);
8287
8288 }
8289 write_memory_unsigned_integer (to + offset, size,
8290 byte_order_for_code,
8291 dsc->modinsn[i]);
8292 offset += size;
8293 }
8294
8295 /* Choose the correct breakpoint instruction. */
8296 if (dsc->is_thumb)
8297 {
8298 bkp_insn = tdep->thumb_breakpoint;
8299 len = tdep->thumb_breakpoint_size;
8300 }
8301 else
8302 {
8303 bkp_insn = tdep->arm_breakpoint;
8304 len = tdep->arm_breakpoint_size;
8305 }
8306
8307 /* Put breakpoint afterwards. */
8308 write_memory (to + offset, bkp_insn, len);
8309
8310 if (debug_displaced)
8311 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
8312 paddress (gdbarch, from), paddress (gdbarch, to));
8313 }
8314
8315 /* Entry point for copying an instruction into scratch space for displaced
8316 stepping. */
8317
8318 struct displaced_step_closure *
8319 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
8320 CORE_ADDR from, CORE_ADDR to,
8321 struct regcache *regs)
8322 {
8323 struct displaced_step_closure *dsc
8324 = xmalloc (sizeof (struct displaced_step_closure));
8325 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
8326 arm_displaced_init_closure (gdbarch, from, to, dsc);
8327
8328 return dsc;
8329 }
8330
8331 /* Entry point for cleaning things up after a displaced instruction has been
8332 single-stepped. */
8333
8334 void
8335 arm_displaced_step_fixup (struct gdbarch *gdbarch,
8336 struct displaced_step_closure *dsc,
8337 CORE_ADDR from, CORE_ADDR to,
8338 struct regcache *regs)
8339 {
8340 if (dsc->cleanup)
8341 dsc->cleanup (gdbarch, regs, dsc);
8342
8343 if (!dsc->wrote_to_pc)
8344 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
8345 dsc->insn_addr + dsc->insn_size);
8346
8347 }
8348
8349 #include "bfd-in2.h"
8350 #include "libcoff.h"
8351
/* Disassembler callback: print the instruction at MEMADDR.  When the
   address is classified as Thumb, install a fake Thumb COFF symbol in
   INFO so the opcodes disassembler switches to Thumb decoding.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Built lazily on first use and reused thereafter.  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit before handing the address to opcodes.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
8393
8394 /* The following define instruction sequences that will cause ARM
8395 cpu's to take an undefined instruction trap. These are used to
8396 signal a breakpoint to GDB.
8397
8398 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8399 modes. A different instruction is required for each mode. The ARM
8400 cpu's can also be big or little endian. Thus four different
8401 instructions are needed to support all cases.
8402
8403 Note: ARMv4 defines several new instructions that will take the
8404 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8405 not in fact add the new instructions. The new undefined
8406 instructions in ARMv4 are all instructions that had no defined
8407 behaviour in earlier chips. There is no guarantee that they will
8408 raise an exception, but may be treated as NOP's. In practice, it
8409 may only safe to rely on instructions matching:
8410
8411 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8412 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8413 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8414
   Even this may only be true if the condition predicate is true.  The
8416 following use a condition predicate of ALWAYS so it is always TRUE.
8417
8418 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8419 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.  This can be handled by the
8421 abi-specific code during establishment of the gdbarch vector. */
8422
/* Breakpoint instruction sequences, in target byte order.  The ARM
   patterns are undefined-instruction encodings (see the comment above);
   the Thumb pattern uses the same byte repeated, so the little- and
   big-endian variants coincide.  */
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

/* Default breakpoint sequences; ABI-specific code may install
   different ones in the gdbarch tdep.  */
static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8432
8433 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8434 the program counter value to determine whether a 16-bit or 32-bit
8435 breakpoint should be used. It returns a pointer to a string of
8436 bytes that encode a breakpoint instruction, stores the length of
8437 the string to *lenptr, and adjusts the program counter (if
8438 necessary) to point to the actual memory location where the
8439 breakpoint should be inserted. */
8440
8441 static const unsigned char *
8442 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
8443 {
8444 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8445 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8446
8447 if (arm_pc_is_thumb (gdbarch, *pcptr))
8448 {
8449 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
8450
8451 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
8452 check whether we are replacing a 32-bit instruction. */
8453 if (tdep->thumb2_breakpoint != NULL)
8454 {
8455 gdb_byte buf[2];
8456 if (target_read_memory (*pcptr, buf, 2) == 0)
8457 {
8458 unsigned short inst1;
8459 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
8460 if (thumb_insn_size (inst1) == 4)
8461 {
8462 *lenptr = tdep->thumb2_breakpoint_size;
8463 return tdep->thumb2_breakpoint;
8464 }
8465 }
8466 }
8467
8468 *lenptr = tdep->thumb_breakpoint_size;
8469 return tdep->thumb_breakpoint;
8470 }
8471 else
8472 {
8473 *lenptr = tdep->arm_breakpoint_size;
8474 return tdep->arm_breakpoint;
8475 }
8476 }
8477
8478 static void
8479 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
8480 int *kindptr)
8481 {
8482 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8483
8484 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
8485
8486 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
8487 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8488 that this is not confused with a 32-bit ARM breakpoint. */
8489 *kindptr = 3;
8490 }
8491
8492 /* Extract from an array REGBUF containing the (raw) register state a
8493 function return value of type TYPE, and copy that, in virtual
8494 format, into VALBUF. */
8495
static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
	    convert_from_extended (floatformat_from_type (type), tmpbuf,
				   valbuf, gdbarch_byte_order (gdbarch));
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float (and variadic-VFP) values come back in the core
	     registers, starting at r0; doubles also use r1.  */
	  regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
				  valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      /* Copy one register at a time, starting at r0.  */
      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, tmpbuf);
	  memcpy (valbuf, tmpbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
8584
8585
8586 /* Will a function return an aggregate type in memory or in a
8587 register? Return 0 if an aggregate type can be returned in a
8588 register, 1 if it must be returned in memory. */
8589
8590 static int
8591 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
8592 {
8593 int nRc;
8594 enum type_code code;
8595
8596 CHECK_TYPEDEF (type);
8597
8598 /* In the ARM ABI, "integer" like aggregate types are returned in
8599 registers. For an aggregate type to be integer like, its size
8600 must be less than or equal to INT_REGISTER_SIZE and the
8601 offset of each addressable subfield must be zero. Note that bit
8602 fields are not addressable, and all addressable subfields of
8603 unions always start at offset zero.
8604
8605 This function is based on the behaviour of GCC 2.95.1.
8606 See: gcc/arm.c: arm_return_in_memory() for details.
8607
8608 Note: All versions of GCC before GCC 2.95.2 do not set up the
8609 parameters correctly for a function returning the following
8610 structure: struct { float f;}; This should be returned in memory,
8611 not a register. Richard Earnshaw sent me a patch, but I do not
8612 know of any way to detect if a function like the above has been
8613 compiled with the correct calling convention. */
8614
8615 /* All aggregate types that won't fit in a register must be returned
8616 in memory. */
8617 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
8618 {
8619 return 1;
8620 }
8621
8622 /* The AAPCS says all aggregates not larger than a word are returned
8623 in a register. */
8624 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
8625 return 0;
8626
8627 /* The only aggregate types that can be returned in a register are
8628 structs and unions. Arrays must be returned in memory. */
8629 code = TYPE_CODE (type);
8630 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
8631 {
8632 return 1;
8633 }
8634
8635 /* Assume all other aggregate types can be returned in a register.
8636 Run a check for structures, unions and arrays. */
8637 nRc = 0;
8638
8639 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
8640 {
8641 int i;
8642 /* Need to check if this struct/union is "integer" like. For
8643 this to be true, its size must be less than or equal to
8644 INT_REGISTER_SIZE and the offset of each addressable
8645 subfield must be zero. Note that bit fields are not
8646 addressable, and unions always start at offset zero. If any
8647 of the subfields is a floating point type, the struct/union
8648 cannot be an integer type. */
8649
8650 /* For each field in the object, check:
8651 1) Is it FP? --> yes, nRc = 1;
8652 2) Is it addressable (bitpos != 0) and
8653 not packed (bitsize == 0)?
8654 --> yes, nRc = 1
8655 */
8656
8657 for (i = 0; i < TYPE_NFIELDS (type); i++)
8658 {
8659 enum type_code field_type_code;
8660 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
8661 i)));
8662
8663 /* Is it a floating point type field? */
8664 if (field_type_code == TYPE_CODE_FLT)
8665 {
8666 nRc = 1;
8667 break;
8668 }
8669
8670 /* If bitpos != 0, then we have to care about it. */
8671 if (TYPE_FIELD_BITPOS (type, i) != 0)
8672 {
8673 /* Bitfields are not addressable. If the field bitsize is
8674 zero, then the field is not packed. Hence it cannot be
8675 a bitfield or any other packed type. */
8676 if (TYPE_FIELD_BITSIZE (type, i) == 0)
8677 {
8678 nRc = 1;
8679 break;
8680 }
8681 }
8682 }
8683 }
8684
8685 return nRc;
8686 }
8687
8688 /* Write into appropriate registers a function return value of type
8689 TYPE, given in virtual format. */
8690
static void
arm_store_return_value (struct type *type, struct regcache *regs,
			const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      char buf[MAX_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:

	  /* FPA returns floats in F0, in the FPA's extended internal
	     format; convert before writing.  */
	  convert_to_extended (floatformat_from_type (type), buf, valbuf,
			       gdbarch_byte_order (gdbarch));
	  regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float (and variadic-VFP) values go in the core
	     registers, starting at r0; doubles also use r1.  */
	  regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
				   valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_store_return_value: Floating "
			    "point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[INT_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in consecutive
	     registers starting with r0.  This will always be a multiple of
	     the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = ARM_A1_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= INT_REGISTER_SIZE;
	      valbuf += INT_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
8781
8782
8783 /* Handle function return values. */
8784
8785 static enum return_value_convention
8786 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
8787 struct type *valtype, struct regcache *regcache,
8788 gdb_byte *readbuf, const gdb_byte *writebuf)
8789 {
8790 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8791 enum arm_vfp_cprc_base_type vfp_base_type;
8792 int vfp_base_count;
8793
8794 if (arm_vfp_abi_for_function (gdbarch, func_type)
8795 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
8796 {
8797 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
8798 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
8799 int i;
8800 for (i = 0; i < vfp_base_count; i++)
8801 {
8802 if (reg_char == 'q')
8803 {
8804 if (writebuf)
8805 arm_neon_quad_write (gdbarch, regcache, i,
8806 writebuf + i * unit_length);
8807
8808 if (readbuf)
8809 arm_neon_quad_read (gdbarch, regcache, i,
8810 readbuf + i * unit_length);
8811 }
8812 else
8813 {
8814 char name_buf[4];
8815 int regnum;
8816
8817 sprintf (name_buf, "%c%d", reg_char, i);
8818 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8819 strlen (name_buf));
8820 if (writebuf)
8821 regcache_cooked_write (regcache, regnum,
8822 writebuf + i * unit_length);
8823 if (readbuf)
8824 regcache_cooked_read (regcache, regnum,
8825 readbuf + i * unit_length);
8826 }
8827 }
8828 return RETURN_VALUE_REGISTER_CONVENTION;
8829 }
8830
8831 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
8832 || TYPE_CODE (valtype) == TYPE_CODE_UNION
8833 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
8834 {
8835 if (tdep->struct_return == pcc_struct_return
8836 || arm_return_in_memory (gdbarch, valtype))
8837 return RETURN_VALUE_STRUCT_CONVENTION;
8838 }
8839
8840 if (writebuf)
8841 arm_store_return_value (valtype, regcache, writebuf);
8842
8843 if (readbuf)
8844 arm_extract_return_value (valtype, regcache, readbuf);
8845
8846 return RETURN_VALUE_REGISTER_CONVENTION;
8847 }
8848
8849
8850 static int
8851 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
8852 {
8853 struct gdbarch *gdbarch = get_frame_arch (frame);
8854 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8855 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8856 CORE_ADDR jb_addr;
8857 char buf[INT_REGISTER_SIZE];
8858
8859 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
8860
8861 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
8862 INT_REGISTER_SIZE))
8863 return 0;
8864
8865 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
8866 return 1;
8867 }
8868
8869 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
8870 return the target PC. Otherwise return 0. */
8871
CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    return 0;

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (strncmp (name, "_call_via_", 10) == 0
      || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
    {
      /* Use the name suffix to determine which register contains the
	 target PC.  */
      static char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      /* The register name is the last two characters of the stub
	 name.  */
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
	if (strcmp (&name[offset], table[regno]) == 0)
	  return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
	   && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
		       strlen ("_from_thumb")) == 0)
	  || (namelen > 2 + strlen ("_from_arm")
	      && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
			  strlen ("_from_arm")) == 0)))
    {
      char *target_name;
      int target_len = namelen - 2;
      struct minimal_symbol *minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Strip the leading "__" and the "_from_..." suffix to recover
	 the target function's name.  The final 'b' distinguishes
	 "_from_thumb" from "_from_arm".  */
      if (name[namelen - 1] == 'b')
	target_len -= strlen ("_from_thumb");
      else
	target_len -= strlen ("_from_arm");

      target_name = alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Prefer a symbol from the same objfile as the stub.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym != NULL)
	return SYMBOL_VALUE_ADDRESS (minsym);
      else
	return 0;
    }

  return 0;			/* not a stub */
}
8943
8944 static void
8945 set_arm_command (char *args, int from_tty)
8946 {
8947 printf_unfiltered (_("\
8948 \"set arm\" must be followed by an apporpriate subcommand.\n"));
8949 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
8950 }
8951
/* Implement "show arm": display the values of all "show arm ..."
   settings.  */

static void
show_arm_command (char *args, int from_tty)
{
  cmd_show_list (showarmcmdlist, from_tty, "");
}
8957
/* Re-select the target architecture after one of the ARM user settings
   (ABI, FP model, fallback/force mode) has changed, so the new value
   takes effect.  */

static void
arm_update_current_architecture (void)
{
  struct gdbarch_info info;

  /* If the current architecture is not ARM, we have nothing to do.  */
  if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
    return;

  /* Update the architecture.  */
  gdbarch_info_init (&info);

  if (!gdbarch_update_p (info))
    internal_error (__FILE__, __LINE__, _("could not update architecture"));
}
8973
8974 static void
8975 set_fp_model_sfunc (char *args, int from_tty,
8976 struct cmd_list_element *c)
8977 {
8978 enum arm_float_model fp_model;
8979
8980 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
8981 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
8982 {
8983 arm_fp_model = fp_model;
8984 break;
8985 }
8986
8987 if (fp_model == ARM_FLOAT_LAST)
8988 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
8989 current_fp_model);
8990
8991 arm_update_current_architecture ();
8992 }
8993
/* Implement "show arm fp-model".  When the setting is "auto" and the
   current architecture is ARM, also report the model actually in
   use.  */

static void
show_fp_model (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_fp_model == ARM_FLOAT_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
		      fp_model_strings[tdep->fp_model]);
  else
    fprintf_filtered (file, _("\
The current ARM floating point model is \"%s\".\n"),
		      fp_model_strings[arm_fp_model]);
}
9010
9011 static void
9012 arm_set_abi (char *args, int from_tty,
9013 struct cmd_list_element *c)
9014 {
9015 enum arm_abi_kind arm_abi;
9016
9017 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
9018 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
9019 {
9020 arm_abi_global = arm_abi;
9021 break;
9022 }
9023
9024 if (arm_abi == ARM_ABI_LAST)
9025 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
9026 arm_abi_string);
9027
9028 arm_update_current_architecture ();
9029 }
9030
/* Implement "show arm abi".  When the setting is "auto" and the
   current architecture is ARM, also report the ABI actually in
   use.  */

static void
arm_show_abi (struct ui_file *file, int from_tty,
	     struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_abi_global == ARM_ABI_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM ABI is \"auto\" (currently \"%s\").\n"),
		      arm_abi_strings[tdep->arm_abi]);
  else
    fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
		      arm_abi_string);
}
9046
9047 static void
9048 arm_show_fallback_mode (struct ui_file *file, int from_tty,
9049 struct cmd_list_element *c, const char *value)
9050 {
9051 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9052
9053 fprintf_filtered (file,
9054 _("The current execution mode assumed "
9055 "(when symbols are unavailable) is \"%s\".\n"),
9056 arm_fallback_mode_string);
9057 }
9058
9059 static void
9060 arm_show_force_mode (struct ui_file *file, int from_tty,
9061 struct cmd_list_element *c, const char *value)
9062 {
9063 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9064
9065 fprintf_filtered (file,
9066 _("The current execution mode assumed "
9067 "(even when symbols are available) is \"%s\".\n"),
9068 arm_force_mode_string);
9069 }
9070
9071 /* If the user changes the register disassembly style used for info
9072 register and other commands, we have to also switch the style used
9073 in opcodes for disassembly output. This function is run in the "set
9074 arm disassembly" command, and does that. */
9075
/* Sfunc for "set arm disassembler": push the newly selected register
   naming style through to the opcodes disassembler.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  set_disassembly_style ();
}
9082 \f
9083 /* Return the ARM register name corresponding to register I. */
9084 static const char *
9085 arm_register_name (struct gdbarch *gdbarch, int i)
9086 {
9087 const int num_regs = gdbarch_num_regs (gdbarch);
9088
9089 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
9090 && i >= num_regs && i < num_regs + 32)
9091 {
9092 static const char *const vfp_pseudo_names[] = {
9093 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9094 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9095 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9096 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9097 };
9098
9099 return vfp_pseudo_names[i - num_regs];
9100 }
9101
9102 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
9103 && i >= num_regs + 32 && i < num_regs + 32 + 16)
9104 {
9105 static const char *const neon_pseudo_names[] = {
9106 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9107 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9108 };
9109
9110 return neon_pseudo_names[i - num_regs - 32];
9111 }
9112
9113 if (i >= ARRAY_SIZE (arm_register_names))
9114 /* These registers are only supported on targets which supply
9115 an XML description. */
9116 return "";
9117
9118 return arm_register_names[i];
9119 }
9120
/* Locate the user's chosen disassembly style in
   valid_disassembly_styles and tell the opcodes library to use the
   corresponding register-name option.  */

static void
set_disassembly_style (void)
{
  int current;

  /* Find the style that the user wants.  */
  for (current = 0; current < num_disassembly_options; current++)
    if (disassembly_style == valid_disassembly_styles[current])
      break;
  /* The setting machinery only accepts listed values.  */
  gdb_assert (current < num_disassembly_options);

  /* Synchronize the disassembler.  */
  set_arm_regname_option (current);
}
9135
9136 /* Test whether the coff symbol specific value corresponds to a Thumb
9137 function. */
9138
9139 static int
9140 coff_sym_is_thumb (int val)
9141 {
9142 return (val == C_THUMBEXT
9143 || val == C_THUMBSTAT
9144 || val == C_THUMBEXTFUNC
9145 || val == C_THUMBSTATFUNC
9146 || val == C_THUMBLABEL);
9147 }
9148
9149 /* arm_coff_make_msymbol_special()
9150 arm_elf_make_msymbol_special()
9151
9152 These functions test whether the COFF or ELF symbol corresponds to
9153 an address in thumb code, and set a "special" bit in a minimal
9154 symbol to indicate that it does. */
9155
/* Mark minimal symbol MSYM "special" (i.e. Thumb) when the ELF symbol
   SYM's branch type indicates it refers to Thumb code.  */

static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
      == ST_BRANCH_TO_THUMB)
    MSYMBOL_SET_SPECIAL (msym);
}
9163
/* Mark minimal symbol MSYM "special" (i.e. Thumb) when the COFF
   storage class VAL is one of the Thumb classes.  */

static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
9170
9171 static void
9172 arm_objfile_data_free (struct objfile *objfile, void *arg)
9173 {
9174 struct arm_per_objfile *data = arg;
9175 unsigned int i;
9176
9177 for (i = 0; i < objfile->obfd->section_count; i++)
9178 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
9179 }
9180
9181 static void
9182 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
9183 asymbol *sym)
9184 {
9185 const char *name = bfd_asymbol_name (sym);
9186 struct arm_per_objfile *data;
9187 VEC(arm_mapping_symbol_s) **map_p;
9188 struct arm_mapping_symbol new_map_sym;
9189
9190 gdb_assert (name[0] == '$');
9191 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
9192 return;
9193
9194 data = objfile_data (objfile, arm_objfile_data_key);
9195 if (data == NULL)
9196 {
9197 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
9198 struct arm_per_objfile);
9199 set_objfile_data (objfile, arm_objfile_data_key, data);
9200 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
9201 objfile->obfd->section_count,
9202 VEC(arm_mapping_symbol_s) *);
9203 }
9204 map_p = &data->section_maps[bfd_get_section (sym)->index];
9205
9206 new_map_sym.value = sym->value;
9207 new_map_sym.type = name[1];
9208
9209 /* Assume that most mapping symbols appear in order of increasing
9210 value. If they were randomly distributed, it would be faster to
9211 always push here and then sort at first use. */
9212 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
9213 {
9214 struct arm_mapping_symbol *prev_map_sym;
9215
9216 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
9217 if (prev_map_sym->value >= sym->value)
9218 {
9219 unsigned int idx;
9220 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
9221 arm_compare_mapping_symbols);
9222 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
9223 return;
9224 }
9225 }
9226
9227 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
9228 }
9229
9230 static void
9231 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
9232 {
9233 struct gdbarch *gdbarch = get_regcache_arch (regcache);
9234 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
9235
9236 /* If necessary, set the T bit. */
9237 if (arm_apcs_32)
9238 {
9239 ULONGEST val, t_bit;
9240 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
9241 t_bit = arm_psr_thumb_bit (gdbarch);
9242 if (arm_pc_is_thumb (gdbarch, pc))
9243 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9244 val | t_bit);
9245 else
9246 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9247 val & ~t_bit);
9248 }
9249 }
9250
9251 /* Read the contents of a NEON quad register, by reading from two
9252 double registers. This is used to implement the quad pseudo
9253 registers, and for argument passing in case the quad registers are
9254 missing; vectors are passed in quad registers when using the VFP
9255 ABI, even if a NEON unit is not present. REGNUM is the index of
9256 the quad register, in [0, 15]. */
9257
9258 static enum register_status
9259 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
9260 int regnum, gdb_byte *buf)
9261 {
9262 char name_buf[4];
9263 gdb_byte reg_buf[8];
9264 int offset, double_regnum;
9265 enum register_status status;
9266
9267 sprintf (name_buf, "d%d", regnum << 1);
9268 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9269 strlen (name_buf));
9270
9271 /* d0 is always the least significant half of q0. */
9272 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9273 offset = 8;
9274 else
9275 offset = 0;
9276
9277 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9278 if (status != REG_VALID)
9279 return status;
9280 memcpy (buf + offset, reg_buf, 8);
9281
9282 offset = 8 - offset;
9283 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
9284 if (status != REG_VALID)
9285 return status;
9286 memcpy (buf + offset, reg_buf, 8);
9287
9288 return REG_VALID;
9289 }
9290
9291 static enum register_status
9292 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
9293 int regnum, gdb_byte *buf)
9294 {
9295 const int num_regs = gdbarch_num_regs (gdbarch);
9296 char name_buf[4];
9297 gdb_byte reg_buf[8];
9298 int offset, double_regnum;
9299
9300 gdb_assert (regnum >= num_regs);
9301 regnum -= num_regs;
9302
9303 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9304 /* Quad-precision register. */
9305 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
9306 else
9307 {
9308 enum register_status status;
9309
9310 /* Single-precision register. */
9311 gdb_assert (regnum < 32);
9312
9313 /* s0 is always the least significant half of d0. */
9314 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9315 offset = (regnum & 1) ? 0 : 4;
9316 else
9317 offset = (regnum & 1) ? 4 : 0;
9318
9319 sprintf (name_buf, "d%d", regnum >> 1);
9320 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9321 strlen (name_buf));
9322
9323 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9324 if (status == REG_VALID)
9325 memcpy (buf, reg_buf + offset, 4);
9326 return status;
9327 }
9328 }
9329
9330 /* Store the contents of BUF to a NEON quad register, by writing to
9331 two double registers. This is used to implement the quad pseudo
9332 registers, and for argument passing in case the quad registers are
9333 missing; vectors are passed in quad registers when using the VFP
9334 ABI, even if a NEON unit is not present. REGNUM is the index
9335 of the quad register, in [0, 15]. */
9336
9337 static void
9338 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
9339 int regnum, const gdb_byte *buf)
9340 {
9341 char name_buf[4];
9342 gdb_byte reg_buf[8];
9343 int offset, double_regnum;
9344
9345 sprintf (name_buf, "d%d", regnum << 1);
9346 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9347 strlen (name_buf));
9348
9349 /* d0 is always the least significant half of q0. */
9350 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9351 offset = 8;
9352 else
9353 offset = 0;
9354
9355 regcache_raw_write (regcache, double_regnum, buf + offset);
9356 offset = 8 - offset;
9357 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
9358 }
9359
9360 static void
9361 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
9362 int regnum, const gdb_byte *buf)
9363 {
9364 const int num_regs = gdbarch_num_regs (gdbarch);
9365 char name_buf[4];
9366 gdb_byte reg_buf[8];
9367 int offset, double_regnum;
9368
9369 gdb_assert (regnum >= num_regs);
9370 regnum -= num_regs;
9371
9372 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9373 /* Quad-precision register. */
9374 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
9375 else
9376 {
9377 /* Single-precision register. */
9378 gdb_assert (regnum < 32);
9379
9380 /* s0 is always the least significant half of d0. */
9381 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9382 offset = (regnum & 1) ? 0 : 4;
9383 else
9384 offset = (regnum & 1) ? 4 : 0;
9385
9386 sprintf (name_buf, "d%d", regnum >> 1);
9387 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9388 strlen (name_buf));
9389
9390 regcache_raw_read (regcache, double_regnum, reg_buf);
9391 memcpy (reg_buf + offset, buf, 4);
9392 regcache_raw_write (regcache, double_regnum, reg_buf);
9393 }
9394 }
9395
/* user_reg_add callback: BATON points at the register number recorded
   when the alias was registered.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_p = baton;

  return value_of_register (*regnum_p, frame);
}
9402 \f
9403 static enum gdb_osabi
9404 arm_elf_osabi_sniffer (bfd *abfd)
9405 {
9406 unsigned int elfosabi;
9407 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
9408
9409 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
9410
9411 if (elfosabi == ELFOSABI_ARM)
9412 /* GNU tools use this value. Check note sections in this case,
9413 as well. */
9414 bfd_map_over_sections (abfd,
9415 generic_elf_osabi_sniff_abi_tag_sections,
9416 &osabi);
9417
9418 /* Anything else will be handled by the generic ELF sniffer. */
9419 return osabi;
9420 }
9421
9422 static int
9423 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
9424 struct reggroup *group)
9425 {
9426 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9427 this, FPS register belongs to save_regroup, restore_reggroup, and
9428 all_reggroup, of course. */
9429 if (regnum == ARM_FPS_REGNUM)
9430 return (group == float_reggroup
9431 || group == save_reggroup
9432 || group == restore_reggroup
9433 || group == all_reggroup);
9434 else
9435 return default_register_reggroup_p (gdbarch, regnum, group);
9436 }
9437
9438 \f
9439 /* Initialize the current architecture based on INFO. If possible,
9440 re-use an architecture from ARCHES, which is a list of
9441 architectures already created during this debugging session.
9442
9443 Called e.g. at program startup, when reading a core file, and when
9444 reading a binary file. */
9445
static struct gdbarch *
arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  enum arm_abi_kind arm_abi = arm_abi_global;
  enum arm_float_model fp_model = arm_fp_model;
  struct tdesc_arch_data *tdesc_data = NULL;
  int i, is_m = 0;
  int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
  int have_neon = 0;
  int have_fpa_registers = 1;
  const struct target_desc *tdesc = info.target_desc;

  /* If we have an object to base this architecture on, try to determine
     its ABI.  */

  if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
    {
      int ei_osabi, e_flags;

      switch (bfd_get_flavour (info.abfd))
	{
	case bfd_target_aout_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_coff_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  /* XXX WinCE?  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_elf_flavour:
	  ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
	  e_flags = elf_elfheader (info.abfd)->e_flags;

	  if (ei_osabi == ELFOSABI_ARM)
	    {
	      /* GNU tools used to use this value, but do not for EABI
		 objects.  There's nowhere to tag an EABI version
		 anyway, so assume APCS.  */
	      arm_abi = ARM_ABI_APCS;
	    }
	  else if (ei_osabi == ELFOSABI_NONE)
	    {
	      int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
	      int attr_arch, attr_profile;

	      switch (eabi_ver)
		{
		case EF_ARM_EABI_UNKNOWN:
		  /* Assume GNU tools.  */
		  arm_abi = ARM_ABI_APCS;
		  break;

		case EF_ARM_EABI_VER4:
		case EF_ARM_EABI_VER5:
		  arm_abi = ARM_ABI_AAPCS;
		  /* EABI binaries default to VFP float ordering.
		     They may also contain build attributes that can
		     be used to identify if the VFP argument-passing
		     ABI is in use.  */
		  if (fp_model == ARM_FLOAT_AUTO)
		    {
#ifdef HAVE_ELF
		      /* Tag_ABI_VFP_args values below are taken from
			 the ARM EABI build-attributes document.  */
		      switch (bfd_elf_get_obj_attr_int (info.abfd,
							OBJ_ATTR_PROC,
							Tag_ABI_VFP_args))
			{
			case 0:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, base
			     variant".  */
			  fp_model = ARM_FLOAT_SOFT_VFP;
			  break;
			case 1:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, VFP
			     variant".  */
			  fp_model = ARM_FLOAT_VFP;
			  break;
			case 2:
			  /* "The user intended FP parameter/result
			     passing to conform to tool chain-specific
			     conventions" - we don't know any such
			     conventions, so leave it as "auto".  */
			  break;
			default:
			  /* Attribute value not mentioned in the
			     October 2008 ABI, so leave it as
			     "auto".  */
			  break;
			}
#else
		      fp_model = ARM_FLOAT_SOFT_VFP;
#endif
		    }
		  break;

		default:
		  /* Leave it as "auto".  */
		  warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
		  break;
		}

#ifdef HAVE_ELF
	      /* Detect M-profile programs.  This only works if the
		 executable file includes build attributes; GCC does
		 copy them to the executable, but e.g. RealView does
		 not.  */
	      attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
						    Tag_CPU_arch);
	      attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
						       OBJ_ATTR_PROC,
						       Tag_CPU_arch_profile);
	      /* GCC specifies the profile for v6-M; RealView only
		 specifies the profile for architectures starting with
		 V7 (as opposed to architectures with a tag
		 numerically greater than TAG_CPU_ARCH_V7).  */
	      if (!tdesc_has_registers (tdesc)
		  && (attr_arch == TAG_CPU_ARCH_V6_M
		      || attr_arch == TAG_CPU_ARCH_V6S_M
		      || attr_profile == 'M'))
		tdesc = tdesc_arm_with_m;
#endif
	    }

	  if (fp_model == ARM_FLOAT_AUTO)
	    {
	      /* NOTE(review): this inner E_FLAGS shadows the outer
		 local of the same name declared above; both read the
		 same ELF header field, so behavior is unaffected.  */
	      int e_flags = elf_elfheader (info.abfd)->e_flags;

	      switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
		{
		case 0:
		  /* Leave it as "auto".  Strictly speaking this case
		     means FPA, but almost nobody uses that now, and
		     many toolchains fail to set the appropriate bits
		     for the floating-point model they use.  */
		  break;
		case EF_ARM_SOFT_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_FPA;
		  break;
		case EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_VFP;
		  break;
		case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_VFP;
		  break;
		}
	    }

	  /* BE8 objects store code little-endian even on big-endian
	     targets.  */
	  if (e_flags & EF_ARM_BE8)
	    info.byte_order_for_code = BFD_ENDIAN_LITTLE;

	  break;

	default:
	  /* Leave it as "auto".  */
	  break;
	}
    }

  /* Check any target description for validity.  */
  if (tdesc_has_registers (tdesc))
    {
      /* For most registers we require GDB's default names; but also allow
	 the numeric names for sp / lr / pc, as a convenience.  */
      static const char *const arm_sp_names[] = { "r13", "sp", NULL };
      static const char *const arm_lr_names[] = { "r14", "lr", NULL };
      static const char *const arm_pc_names[] = { "r15", "pc", NULL };

      const struct tdesc_feature *feature;
      int valid_p;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.core");
      if (feature == NULL)
	{
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.m-profile");
	  if (feature == NULL)
	    return NULL;
	  else
	    is_m = 1;
	}

      tdesc_data = tdesc_data_alloc ();

      valid_p = 1;
      for (i = 0; i < ARM_SP_REGNUM; i++)
	valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
					    arm_register_names[i]);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_SP_REGNUM,
						  arm_sp_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_LR_REGNUM,
						  arm_lr_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_PC_REGNUM,
						  arm_pc_names);
      /* M-profile cores expose XPSR where other cores have CPSR.  */
      if (is_m)
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "xpsr");
      else
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "cpsr");

      if (!valid_p)
	{
	  tdesc_data_cleanup (tdesc_data);
	  return NULL;
	}

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.fpa");
      if (feature != NULL)
	{
	  valid_p = 1;
	  for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
	    valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
						arm_register_names[i]);
	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}
      else
	have_fpa_registers = 0;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.xscale.iwmmxt");
      if (feature != NULL)
	{
	  static const char *const iwmmxt_names[] = {
	    "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
	    "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
	    "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
	    "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
	  };

	  valid_p = 1;
	  for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  /* Check for the control registers, but do not fail if they
	     are missing.  */
	  for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
	    tdesc_numbered_register (feature, tdesc_data, i,
				     iwmmxt_names[i - ARM_WR0_REGNUM]);

	  for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}

      /* If we have a VFP unit, check whether the single precision registers
	 are present.  If not, then we will synthesize them as pseudo
	 registers.  */
      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.vfp");
      if (feature != NULL)
	{
	  static const char *const vfp_double_names[] = {
	    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
	    "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
	    "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
	    "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
	  };

	  /* Require the double precision registers.  There must be either
	     16 or 32.  */
	  valid_p = 1;
	  for (i = 0; i < 32; i++)
	    {
	      valid_p &= tdesc_numbered_register (feature, tdesc_data,
						  ARM_D0_REGNUM + i,
						  vfp_double_names[i]);
	      if (!valid_p)
		break;
	    }
	  /* Exactly 16 double registers is also acceptable.  */
	  if (!valid_p && i == 16)
	    valid_p = 1;

	  /* Also require FPSCR.  */
	  valid_p &= tdesc_numbered_register (feature, tdesc_data,
					      ARM_FPSCR_REGNUM, "fpscr");
	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }

	  /* If the stub did not number the single registers itself,
	     synthesize them as pseudos below.  */
	  if (tdesc_unnumbered_register (feature, "s0") == 0)
	    have_vfp_pseudos = 1;

	  have_vfp_registers = 1;

	  /* If we have VFP, also check for NEON.  The architecture allows
	     NEON without VFP (integer vector operations only), but GDB
	     does not support that.  */
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.neon");
	  if (feature != NULL)
	    {
	      /* NEON requires 32 double-precision registers.  */
	      if (i != 32)
		{
		  tdesc_data_cleanup (tdesc_data);
		  return NULL;
		}

	      /* If there are quad registers defined by the stub, use
		 their type; otherwise (normally) provide them with
		 the default type.  */
	      if (tdesc_unnumbered_register (feature, "q0") == 0)
		have_neon_pseudos = 1;

	      have_neon = 1;
	    }
	}
    }

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      if (arm_abi != ARM_ABI_AUTO
	  && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
	continue;

      if (fp_model != ARM_FLOAT_AUTO
	  && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
	continue;

      /* There are various other properties in tdep that we do not
	 need to check here: those derived from a target description,
	 since gdbarches with a different target description are
	 automatically disqualified.  */

      /* Do check is_m, though, since it might come from the binary.  */
      if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
	continue;

      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Record additional information about the architecture we are defining.
     These are gdbarch discriminators, like the OSABI.  */
  tdep->arm_abi = arm_abi;
  tdep->fp_model = fp_model;
  tdep->is_m = is_m;
  tdep->have_fpa_registers = have_fpa_registers;
  tdep->have_vfp_registers = have_vfp_registers;
  tdep->have_vfp_pseudos = have_vfp_pseudos;
  tdep->have_neon_pseudos = have_neon_pseudos;
  tdep->have_neon = have_neon;

  /* Breakpoints.  */
  switch (info.byte_order_for_code)
    {
    case BFD_ENDIAN_BIG:
      tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);

      break;

    case BFD_ENDIAN_LITTLE:
      tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);

      break;

    default:
      internal_error (__FILE__, __LINE__,
		      _("arm_gdbarch_init: bad byte order for float format"));
    }

  /* On ARM targets char defaults to unsigned.  */
  set_gdbarch_char_signed (gdbarch, 0);

  /* Note: for displaced stepping, this includes the breakpoint, and one word
     of additional scratch space.  This setting isn't used for anything beside
     displaced stepping at present.  */
  set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;	/* Longjump support not enabled by default.  */

  /* The default, for both APCS and AAPCS, is to return small
     structures in registers.  */
  tdep->struct_return = reg_struct_return;

  set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, arm_frame_align);

  set_gdbarch_write_pc (gdbarch, arm_write_pc);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);

  frame_base_set_default (gdbarch, &arm_normal_base);

  /* Address manipulation.  */
  set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
  set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);

  /* Detect whether PC is in function epilogue.  */
  set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);

  /* Skip trampolines.  */
  set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
  set_gdbarch_remote_breakpoint_from_pc (gdbarch,
					 arm_remote_breakpoint_from_pc);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
  set_gdbarch_register_type (gdbarch, arm_register_type);
  set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);

  /* This "info float" is FPA-specific.  Use the generic version if we
     do not have FPA.  */
  if (gdbarch_tdep (gdbarch)->have_fpa_registers)
    set_gdbarch_print_float_info (gdbarch, arm_print_float_info);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
  set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);

  set_gdbarch_register_name (gdbarch, arm_register_name);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, arm_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);

  /* Minsymbol frobbing.  */
  set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
  set_gdbarch_coff_make_msymbol_special (gdbarch,
					 arm_coff_make_msymbol_special);
  set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);

  /* Thumb-2 IT block support.  */
  set_gdbarch_adjust_breakpoint_address (gdbarch,
					 arm_adjust_breakpoint_address);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
  frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  /* If the ABI is not otherwise marked, assume the old GNU APCS.  EABI
     binaries are always marked.  */
  if (tdep->arm_abi == ARM_ABI_AUTO)
    tdep->arm_abi = ARM_ABI_APCS;

  /* Watchpoints are not steppable.  */
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);

  /* We used to default to FPA for generic ARM, but almost nobody
     uses that now, and we now provide a way for the user to force
     the model.  So default to the most useful variant.  */
  if (tdep->fp_model == ARM_FLOAT_AUTO)
    tdep->fp_model = ARM_FLOAT_SOFT_FPA;

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);

  /* Floating point sizes and format.  */
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
    {
      /* FPA doubles are stored with the two words swapped relative to
	 pure little-endian IEEE order.  */
      set_gdbarch_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
      set_gdbarch_long_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
    }
  else
    {
      set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
      set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
    }

  if (have_vfp_pseudos)
    {
      /* NOTE: These are the only pseudo registers used by
	 the ARM target at the moment.  If more are added, a
	 little more care in numbering will be needed.  */

      int num_pseudos = 32;
      if (have_neon_pseudos)
	num_pseudos += 16;
      set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
      set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
      set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
    }

  if (tdesc_data)
    {
      set_tdesc_pseudo_register_name (gdbarch, arm_register_name);

      tdesc_use_registers (gdbarch, tdesc, tdesc_data);

      /* Override tdesc_register_type to adjust the types of VFP
	 registers for NEON.  */
      set_gdbarch_register_type (gdbarch, arm_register_type);
    }

  /* Add standard register aliases.  We add aliases even for those
     names which are used by the current architecture - it's simpler,
     and does no harm, since nothing ever lists user registers.  */
  for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
    user_reg_add (gdbarch, arm_register_aliases[i].name,
		  value_of_arm_user_reg, &arm_register_aliases[i].regnum);

  return gdbarch;
}
10019
/* Dump the target-specific parts of GDBARCH to FILE, for "maintenance
   print architecture".  */

static void
arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep == NULL)
    return;

  /* NOTE(review): the cast truncates CORE_ADDR when it is wider than
     unsigned long; harmless for lowest_pc's small values, but worth
     confirming if this ever prints other addresses.  */
  fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
		      (unsigned long) tdep->lowest_pc);
}
10031
10032 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
10033
/* Module initializer: register the ARM gdbarch, objfile-data keys, OS
   ABI sniffer and target descriptions, and create the "set/show arm"
   command tree.  Called once at GDB startup.  */

void
_initialize_arm_tdep (void)
{
  struct ui_file *stb;
  long length;
  /* NOTE(review): new_set, new_show, regnames and j appear unused in
     this function; kept as-is since this is a documentation pass.  */
  struct cmd_list_element *new_set, *new_show;
  const char *setname;
  const char *setdesc;
  const char *const *regnames;
  int numregs, i, j;
  static char *helptext;
  char regdesc[1024], *rdptr = regdesc;
  size_t rest = sizeof (regdesc);

  gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);

  arm_objfile_data_key
    = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);

  /* Add ourselves to objfile event chain.  */
  observer_attach_new_objfile (arm_exidx_new_objfile);
  arm_exidx_data_key
    = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);

  /* Register an ELF OS ABI sniffer for ARM binaries.  */
  gdbarch_register_osabi_sniffer (bfd_arch_arm,
				  bfd_target_elf_flavour,
				  arm_elf_osabi_sniffer);

  /* Initialize the standard target descriptions.  */
  initialize_tdesc_arm_with_m ();
  initialize_tdesc_arm_with_iwmmxt ();
  initialize_tdesc_arm_with_vfpv2 ();
  initialize_tdesc_arm_with_vfpv3 ();
  initialize_tdesc_arm_with_neon ();

  /* Get the number of possible sets of register names defined in opcodes.  */
  num_disassembly_options = get_arm_regname_num_options ();

  /* Add root prefix command for all "set arm"/"show arm" commands.  */
  add_prefix_cmd ("arm", no_class, set_arm_command,
		  _("Various ARM-specific commands."),
		  &setarmcmdlist, "set arm ", 0, &setlist);

  add_prefix_cmd ("arm", no_class, show_arm_command,
		  _("Various ARM-specific commands."),
		  &showarmcmdlist, "show arm ", 0, &showlist);

  /* Sync the opcode insn printer with our register viewer.  */
  parse_arm_disassembler_option ("reg-names-std");

  /* Initialize the array that will be passed to
     add_setshow_enum_cmd().  Each entry names one register-name set
     known to opcodes; REGDESC accumulates a one-line description of
     each for the help text.  */
  valid_disassembly_styles
    = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
  for (i = 0; i < num_disassembly_options; i++)
    {
      numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
      valid_disassembly_styles[i] = setname;
      length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
      rdptr += length;
      rest -= length;
      /* When we find the default names, tell the disassembler to use
	 them.  */
      if (!strcmp (setname, "std"))
	{
	  disassembly_style = setname;
	  set_arm_regname_option (i);
	}
    }
  /* Mark the end of valid options.  */
  valid_disassembly_styles[num_disassembly_options] = NULL;

  /* Create the help text.  */
  stb = mem_fileopen ();
  fprintf_unfiltered (stb, "%s%s%s",
		      _("The valid values are:\n"),
		      regdesc,
		      _("The default is \"std\"."));
  helptext = ui_file_xstrdup (stb, NULL);
  ui_file_delete (stb);

  add_setshow_enum_cmd("disassembler", no_class,
		       valid_disassembly_styles, &disassembly_style,
		       _("Set the disassembly style."),
		       _("Show the disassembly style."),
		       helptext,
		       set_disassembly_style_sfunc,
		       NULL, /* FIXME: i18n: The disassembly style is
				\"%s\".  */
		       &setarmcmdlist, &showarmcmdlist);

  add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
			   _("Set usage of ARM 32-bit mode."),
			   _("Show usage of ARM 32-bit mode."),
			   _("When off, a 26-bit PC will be used."),
			   NULL,
			   NULL, /* FIXME: i18n: Usage of ARM 32-bit
				    mode is %s.  */
			   &setarmcmdlist, &showarmcmdlist);

  /* Add a command to allow the user to force the FPU model.  */
  add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
			_("Set the floating point type."),
			_("Show the floating point type."),
			_("auto - Determine the FP typefrom the OS-ABI.\n\
softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
fpa - FPA co-processor (GCC compiled).\n\
softvfp - Software FP with pure-endian doubles.\n\
vfp - VFP co-processor."),
			set_fp_model_sfunc, show_fp_model,
			&setarmcmdlist, &showarmcmdlist);

  /* Add a command to allow the user to force the ABI.  */
  add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
			_("Set the ABI."),
			_("Show the ABI."),
			NULL, arm_set_abi, arm_show_abi,
			&setarmcmdlist, &showarmcmdlist);

  /* Add two commands to allow the user to force the assumed
     execution mode.  */
  add_setshow_enum_cmd ("fallback-mode", class_support,
			arm_mode_strings, &arm_fallback_mode_string,
			_("Set the mode assumed when symbols are unavailable."),
			_("Show the mode assumed when symbols are unavailable."),
			NULL, NULL, arm_show_fallback_mode,
			&setarmcmdlist, &showarmcmdlist);
  add_setshow_enum_cmd ("force-mode", class_support,
			arm_mode_strings, &arm_force_mode_string,
			_("Set the mode assumed even when symbols are available."),
			_("Show the mode assumed even when symbols are available."),
			NULL, NULL, arm_show_force_mode,
			&setarmcmdlist, &showarmcmdlist);

  /* Debugging flag.  */
  add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
			   _("Set ARM debugging."),
			   _("Show ARM debugging."),
			   _("When on, arm-specific debugging is enabled."),
			   NULL,
			   NULL, /* FIXME: i18n: "ARM debugging is %s.  */
			   &setdebuglist, &showdebuglist);
}
This page took 0.246843 seconds and 4 git commands to generate.