024191b9b3bd9422b488749645e73f65d8eda0d8
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "features/arm-with-m.c"
59
60 static int arm_debug;
61
62 /* Macros for setting and testing a bit in a minimal symbol that marks
63 it as Thumb function. The MSB of the minimal symbol's "info" field
64 is used for this purpose.
65
66 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
67 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
68
69 #define MSYMBOL_SET_SPECIAL(msym) \
70 MSYMBOL_TARGET_FLAG_1 (msym) = 1
71
72 #define MSYMBOL_IS_SPECIAL(msym) \
73 MSYMBOL_TARGET_FLAG_1 (msym)
74
/* Per-objfile data used for mapping symbols.  */
static const struct objfile_data *arm_objfile_data_key;

/* A single ELF mapping symbol.  VALUE is the section-relative address
   it covers from; TYPE is the symbol's type character (e.g. 't' marks
   Thumb code -- see arm_pc_is_thumb).  */
struct arm_mapping_symbol
{
  bfd_vma value;
  char type;
};
typedef struct arm_mapping_symbol arm_mapping_symbol_s;
DEF_VEC_O(arm_mapping_symbol_s);

/* Per-objfile mapping symbol data: one vector of mapping symbols per
   BFD section, indexed by section index and binary-searched with
   VEC_lower_bound (see arm_find_mapping_symbol).  */
struct arm_per_objfile
{
  VEC(arm_mapping_symbol_s) **section_maps;
};
90
91 /* The list of available "set arm ..." and "show arm ..." commands. */
92 static struct cmd_list_element *setarmcmdlist = NULL;
93 static struct cmd_list_element *showarmcmdlist = NULL;
94
95 /* The type of floating-point to use. Keep this in sync with enum
96 arm_float_model, and the help string in _initialize_arm_tdep. */
97 static const char *fp_model_strings[] =
98 {
99 "auto",
100 "softfpa",
101 "fpa",
102 "softvfp",
103 "vfp",
104 NULL
105 };
106
107 /* A variable that can be configured by the user. */
108 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
109 static const char *current_fp_model = "auto";
110
111 /* The ABI to use. Keep this in sync with arm_abi_kind. */
112 static const char *arm_abi_strings[] =
113 {
114 "auto",
115 "APCS",
116 "AAPCS",
117 NULL
118 };
119
120 /* A variable that can be configured by the user. */
121 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
122 static const char *arm_abi_string = "auto";
123
124 /* The execution mode to assume. */
125 static const char *arm_mode_strings[] =
126 {
127 "auto",
128 "arm",
129 "thumb",
130 NULL
131 };
132
133 static const char *arm_fallback_mode_string = "auto";
134 static const char *arm_force_mode_string = "auto";
135
136 /* Internal override of the execution mode. -1 means no override,
137 0 means override to ARM mode, 1 means override to Thumb mode.
138 The effect is the same as if arm_force_mode has been set by the
139 user (except the internal override has precedence over a user's
140 arm_force_mode override). */
141 static int arm_override_mode = -1;
142
143 /* Number of different reg name sets (options). */
144 static int num_disassembly_options;
145
146 /* The standard register names, and all the valid aliases for them. Note
147 that `fp', `sp' and `pc' are not added in this alias list, because they
148 have been added as builtin user registers in
149 std-regs.c:_initialize_frame_reg. */
150 static const struct
151 {
152 const char *name;
153 int regnum;
154 } arm_register_aliases[] = {
155 /* Basic register numbers. */
156 { "r0", 0 },
157 { "r1", 1 },
158 { "r2", 2 },
159 { "r3", 3 },
160 { "r4", 4 },
161 { "r5", 5 },
162 { "r6", 6 },
163 { "r7", 7 },
164 { "r8", 8 },
165 { "r9", 9 },
166 { "r10", 10 },
167 { "r11", 11 },
168 { "r12", 12 },
169 { "r13", 13 },
170 { "r14", 14 },
171 { "r15", 15 },
172 /* Synonyms (argument and variable registers). */
173 { "a1", 0 },
174 { "a2", 1 },
175 { "a3", 2 },
176 { "a4", 3 },
177 { "v1", 4 },
178 { "v2", 5 },
179 { "v3", 6 },
180 { "v4", 7 },
181 { "v5", 8 },
182 { "v6", 9 },
183 { "v7", 10 },
184 { "v8", 11 },
185 /* Other platform-specific names for r9. */
186 { "sb", 9 },
187 { "tr", 9 },
188 /* Special names. */
189 { "ip", 12 },
190 { "lr", 14 },
191 /* Names used by GCC (not listed in the ARM EABI). */
192 { "sl", 10 },
193 /* A special name from the older ATPCS. */
194 { "wr", 7 },
195 };
196
197 static const char *const arm_register_names[] =
198 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
199 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
200 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
201 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
202 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
203 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
204 "fps", "cpsr" }; /* 24 25 */
205
206 /* Valid register name styles. */
207 static const char **valid_disassembly_styles;
208
209 /* Disassembly style to use. Default to "std" register names. */
210 static const char *disassembly_style;
211
212 /* This is used to keep the bfd arch_info in sync with the disassembly
213 style. */
214 static void set_disassembly_style_sfunc(char *, int,
215 struct cmd_list_element *);
216 static void set_disassembly_style (void);
217
218 static void convert_from_extended (const struct floatformat *, const void *,
219 void *, int);
220 static void convert_to_extended (const struct floatformat *, void *,
221 const void *, int);
222
223 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
224 struct regcache *regcache,
225 int regnum, gdb_byte *buf);
226 static void arm_neon_quad_write (struct gdbarch *gdbarch,
227 struct regcache *regcache,
228 int regnum, const gdb_byte *buf);
229
/* Per-frame state derived by prologue analysis and cached for the
   prologue-based unwinders (filled in by arm_analyze_prologue and
   friends).  */
struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
249
250 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
251 CORE_ADDR prologue_start,
252 CORE_ADDR prologue_end,
253 struct arm_prologue_cache *cache);
254
255 /* Architecture version for displaced stepping. This effects the behaviour of
256 certain instructions, and really should not be hard-wired. */
257
258 #define DISPLACED_STEPPING_ARCH_VERSION 5
259
260 /* Addresses for calling Thumb functions have the bit 0 set.
261 Here are some macros to test, set, or clear bit 0 of addresses. */
262 #define IS_THUMB_ADDR(addr) ((addr) & 1)
263 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
264 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
265
266 /* Set to true if the 32-bit mode is in use. */
267
268 int arm_apcs_32 = 1;
269
270 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
271
272 int
273 arm_psr_thumb_bit (struct gdbarch *gdbarch)
274 {
275 if (gdbarch_tdep (gdbarch)->is_m)
276 return XPSR_T;
277 else
278 return CPSR_T;
279 }
280
281 /* Determine if FRAME is executing in Thumb mode. */
282
283 int
284 arm_frame_is_thumb (struct frame_info *frame)
285 {
286 CORE_ADDR cpsr;
287 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
288
289 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
290 directly (from a signal frame or dummy frame) or by interpreting
291 the saved LR (from a prologue or DWARF frame). So consult it and
292 trust the unwinders. */
293 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
294
295 return (cpsr & t_bit) != 0;
296 }
297
298 /* Callback for VEC_lower_bound. */
299
300 static inline int
301 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
302 const struct arm_mapping_symbol *rhs)
303 {
304 return lhs->value < rhs->value;
305 }
306
/* Search for the mapping symbol covering MEMADDR.  If one is found,
   return its type.  Otherwise, return 0.  If START is non-NULL,
   set *START to the location of the mapping symbol.  */

static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_per_objfile *data;
      VEC(arm_mapping_symbol_s) *map;
      /* Mapping symbol addresses are stored section-relative, so
	 convert MEMADDR before searching.  */
      struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
					    0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_objfile_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_mapping_symbol_s, map))
	    {
	      struct arm_mapping_symbol *map_sym;

	      idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
				     arm_compare_mapping_symbols);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 mapping symbol covers this address.  */
	      if (idx < VEC_length (arm_mapping_symbol_s, map))
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
		  if (map_sym->value == map_key.value)
		    {
		      if (start)
			*start = map_sym->value + obj_section_addr (sec);
		      return map_sym->type;
		    }
		}

	      /* Fall back to the nearest preceding mapping symbol,
		 translating its value back to an absolute address.  */
	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
		  if (start)
		    *start = map_sym->value + obj_section_addr (sec);
		  return map_sym->type;
		}
	    }
	}
    }

  /* No section or no mapping symbol covers MEMADDR.  */
  return 0;
}
365
/* Determine if the program counter specified in MEMADDR is in a Thumb
   function.  This function should be called for addresses unrelated to
   any executing frame; otherwise, prefer arm_frame_is_thumb.

   The heuristics below are consulted strictly in order: displaced-step
   redirection, the Thumb address bit, the internal override, the user's
   "force mode" setting, M-profile, mapping symbols, minimal symbols,
   the user's "fallback mode" setting, and finally the live CPSR.  */

int
arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
{
  struct obj_section *sec;
  struct minimal_symbol *sym;
  char type;
  struct displaced_step_closure* dsc
    = get_displaced_step_closure_by_addr(memaddr);

  /* If checking the mode of displaced instruction in copy area, the mode
     should be determined by instruction on the original address.  */
  if (dsc)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: check mode of %.8lx instead of %.8lx\n",
			    (unsigned long) dsc->insn_addr,
			    (unsigned long) memaddr);
      memaddr = dsc->insn_addr;
    }

  /* If bit 0 of the address is set, assume this is a Thumb address.  */
  if (IS_THUMB_ADDR (memaddr))
    return 1;

  /* Respect internal mode override if active.  */
  if (arm_override_mode != -1)
    return arm_override_mode;

  /* If the user wants to override the symbol table, let him.  */
  if (strcmp (arm_force_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_force_mode_string, "thumb") == 0)
    return 1;

  /* ARM v6-M and v7-M are always in Thumb mode.  */
  if (gdbarch_tdep (gdbarch)->is_m)
    return 1;

  /* If there are mapping symbols, consult them.  ('t' is the mapping
     symbol type for Thumb code.)  */
  type = arm_find_mapping_symbol (memaddr, NULL);
  if (type)
    return type == 't';

  /* Thumb functions have a "special" bit set in minimal symbols.  */
  sym = lookup_minimal_symbol_by_pc (memaddr);
  if (sym)
    return (MSYMBOL_IS_SPECIAL (sym));

  /* If the user wants to override the fallback mode, let them.  */
  if (strcmp (arm_fallback_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_fallback_mode_string, "thumb") == 0)
    return 1;

  /* If we couldn't find any symbol, but we're talking to a running
     target, then trust the current value of $cpsr.  This lets
     "display/i $pc" always show the correct mode (though if there is
     a symbol table we will not reach here, so it still may not be
     displayed in the mode it will be executed).  */
  if (target_has_registers)
    return arm_frame_is_thumb (get_current_frame ());

  /* Otherwise we're out of luck; we assume ARM.  */
  return 0;
}
436
437 /* Remove useless bits from addresses in a running program. */
438 static CORE_ADDR
439 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
440 {
441 if (arm_apcs_32)
442 return UNMAKE_THUMB_ADDR (val);
443 else
444 return (val & 0x03fffffc);
445 }
446
447 /* When reading symbols, we need to zap the low bit of the address,
448 which may be set to 1 for Thumb functions. */
449 static CORE_ADDR
450 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
451 {
452 return val & ~1;
453 }
454
/* Return 1 if PC is the start of a compiler helper function which
   can be safely ignored during prologue skipping.  IS_THUMB is true
   if the function is known to be a Thumb function due to the way it
   is being called.  */
static int
skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct minimal_symbol *msym;

  msym = lookup_minimal_symbol_by_pc (pc);
  if (msym != NULL
      && SYMBOL_VALUE_ADDRESS (msym) == pc
      && SYMBOL_LINKAGE_NAME (msym) != NULL)
    {
      const char *name = SYMBOL_LINKAGE_NAME (msym);

      /* The GNU linker's Thumb call stub to foo is named
	 __foo_from_thumb.  Skip the extra leading "__" so the prefix
	 checks below match the name of the stub's target.  */
      if (strstr (name, "_from_thumb") != NULL)
	name += 2;

      /* On soft-float targets, __truncdfsf2 is called to convert promoted
	 arguments to their argument types in non-prototyped
	 functions.  (Prefix matches, so the stub names above also hit.)  */
      if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
	return 1;

      /* Internal functions related to thread-local storage.  */
      if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
	return 1;
    }
  else
    {
      /* If we run against a stripped glibc, we may be unable to identify
	 special functions by name.  Check for one important case,
	 __aeabi_read_tp, by comparing the *code* against the default
	 implementation (this is hand-written ARM assembler in glibc).  */

      if (!is_thumb
	  && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
	     == 0xe3e00a0f /* mov r0, #0xffff0fff */
	  && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
	     == 0xe240f01f) /* sub pc, r0, #31 */
	return 1;
    }

  return 0;
}
508
/* Support routines for instruction parsing.  */

/* Mask with bits 0 through X (inclusive) set.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ, as 0 or 1.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST through FN (inclusive) of OBJ, right-justified.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Like bits, but sign-extending bit FN into the upper bits.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* Destination of an ARM branch INSTR at ADDR: the pipelined PC
   (ADDR + 8) plus the sign-extended 24-bit offset scaled by 4.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))

/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12) \
   | (bits ((insn1), 10, 10) << 11) \
   | (bits ((insn2), 12, 14) << 8) \
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12) \
   | bits ((insn), 0, 11))
532
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int byte = imm & 0xff;
  unsigned int count = imm >> 7;	/* Top five bits of the 12-bit imm.  */

  if (count >= 8)
    /* Rotated form: an 8-bit value 1bcdefgh rotated right by COUNT.  */
    return (0x80 | (imm & 0x7f)) << (32 - count);

  /* Replicated forms, selected by bits 10:9 of IMM.  */
  switch (count / 2)
    {
    case 0:		/* 00000000 00000000 00000000 abcdefgh  */
      return byte;
    case 1:		/* 00000000 abcdefgh 00000000 abcdefgh  */
      return byte | (byte << 16);
    case 2:		/* abcdefgh 00000000 abcdefgh 00000000  */
      return (byte << 8) | (byte << 24);
    default:		/* abcdefgh abcdefgh abcdefgh abcdefgh  */
      return byte | (byte << 8) | (byte << 16) | (byte << 24);
    }
}
556
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  return ((inst & 0xff00) == 0xbd00	/* pop {rlist, pc} */
	  || (inst & 0xf000) == 0xd000	/* conditional branch */
	  || (inst & 0xf800) == 0xe000	/* unconditional branch */
	  || (inst & 0xff00) == 0x4700	/* bx REG, blx REG */
	  || (inst & 0xff87) == 0x4687	/* mov pc, REG */
	  || (inst & 0xf500) == 0xb100);	/* CBNZ or CBZ.  */
}
583
584 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
585 might change control flow, 0 otherwise. */
586
587 static int
588 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
589 {
590 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
591 {
592 /* Branches and miscellaneous control instructions. */
593
594 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
595 {
596 /* B, BL, BLX. */
597 return 1;
598 }
599 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
600 {
601 /* SUBS PC, LR, #imm8. */
602 return 1;
603 }
604 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
605 {
606 /* Conditional branch. */
607 return 1;
608 }
609
610 return 0;
611 }
612
613 if ((inst1 & 0xfe50) == 0xe810)
614 {
615 /* Load multiple or RFE. */
616
617 if (bit (inst1, 7) && !bit (inst1, 8))
618 {
619 /* LDMIA or POP */
620 if (bit (inst2, 15))
621 return 1;
622 }
623 else if (!bit (inst1, 7) && bit (inst1, 8))
624 {
625 /* LDMDB */
626 if (bit (inst2, 15))
627 return 1;
628 }
629 else if (bit (inst1, 7) && bit (inst1, 8))
630 {
631 /* RFEIA */
632 return 1;
633 }
634 else if (!bit (inst1, 7) && !bit (inst1, 8))
635 {
636 /* RFEDB */
637 return 1;
638 }
639
640 return 0;
641 }
642
643 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
644 {
645 /* MOV PC or MOVS PC. */
646 return 1;
647 }
648
649 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
650 {
651 /* LDR PC. */
652 if (bits (inst1, 0, 3) == 15)
653 return 1;
654 if (bit (inst1, 7))
655 return 1;
656 if (bit (inst2, 11))
657 return 1;
658 if ((inst2 & 0x0fc0) == 0x0000)
659 return 1;
660
661 return 0;
662 }
663
664 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
665 {
666 /* TBB. */
667 return 1;
668 }
669
670 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
671 {
672 /* TBH. */
673 return 1;
674 }
675
676 return 0;
677 }
678
679 /* Analyze a Thumb prologue, looking for a recognizable stack frame
680 and frame pointer. Scan until we encounter a store that could
681 clobber the stack frame unexpectedly, or an unknown instruction.
682 Return the last address which is definitely safe to skip for an
683 initial breakpoint. */
684
685 static CORE_ADDR
686 thumb_analyze_prologue (struct gdbarch *gdbarch,
687 CORE_ADDR start, CORE_ADDR limit,
688 struct arm_prologue_cache *cache)
689 {
690 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
691 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
692 int i;
693 pv_t regs[16];
694 struct pv_area *stack;
695 struct cleanup *back_to;
696 CORE_ADDR offset;
697 CORE_ADDR unrecognized_pc = 0;
698
699 for (i = 0; i < 16; i++)
700 regs[i] = pv_register (i, 0);
701 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
702 back_to = make_cleanup_free_pv_area (stack);
703
704 while (start < limit)
705 {
706 unsigned short insn;
707
708 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
709
710 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
711 {
712 int regno;
713 int mask;
714
715 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
716 break;
717
718 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
719 whether to save LR (R14). */
720 mask = (insn & 0xff) | ((insn & 0x100) << 6);
721
722 /* Calculate offsets of saved R0-R7 and LR. */
723 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
724 if (mask & (1 << regno))
725 {
726 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
727 -4);
728 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
729 }
730 }
731 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
732 sub sp, #simm */
733 {
734 offset = (insn & 0x7f) << 2; /* get scaled offset */
735 if (insn & 0x80) /* Check for SUB. */
736 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
737 -offset);
738 else
739 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
740 offset);
741 }
742 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
743 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
744 (insn & 0xff) << 2);
745 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
746 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
747 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
748 bits (insn, 6, 8));
749 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
750 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
751 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
752 bits (insn, 0, 7));
753 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
754 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
755 && pv_is_constant (regs[bits (insn, 3, 5)]))
756 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
757 regs[bits (insn, 6, 8)]);
758 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
759 && pv_is_constant (regs[bits (insn, 3, 6)]))
760 {
761 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
762 int rm = bits (insn, 3, 6);
763 regs[rd] = pv_add (regs[rd], regs[rm]);
764 }
765 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
766 {
767 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
768 int src_reg = (insn & 0x78) >> 3;
769 regs[dst_reg] = regs[src_reg];
770 }
771 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
772 {
773 /* Handle stores to the stack. Normally pushes are used,
774 but with GCC -mtpcs-frame, there may be other stores
775 in the prologue to create the frame. */
776 int regno = (insn >> 8) & 0x7;
777 pv_t addr;
778
779 offset = (insn & 0xff) << 2;
780 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
781
782 if (pv_area_store_would_trash (stack, addr))
783 break;
784
785 pv_area_store (stack, addr, 4, regs[regno]);
786 }
787 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
788 {
789 int rd = bits (insn, 0, 2);
790 int rn = bits (insn, 3, 5);
791 pv_t addr;
792
793 offset = bits (insn, 6, 10) << 2;
794 addr = pv_add_constant (regs[rn], offset);
795
796 if (pv_area_store_would_trash (stack, addr))
797 break;
798
799 pv_area_store (stack, addr, 4, regs[rd]);
800 }
801 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
802 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
803 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
804 /* Ignore stores of argument registers to the stack. */
805 ;
806 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
807 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
808 /* Ignore block loads from the stack, potentially copying
809 parameters from memory. */
810 ;
811 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
812 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
813 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
814 /* Similarly ignore single loads from the stack. */
815 ;
816 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
817 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
818 /* Skip register copies, i.e. saves to another register
819 instead of the stack. */
820 ;
821 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
822 /* Recognize constant loads; even with small stacks these are necessary
823 on Thumb. */
824 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
825 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
826 {
827 /* Constant pool loads, for the same reason. */
828 unsigned int constant;
829 CORE_ADDR loc;
830
831 loc = start + 4 + bits (insn, 0, 7) * 4;
832 constant = read_memory_unsigned_integer (loc, 4, byte_order);
833 regs[bits (insn, 8, 10)] = pv_constant (constant);
834 }
835 else if ((insn & 0xe000) == 0xe000)
836 {
837 unsigned short inst2;
838
839 inst2 = read_memory_unsigned_integer (start + 2, 2,
840 byte_order_for_code);
841
842 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
843 {
844 /* BL, BLX. Allow some special function calls when
845 skipping the prologue; GCC generates these before
846 storing arguments to the stack. */
847 CORE_ADDR nextpc;
848 int j1, j2, imm1, imm2;
849
850 imm1 = sbits (insn, 0, 10);
851 imm2 = bits (inst2, 0, 10);
852 j1 = bit (inst2, 13);
853 j2 = bit (inst2, 11);
854
855 offset = ((imm1 << 12) + (imm2 << 1));
856 offset ^= ((!j2) << 22) | ((!j1) << 23);
857
858 nextpc = start + 4 + offset;
859 /* For BLX make sure to clear the low bits. */
860 if (bit (inst2, 12) == 0)
861 nextpc = nextpc & 0xfffffffc;
862
863 if (!skip_prologue_function (gdbarch, nextpc,
864 bit (inst2, 12) != 0))
865 break;
866 }
867
868 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
869 { registers } */
870 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
871 {
872 pv_t addr = regs[bits (insn, 0, 3)];
873 int regno;
874
875 if (pv_area_store_would_trash (stack, addr))
876 break;
877
878 /* Calculate offsets of saved registers. */
879 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
880 if (inst2 & (1 << regno))
881 {
882 addr = pv_add_constant (addr, -4);
883 pv_area_store (stack, addr, 4, regs[regno]);
884 }
885
886 if (insn & 0x0020)
887 regs[bits (insn, 0, 3)] = addr;
888 }
889
890 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
891 [Rn, #+/-imm]{!} */
892 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
893 {
894 int regno1 = bits (inst2, 12, 15);
895 int regno2 = bits (inst2, 8, 11);
896 pv_t addr = regs[bits (insn, 0, 3)];
897
898 offset = inst2 & 0xff;
899 if (insn & 0x0080)
900 addr = pv_add_constant (addr, offset);
901 else
902 addr = pv_add_constant (addr, -offset);
903
904 if (pv_area_store_would_trash (stack, addr))
905 break;
906
907 pv_area_store (stack, addr, 4, regs[regno1]);
908 pv_area_store (stack, pv_add_constant (addr, 4),
909 4, regs[regno2]);
910
911 if (insn & 0x0020)
912 regs[bits (insn, 0, 3)] = addr;
913 }
914
915 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
916 && (inst2 & 0x0c00) == 0x0c00
917 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
918 {
919 int regno = bits (inst2, 12, 15);
920 pv_t addr = regs[bits (insn, 0, 3)];
921
922 offset = inst2 & 0xff;
923 if (inst2 & 0x0200)
924 addr = pv_add_constant (addr, offset);
925 else
926 addr = pv_add_constant (addr, -offset);
927
928 if (pv_area_store_would_trash (stack, addr))
929 break;
930
931 pv_area_store (stack, addr, 4, regs[regno]);
932
933 if (inst2 & 0x0100)
934 regs[bits (insn, 0, 3)] = addr;
935 }
936
937 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
938 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
939 {
940 int regno = bits (inst2, 12, 15);
941 pv_t addr;
942
943 offset = inst2 & 0xfff;
944 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
945
946 if (pv_area_store_would_trash (stack, addr))
947 break;
948
949 pv_area_store (stack, addr, 4, regs[regno]);
950 }
951
952 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
953 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
954 /* Ignore stores of argument registers to the stack. */
955 ;
956
957 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
958 && (inst2 & 0x0d00) == 0x0c00
959 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
960 /* Ignore stores of argument registers to the stack. */
961 ;
962
963 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
964 { registers } */
965 && (inst2 & 0x8000) == 0x0000
966 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
967 /* Ignore block loads from the stack, potentially copying
968 parameters from memory. */
969 ;
970
971 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
972 [Rn, #+/-imm] */
973 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
974 /* Similarly ignore dual loads from the stack. */
975 ;
976
977 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
978 && (inst2 & 0x0d00) == 0x0c00
979 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
980 /* Similarly ignore single loads from the stack. */
981 ;
982
983 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
984 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
985 /* Similarly ignore single loads from the stack. */
986 ;
987
988 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
989 && (inst2 & 0x8000) == 0x0000)
990 {
991 unsigned int imm = ((bits (insn, 10, 10) << 11)
992 | (bits (inst2, 12, 14) << 8)
993 | bits (inst2, 0, 7));
994
995 regs[bits (inst2, 8, 11)]
996 = pv_add_constant (regs[bits (insn, 0, 3)],
997 thumb_expand_immediate (imm));
998 }
999
1000 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1001 && (inst2 & 0x8000) == 0x0000)
1002 {
1003 unsigned int imm = ((bits (insn, 10, 10) << 11)
1004 | (bits (inst2, 12, 14) << 8)
1005 | bits (inst2, 0, 7));
1006
1007 regs[bits (inst2, 8, 11)]
1008 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1009 }
1010
1011 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1012 && (inst2 & 0x8000) == 0x0000)
1013 {
1014 unsigned int imm = ((bits (insn, 10, 10) << 11)
1015 | (bits (inst2, 12, 14) << 8)
1016 | bits (inst2, 0, 7));
1017
1018 regs[bits (inst2, 8, 11)]
1019 = pv_add_constant (regs[bits (insn, 0, 3)],
1020 - (CORE_ADDR) thumb_expand_immediate (imm));
1021 }
1022
1023 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1024 && (inst2 & 0x8000) == 0x0000)
1025 {
1026 unsigned int imm = ((bits (insn, 10, 10) << 11)
1027 | (bits (inst2, 12, 14) << 8)
1028 | bits (inst2, 0, 7));
1029
1030 regs[bits (inst2, 8, 11)]
1031 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1032 }
1033
1034 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1035 {
1036 unsigned int imm = ((bits (insn, 10, 10) << 11)
1037 | (bits (inst2, 12, 14) << 8)
1038 | bits (inst2, 0, 7));
1039
1040 regs[bits (inst2, 8, 11)]
1041 = pv_constant (thumb_expand_immediate (imm));
1042 }
1043
1044 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1045 {
1046 unsigned int imm
1047 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1048
1049 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1050 }
1051
1052 else if (insn == 0xea5f /* mov.w Rd,Rm */
1053 && (inst2 & 0xf0f0) == 0)
1054 {
1055 int dst_reg = (inst2 & 0x0f00) >> 8;
1056 int src_reg = inst2 & 0xf;
1057 regs[dst_reg] = regs[src_reg];
1058 }
1059
1060 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1061 {
1062 /* Constant pool loads. */
1063 unsigned int constant;
1064 CORE_ADDR loc;
1065
1066 offset = bits (insn, 0, 11);
1067 if (insn & 0x0080)
1068 loc = start + 4 + offset;
1069 else
1070 loc = start + 4 - offset;
1071
1072 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1073 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1074 }
1075
1076 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1077 {
1078 /* Constant pool loads. */
1079 unsigned int constant;
1080 CORE_ADDR loc;
1081
1082 offset = bits (insn, 0, 7) << 2;
1083 if (insn & 0x0080)
1084 loc = start + 4 + offset;
1085 else
1086 loc = start + 4 - offset;
1087
1088 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1089 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1090
1091 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1092 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1093 }
1094
1095 else if (thumb2_instruction_changes_pc (insn, inst2))
1096 {
1097 /* Don't scan past anything that might change control flow. */
1098 break;
1099 }
1100 else
1101 {
1102 /* The optimizer might shove anything into the prologue,
1103 so we just skip what we don't recognize. */
1104 unrecognized_pc = start;
1105 }
1106
1107 start += 2;
1108 }
1109 else if (thumb_instruction_changes_pc (insn))
1110 {
1111 /* Don't scan past anything that might change control flow. */
1112 break;
1113 }
1114 else
1115 {
1116 /* The optimizer might shove anything into the prologue,
1117 so we just skip what we don't recognize. */
1118 unrecognized_pc = start;
1119 }
1120
1121 start += 2;
1122 }
1123
1124 if (arm_debug)
1125 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1126 paddress (gdbarch, start));
1127
1128 if (unrecognized_pc == 0)
1129 unrecognized_pc = start;
1130
1131 if (cache == NULL)
1132 {
1133 do_cleanups (back_to);
1134 return unrecognized_pc;
1135 }
1136
1137 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1138 {
1139 /* Frame pointer is fp. Frame size is constant. */
1140 cache->framereg = ARM_FP_REGNUM;
1141 cache->framesize = -regs[ARM_FP_REGNUM].k;
1142 }
1143 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1144 {
1145 /* Frame pointer is r7. Frame size is constant. */
1146 cache->framereg = THUMB_FP_REGNUM;
1147 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1148 }
1149 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1150 {
1151 /* Try the stack pointer... this is a bit desperate. */
1152 cache->framereg = ARM_SP_REGNUM;
1153 cache->framesize = -regs[ARM_SP_REGNUM].k;
1154 }
1155 else
1156 {
1157 /* We're just out of luck. We don't know where the frame is. */
1158 cache->framereg = -1;
1159 cache->framesize = 0;
1160 }
1161
1162 for (i = 0; i < 16; i++)
1163 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1164 cache->saved_regs[i].addr = offset;
1165
1166 do_cleanups (back_to);
1167 return unrecognized_pc;
1168 }
1169
1170
/* Try to analyze the instructions starting from PC, which load symbol
   __stack_chk_guard.  Return the address of instruction after loading this
   symbol, set the dest register number to *DESTREG, and set the size of
   instructions for loading symbol in OFFSET.  Return 0 if instructions are
   not recognized.

   Recognized sequences are either a single PC-relative load (ldr
   Rd, <literal>) or a movw/movt immediate pair; anything else yields 0.  */

static CORE_ADDR
arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
				 unsigned int *destreg, int *offset)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  unsigned int low, high, address;

  /* 0 doubles as the "not recognized" return value.  */
  address = 0;
  if (is_thumb)
    {
      unsigned short insn1
	= read_memory_unsigned_integer (pc, 2, byte_order_for_code);

      if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
	{
	  /* Encoding T1: Rt in bits 8-10, imm8 in bits 0-7.
	     NOTE(review): ADDRESS is set to the raw imm8 field rather
	     than the resolved literal-pool address -- confirm the
	     caller's symbol lookup copes with this.  */
	  *destreg = bits (insn1, 8, 10);
	  *offset = 2;
	  address = bits (insn1, 0, 7);
	}
      else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
	{
	  unsigned short insn2
	    = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

	  low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);

	  /* A movw is only recognized when followed by the matching
	     movt; read the next 32-bit (two halfword) instruction.  */
	  insn1
	    = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
	  insn2
	    = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);

	  /* movt Rd, #const */
	  if ((insn1 & 0xfbc0) == 0xf2c0)
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
	      *destreg = bits (insn2, 8, 11);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }
  else
    {
      unsigned int insn
	= read_memory_unsigned_integer (pc, 4, byte_order_for_code);

      if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
	{
	  /* PC-relative load.  NOTE(review): as in the Thumb case,
	     ADDRESS here is only the imm12 offset field, not the
	     computed literal address -- verify against callers.  */
	  address = bits (insn, 0, 11);
	  *destreg = bits (insn, 12, 15);
	  *offset = 4;
	}
      else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
	{
	  low = EXTRACT_MOVW_MOVT_IMM_A (insn);

	  insn
	    = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);

	  if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_A (insn);
	      *destreg = bits (insn, 12, 15);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }

  return address;
}
1249
/* Try to skip a sequence of instructions used for stack protector.  If PC
   points to the first instruction of this sequence, return the address of
   first instruction after this sequence, otherwise, return original PC.

   On arm, this sequence of instructions is composed of mainly three steps,
     Step 1: load symbol __stack_chk_guard,
     Step 2: load from address of __stack_chk_guard,
     Step 3: store it to somewhere else.

   Usually, instructions on step 2 and step 3 are the same on various ARM
   architectures.  On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
   on step 3, it is also one instruction 'str Rx, [r7, #immd]'.  However,
   instructions in step 1 vary from different ARM architectures.  On ARMv7,
   they are,

	movw	Rn, #:lower16:__stack_chk_guard
	movt	Rn, #:upper16:__stack_chk_guard

   On ARMv5t, it is,

	ldr	Rn, .Label
	....
	.Label:
	.word	__stack_chk_guard

   Since ldr/str is a very popular instruction, we can't use them as
   'fingerprint' or 'signature' of stack protector sequence.  Here we choose
   sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */

static CORE_ADDR
arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int address, basereg;
  struct minimal_symbol *stack_chk_guard;
  int offset;
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  CORE_ADDR addr;

  /* Try to parse the instructions in Step 1.  */
  addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
					   &basereg, &offset);
  if (!addr)
    return pc;

  stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
  /* If name of symbol doesn't start with '__stack_chk_guard', this
     instruction sequence is not for stack protector.  If symbol is
     removed, we conservatively think this sequence is for stack protector.  */
  if (stack_chk_guard
      && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
		  strlen ("__stack_chk_guard")) != 0)
    return pc;

  if (is_thumb)
    {
      unsigned int destreg;
      unsigned short insn
	= read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6800)
	return pc;
      /* The load must read through the register Step 1 loaded.  */
      if (bits (insn, 3, 5) != basereg)
	return pc;
      destreg = bits (insn, 0, 2);

      insn = read_memory_unsigned_integer (pc + offset + 2, 2,
					   byte_order_for_code);
      /* Step 3: str Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6000)
	return pc;
      /* And the store must write the register Step 2 loaded.  */
      if (destreg != bits (insn, 0, 2))
	return pc;
    }
  else
    {
      unsigned int destreg;
      unsigned int insn
	= read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding A1.  */
      if ((insn & 0x0e500000) != 0x04100000)
	return pc;
      /* The load must read through the register Step 1 loaded.  */
      if (bits (insn, 16, 19) != basereg)
	return pc;
      destreg = bits (insn, 12, 15);
      /* Step 3: str Rd, [Rn, #immed], encoding A1.  */
      insn = read_memory_unsigned_integer (pc + offset + 4,
					   4, byte_order_for_code);
      if ((insn & 0x0e500000) != 0x04000000)
	return pc;
      /* And the store must write the register Step 2 loaded.  */
      if (bits (insn, 12, 15) != destreg)
	return pc;
    }
  /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
     on arm.  */
  if (is_thumb)
    return pc + offset + 4;
  else
    return pc + offset + 8;
}
1353
1354 /* Advance the PC across any function entry prologue instructions to
1355 reach some "real" code.
1356
1357 The APCS (ARM Procedure Call Standard) defines the following
1358 prologue:
1359
1360 mov ip, sp
1361 [stmfd sp!, {a1,a2,a3,a4}]
1362 stmfd sp!, {...,fp,ip,lr,pc}
1363 [stfe f7, [sp, #-12]!]
1364 [stfe f6, [sp, #-12]!]
1365 [stfe f5, [sp, #-12]!]
1366 [stfe f4, [sp, #-12]!]
1367 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1368
1369 static CORE_ADDR
1370 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1371 {
1372 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1373 unsigned long inst;
1374 CORE_ADDR skip_pc;
1375 CORE_ADDR func_addr, limit_pc;
1376 struct symtab_and_line sal;
1377
1378 /* See if we can determine the end of the prologue via the symbol table.
1379 If so, then return either PC, or the PC after the prologue, whichever
1380 is greater. */
1381 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1382 {
1383 CORE_ADDR post_prologue_pc
1384 = skip_prologue_using_sal (gdbarch, func_addr);
1385 struct symtab *s = find_pc_symtab (func_addr);
1386
1387 if (post_prologue_pc)
1388 post_prologue_pc
1389 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1390
1391
1392 /* GCC always emits a line note before the prologue and another
1393 one after, even if the two are at the same address or on the
1394 same line. Take advantage of this so that we do not need to
1395 know every instruction that might appear in the prologue. We
1396 will have producer information for most binaries; if it is
1397 missing (e.g. for -gstabs), assuming the GNU tools. */
1398 if (post_prologue_pc
1399 && (s == NULL
1400 || s->producer == NULL
1401 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1402 return post_prologue_pc;
1403
1404 if (post_prologue_pc != 0)
1405 {
1406 CORE_ADDR analyzed_limit;
1407
1408 /* For non-GCC compilers, make sure the entire line is an
1409 acceptable prologue; GDB will round this function's
1410 return value up to the end of the following line so we
1411 can not skip just part of a line (and we do not want to).
1412
1413 RealView does not treat the prologue specially, but does
1414 associate prologue code with the opening brace; so this
1415 lets us skip the first line if we think it is the opening
1416 brace. */
1417 if (arm_pc_is_thumb (gdbarch, func_addr))
1418 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1419 post_prologue_pc, NULL);
1420 else
1421 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1422 post_prologue_pc, NULL);
1423
1424 if (analyzed_limit != post_prologue_pc)
1425 return func_addr;
1426
1427 return post_prologue_pc;
1428 }
1429 }
1430
1431 /* Can't determine prologue from the symbol table, need to examine
1432 instructions. */
1433
1434 /* Find an upper limit on the function prologue using the debug
1435 information. If the debug information could not be used to provide
1436 that bound, then use an arbitrary large number as the upper bound. */
1437 /* Like arm_scan_prologue, stop no later than pc + 64. */
1438 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1439 if (limit_pc == 0)
1440 limit_pc = pc + 64; /* Magic. */
1441
1442
1443 /* Check if this is Thumb code. */
1444 if (arm_pc_is_thumb (gdbarch, pc))
1445 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1446
1447 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1448 {
1449 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1450
1451 /* "mov ip, sp" is no longer a required part of the prologue. */
1452 if (inst == 0xe1a0c00d) /* mov ip, sp */
1453 continue;
1454
1455 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1456 continue;
1457
1458 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1459 continue;
1460
1461 /* Some prologues begin with "str lr, [sp, #-4]!". */
1462 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1463 continue;
1464
1465 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1466 continue;
1467
1468 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1469 continue;
1470
1471 /* Any insns after this point may float into the code, if it makes
1472 for better instruction scheduling, so we skip them only if we
1473 find them, but still consider the function to be frame-ful. */
1474
1475 /* We may have either one sfmfd instruction here, or several stfe
1476 insns, depending on the version of floating point code we
1477 support. */
1478 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1479 continue;
1480
1481 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1482 continue;
1483
1484 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1485 continue;
1486
1487 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1488 continue;
1489
1490 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1491 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1492 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1493 continue;
1494
1495 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1496 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1497 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1498 continue;
1499
1500 /* Un-recognized instruction; stop scanning. */
1501 break;
1502 }
1503
1504 return skip_pc; /* End of prologue. */
1505 }
1506
1507 /* *INDENT-OFF* */
1508 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1509 This function decodes a Thumb function prologue to determine:
1510 1) the size of the stack frame
1511 2) which registers are saved on it
1512 3) the offsets of saved regs
1513 4) the offset from the stack pointer to the frame pointer
1514
1515 A typical Thumb function prologue would create this stack frame
1516 (offsets relative to FP)
1517 old SP -> 24 stack parameters
1518 20 LR
1519 16 R7
1520 R7 -> 0 local variables (16 bytes)
1521 SP -> -12 additional stack space (12 bytes)
1522 The frame size would thus be 36 bytes, and the frame offset would be
1523 12 bytes. The frame register is R7.
1524
1525 The comments for thumb_skip_prolog() describe the algorithm we use
1526 to detect the end of the prolog. */
1527 /* *INDENT-ON* */
1528
1529 static void
1530 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1531 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1532 {
1533 CORE_ADDR prologue_start;
1534 CORE_ADDR prologue_end;
1535 CORE_ADDR current_pc;
1536
1537 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1538 &prologue_end))
1539 {
1540 /* See comment in arm_scan_prologue for an explanation of
1541 this heuristics. */
1542 if (prologue_end > prologue_start + 64)
1543 {
1544 prologue_end = prologue_start + 64;
1545 }
1546 }
1547 else
1548 /* We're in the boondocks: we have no idea where the start of the
1549 function is. */
1550 return;
1551
1552 prologue_end = min (prologue_end, prev_pc);
1553
1554 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1555 }
1556
/* Return 1 if THIS_INSTR might change control flow, 0 otherwise.

   Used by the prologue scanners to stop before any ARM instruction
   that could branch or otherwise write the PC.  */

static int
arm_instruction_changes_pc (uint32_t this_instr)
{
  if (bits (this_instr, 28, 31) == INST_NV)
    /* Unconditional instructions.  */
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	/* Branch with Link and change to Thumb.  */
	return 1;
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	return 0;
      default:
	return 0;
      }
  else
    /* Conditional instructions, dispatched on op bits 25-27.  */
    switch (bits (this_instr, 25, 27))
      {
      case 0x0:
	if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
	  {
	    /* Multiplies and extra load/stores.  */
	    if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
	      /* Neither multiplies nor extension load/stores are allowed
		 to modify PC.  */
	      return 0;

	    /* Otherwise, miscellaneous instructions.  */

	    /* BX <reg>, BXJ <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff2
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      return 1;

	    /* Other miscellaneous instructions are unpredictable if they
	       modify PC.  */
	    return 0;
	  }
	/* Data processing instruction.  Fall through.  */

      case 0x1:
	/* Data processing: changes PC iff the destination is r15.  */
	if (bits (this_instr, 12, 15) == 15)
	  return 1;
	else
	  return 0;

      case 0x2:
      case 0x3:
	/* Media instructions and architecturally undefined instructions.  */
	if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
	  return 0;

	/* Stores.  */
	if (bit (this_instr, 20) == 0)
	  return 0;

	/* Loads.  */
	if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
	  return 1;
	else
	  return 0;

      case 0x4:
	/* Load/store multiple.  Changes PC only when it is a load (bit
	   20) whose register list includes r15 (bit 15).  */
	if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
	  return 1;
	else
	  return 0;

      case 0x5:
	/* Branch and branch with link.  */
	return 1;

      case 0x6:
      case 0x7:
	/* Coprocessor transfers or SWIs can not affect PC.  */
	return 0;

      default:
	internal_error (__FILE__, __LINE__, _("bad value in switch"));
      }
}
1648
1649 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1650 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1651 fill it in. Return the first address not recognized as a prologue
1652 instruction.
1653
1654 We recognize all the instructions typically found in ARM prologues,
1655 plus harmless instructions which can be skipped (either for analysis
1656 purposes, or a more restrictive set that can be skipped when finding
1657 the end of the prologue). */
1658
1659 static CORE_ADDR
1660 arm_analyze_prologue (struct gdbarch *gdbarch,
1661 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1662 struct arm_prologue_cache *cache)
1663 {
1664 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1665 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1666 int regno;
1667 CORE_ADDR offset, current_pc;
1668 pv_t regs[ARM_FPS_REGNUM];
1669 struct pv_area *stack;
1670 struct cleanup *back_to;
1671 int framereg, framesize;
1672 CORE_ADDR unrecognized_pc = 0;
1673
1674 /* Search the prologue looking for instructions that set up the
1675 frame pointer, adjust the stack pointer, and save registers.
1676
1677 Be careful, however, and if it doesn't look like a prologue,
1678 don't try to scan it. If, for instance, a frameless function
1679 begins with stmfd sp!, then we will tell ourselves there is
1680 a frame, which will confuse stack traceback, as well as "finish"
1681 and other operations that rely on a knowledge of the stack
1682 traceback. */
1683
1684 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1685 regs[regno] = pv_register (regno, 0);
1686 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1687 back_to = make_cleanup_free_pv_area (stack);
1688
1689 for (current_pc = prologue_start;
1690 current_pc < prologue_end;
1691 current_pc += 4)
1692 {
1693 unsigned int insn
1694 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1695
1696 if (insn == 0xe1a0c00d) /* mov ip, sp */
1697 {
1698 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1699 continue;
1700 }
1701 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1702 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1703 {
1704 unsigned imm = insn & 0xff; /* immediate value */
1705 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1706 int rd = bits (insn, 12, 15);
1707 imm = (imm >> rot) | (imm << (32 - rot));
1708 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1709 continue;
1710 }
1711 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1712 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1713 {
1714 unsigned imm = insn & 0xff; /* immediate value */
1715 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1716 int rd = bits (insn, 12, 15);
1717 imm = (imm >> rot) | (imm << (32 - rot));
1718 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1719 continue;
1720 }
1721 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1722 [sp, #-4]! */
1723 {
1724 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1725 break;
1726 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1727 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1728 regs[bits (insn, 12, 15)]);
1729 continue;
1730 }
1731 else if ((insn & 0xffff0000) == 0xe92d0000)
1732 /* stmfd sp!, {..., fp, ip, lr, pc}
1733 or
1734 stmfd sp!, {a1, a2, a3, a4} */
1735 {
1736 int mask = insn & 0xffff;
1737
1738 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1739 break;
1740
1741 /* Calculate offsets of saved registers. */
1742 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1743 if (mask & (1 << regno))
1744 {
1745 regs[ARM_SP_REGNUM]
1746 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1747 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1748 }
1749 }
1750 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1751 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1752 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1753 {
1754 /* No need to add this to saved_regs -- it's just an arg reg. */
1755 continue;
1756 }
1757 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1758 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1759 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1760 {
1761 /* No need to add this to saved_regs -- it's just an arg reg. */
1762 continue;
1763 }
1764 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1765 { registers } */
1766 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1767 {
1768 /* No need to add this to saved_regs -- it's just arg regs. */
1769 continue;
1770 }
1771 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1772 {
1773 unsigned imm = insn & 0xff; /* immediate value */
1774 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1775 imm = (imm >> rot) | (imm << (32 - rot));
1776 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1777 }
1778 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1779 {
1780 unsigned imm = insn & 0xff; /* immediate value */
1781 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1782 imm = (imm >> rot) | (imm << (32 - rot));
1783 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1784 }
1785 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1786 [sp, -#c]! */
1787 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1788 {
1789 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1790 break;
1791
1792 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1793 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1794 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1795 }
1796 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1797 [sp!] */
1798 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1799 {
1800 int n_saved_fp_regs;
1801 unsigned int fp_start_reg, fp_bound_reg;
1802
1803 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1804 break;
1805
1806 if ((insn & 0x800) == 0x800) /* N0 is set */
1807 {
1808 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1809 n_saved_fp_regs = 3;
1810 else
1811 n_saved_fp_regs = 1;
1812 }
1813 else
1814 {
1815 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1816 n_saved_fp_regs = 2;
1817 else
1818 n_saved_fp_regs = 4;
1819 }
1820
1821 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1822 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1823 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1824 {
1825 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1826 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1827 regs[fp_start_reg++]);
1828 }
1829 }
1830 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1831 {
1832 /* Allow some special function calls when skipping the
1833 prologue; GCC generates these before storing arguments to
1834 the stack. */
1835 CORE_ADDR dest = BranchDest (current_pc, insn);
1836
1837 if (skip_prologue_function (gdbarch, dest, 0))
1838 continue;
1839 else
1840 break;
1841 }
1842 else if ((insn & 0xf0000000) != 0xe0000000)
1843 break; /* Condition not true, exit early. */
1844 else if (arm_instruction_changes_pc (insn))
1845 /* Don't scan past anything that might change control flow. */
1846 break;
1847 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1848 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1849 /* Ignore block loads from the stack, potentially copying
1850 parameters from memory. */
1851 continue;
1852 else if ((insn & 0xfc500000) == 0xe4100000
1853 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1854 /* Similarly ignore single loads from the stack. */
1855 continue;
1856 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1857 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1858 register instead of the stack. */
1859 continue;
1860 else
1861 {
1862 /* The optimizer might shove anything into the prologue,
1863 so we just skip what we don't recognize. */
1864 unrecognized_pc = current_pc;
1865 continue;
1866 }
1867 }
1868
1869 if (unrecognized_pc == 0)
1870 unrecognized_pc = current_pc;
1871
1872 /* The frame size is just the distance from the frame register
1873 to the original stack pointer. */
1874 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1875 {
1876 /* Frame pointer is fp. */
1877 framereg = ARM_FP_REGNUM;
1878 framesize = -regs[ARM_FP_REGNUM].k;
1879 }
1880 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1881 {
1882 /* Try the stack pointer... this is a bit desperate. */
1883 framereg = ARM_SP_REGNUM;
1884 framesize = -regs[ARM_SP_REGNUM].k;
1885 }
1886 else
1887 {
1888 /* We're just out of luck. We don't know where the frame is. */
1889 framereg = -1;
1890 framesize = 0;
1891 }
1892
1893 if (cache)
1894 {
1895 cache->framereg = framereg;
1896 cache->framesize = framesize;
1897
1898 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1899 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1900 cache->saved_regs[regno].addr = offset;
1901 }
1902
1903 if (arm_debug)
1904 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1905 paddress (gdbarch, unrecognized_pc));
1906
1907 do_cleanups (back_to);
1908 return unrecognized_pc;
1909 }
1910
1911 static void
1912 arm_scan_prologue (struct frame_info *this_frame,
1913 struct arm_prologue_cache *cache)
1914 {
1915 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1916 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1917 int regno;
1918 CORE_ADDR prologue_start, prologue_end, current_pc;
1919 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1920 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1921 pv_t regs[ARM_FPS_REGNUM];
1922 struct pv_area *stack;
1923 struct cleanup *back_to;
1924 CORE_ADDR offset;
1925
1926 /* Assume there is no frame until proven otherwise. */
1927 cache->framereg = ARM_SP_REGNUM;
1928 cache->framesize = 0;
1929
1930 /* Check for Thumb prologue. */
1931 if (arm_frame_is_thumb (this_frame))
1932 {
1933 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1934 return;
1935 }
1936
1937 /* Find the function prologue. If we can't find the function in
1938 the symbol table, peek in the stack frame to find the PC. */
1939 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1940 &prologue_end))
1941 {
1942 /* One way to find the end of the prologue (which works well
1943 for unoptimized code) is to do the following:
1944
1945 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1946
1947 if (sal.line == 0)
1948 prologue_end = prev_pc;
1949 else if (sal.end < prologue_end)
1950 prologue_end = sal.end;
1951
1952 This mechanism is very accurate so long as the optimizer
1953 doesn't move any instructions from the function body into the
1954 prologue. If this happens, sal.end will be the last
1955 instruction in the first hunk of prologue code just before
1956 the first instruction that the scheduler has moved from
1957 the body to the prologue.
1958
1959 In order to make sure that we scan all of the prologue
1960 instructions, we use a slightly less accurate mechanism which
1961 may scan more than necessary. To help compensate for this
1962 lack of accuracy, the prologue scanning loop below contains
1963 several clauses which'll cause the loop to terminate early if
1964 an implausible prologue instruction is encountered.
1965
1966 The expression
1967
1968 prologue_start + 64
1969
1970 is a suitable endpoint since it accounts for the largest
1971 possible prologue plus up to five instructions inserted by
1972 the scheduler. */
1973
1974 if (prologue_end > prologue_start + 64)
1975 {
1976 prologue_end = prologue_start + 64; /* See above. */
1977 }
1978 }
1979 else
1980 {
1981 /* We have no symbol information. Our only option is to assume this
1982 function has a standard stack frame and the normal frame register.
1983 Then, we can find the value of our frame pointer on entrance to
1984 the callee (or at the present moment if this is the innermost frame).
1985 The value stored there should be the address of the stmfd + 8. */
1986 CORE_ADDR frame_loc;
1987 LONGEST return_value;
1988
1989 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1990 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1991 return;
1992 else
1993 {
1994 prologue_start = gdbarch_addr_bits_remove
1995 (gdbarch, return_value) - 8;
1996 prologue_end = prologue_start + 64; /* See above. */
1997 }
1998 }
1999
2000 if (prev_pc < prologue_end)
2001 prologue_end = prev_pc;
2002
2003 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2004 }
2005
2006 static struct arm_prologue_cache *
2007 arm_make_prologue_cache (struct frame_info *this_frame)
2008 {
2009 int reg;
2010 struct arm_prologue_cache *cache;
2011 CORE_ADDR unwound_fp;
2012
2013 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2014 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2015
2016 arm_scan_prologue (this_frame, cache);
2017
2018 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2019 if (unwound_fp == 0)
2020 return cache;
2021
2022 cache->prev_sp = unwound_fp + cache->framesize;
2023
2024 /* Calculate actual addresses of saved registers using offsets
2025 determined by arm_scan_prologue. */
2026 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2027 if (trad_frame_addr_p (cache->saved_regs, reg))
2028 cache->saved_regs[reg].addr += cache->prev_sp;
2029
2030 return cache;
2031 }
2032
2033 /* Our frame ID for a normal frame is the current function's starting PC
2034 and the caller's SP when we were called. */
2035
2036 static void
2037 arm_prologue_this_id (struct frame_info *this_frame,
2038 void **this_cache,
2039 struct frame_id *this_id)
2040 {
2041 struct arm_prologue_cache *cache;
2042 struct frame_id id;
2043 CORE_ADDR pc, func;
2044
2045 if (*this_cache == NULL)
2046 *this_cache = arm_make_prologue_cache (this_frame);
2047 cache = *this_cache;
2048
2049 /* This is meant to halt the backtrace at "_start". */
2050 pc = get_frame_pc (this_frame);
2051 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2052 return;
2053
2054 /* If we've hit a wall, stop. */
2055 if (cache->prev_sp == 0)
2056 return;
2057
2058 /* Use function start address as part of the frame ID. If we cannot
2059 identify the start address (due to missing symbol information),
2060 fall back to just using the current PC. */
2061 func = get_frame_func (this_frame);
2062 if (!func)
2063 func = pc;
2064
2065 id = frame_id_build (cache->prev_sp, func);
2066 *this_id = id;
2067 }
2068
/* Implement the prev_register hook for the prologue-analysis unwinder.
   Return the value the previous frame had in register PREV_REGNUM.
   PC, SP and CPSR need special reconstruction (see below); everything
   else is looked up in the save locations recorded in the cache.  */

static struct value *
arm_prologue_prev_register (struct frame_info *this_frame,
			    void **this_cache,
			    int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct arm_prologue_cache *cache;

  /* Lazily build the prologue cache on first use.  */
  if (*this_cache == NULL)
    *this_cache = arm_make_prologue_cache (this_frame);
  cache = *this_cache;

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  Also
     strip the saved T bit.  A valid LR may have the low bit set, but
     a valid PC never does.  */
  if (prev_regnum == ARM_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum,
					arm_addr_bits_remove (gdbarch, lr));
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the call.
     The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == ARM_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);

  /* The CPSR may have been changed by the call instruction and by the
     called function.  The only bit we can reconstruct is the T bit,
     by checking the low bit of LR as of the call.  This is a reliable
     indicator of Thumb-ness except for some ARM v4T pre-interworking
     Thumb code, which could get away with a clear low bit as long as
     the called function did not use bx.  Guess that all other
     bits are unchanged; the condition flags are presumably lost,
     but the processor status is likely valid.  */
  if (prev_regnum == ARM_PS_REGNUM)
    {
      CORE_ADDR lr, cpsr;
      ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);

      cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      if (IS_THUMB_ADDR (lr))
	cpsr |= t_bit;
      else
	cpsr &= ~t_bit;
      return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
    }

  /* Ordinary registers: use the save addresses recorded by the
     prologue scan.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
2126
/* Frame unwinder driven by prologue analysis; used when neither DWARF
   CFI nor exception table information applies.  */
struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2135
2136 /* Maintain a list of ARM exception table entries per objfile, similar to the
2137 list of mapping symbols. We only cache entries for standard ARM-defined
2138 personality routines; the cache will contain only the frame unwinding
2139 instructions associated with the entry (not the descriptors). */
2140
/* Per-objfile registry key under which the exception table cache is
   stored.  */
static const struct objfile_data *arm_exidx_data_key;

/* One cached exception table entry.  */
struct arm_exidx_entry
{
  bfd_vma addr;		/* Function start address, as a section offset.  */
  gdb_byte *entry;	/* Normalized unwind instructions, or NULL.  */
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

/* Exception table cache for one objfile: a vector of entries per BFD
   section, indexed by section index.  */
struct arm_exidx_data
{
  VEC(arm_exidx_entry_s) **section_maps;
};
2155
2156 static void
2157 arm_exidx_data_free (struct objfile *objfile, void *arg)
2158 {
2159 struct arm_exidx_data *data = arg;
2160 unsigned int i;
2161
2162 for (i = 0; i < objfile->obfd->section_count; i++)
2163 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2164 }
2165
2166 static inline int
2167 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2168 const struct arm_exidx_entry *rhs)
2169 {
2170 return lhs->addr < rhs->addr;
2171 }
2172
2173 static struct obj_section *
2174 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2175 {
2176 struct obj_section *osect;
2177
2178 ALL_OBJFILE_OSECTIONS (objfile, osect)
2179 if (bfd_get_section_flags (objfile->obfd,
2180 osect->the_bfd_section) & SEC_ALLOC)
2181 {
2182 bfd_vma start, size;
2183 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2184 size = bfd_get_section_size (osect->the_bfd_section);
2185
2186 if (start <= vma && vma < start + size)
2187 return osect;
2188 }
2189
2190 return NULL;
2191 }
2192
2193 /* Parse contents of exception table and exception index sections
2194 of OBJFILE, and fill in the exception table entry cache.
2195
2196 For each entry that refers to a standard ARM-defined personality
2197 routine, extract the frame unwinding instructions (from either
2198 the index or the table section). The unwinding instructions
2199 are normalized by:
2200 - extracting them from the rest of the table data
2201 - converting to host endianness
2202 - appending the implicit 0xb0 ("Finish") code
2203
2204 The extracted and normalized instructions are stored for later
2205 retrieval by the arm_find_exidx_entry routine. */
2206
static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  bfd_size_type exidx_size = 0, extab_size = 0;
  gdb_byte *exidx_data = NULL, *extab_data = NULL;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_size = bfd_get_section_size (exidx);
      exidx_data = xmalloc (exidx_size);
      make_cleanup (xfree, exidx_data);

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data, 0, exidx_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_size = bfd_get_section_size (extab);
      extab_data = xmalloc (extab_size);
      make_cleanup (xfree, extab_data);

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data, 0, extab_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  /* Allocate exception table data structure.  */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index table entry is a pair of
     32-bit words: a 31-bit place-relative offset to the function
     start, and either an inline entry, EXIDX_CANTUNWIND, or an
     offset into .ARM.extab.  */
  for (i = 0; i < exidx_size / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function.  The XOR/subtract pair
	 sign-extends the 31-bit self-relative offset.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  Again, sign-extend
	     the 31-bit self-relative offset.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_size)
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   extab_data + addr - extab_vma);
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address.  */
      if (n_words)
	if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
					       n_bytes + n_words * 4 + 1);

	  /* Copy the instruction bytes, converting each big-endian
	     word to a host byte sequence.  */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }

  do_cleanups (cleanups);
}
2414
2415 /* Search for the exception table entry covering MEMADDR. If one is found,
2416 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2417 set *START to the start of the region covered by this entry. */
2418
static gdb_byte *
arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_exidx_data *data;
      VEC(arm_exidx_entry_s) *map;
      /* Cached entries store section offsets, so convert MEMADDR.  */
      struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_exidx_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_exidx_entry_s, map))
	    {
	      struct arm_exidx_entry *map_sym;

	      idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
				     arm_compare_exidx_entries);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 exception table entry covers this address.  */
	      if (idx < VEC_length (arm_exidx_entry_s, map))
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx);
		  if (map_sym->addr == map_key.addr)
		    {
		      if (start)
			*start = map_sym->addr + obj_section_addr (sec);
		      return map_sym->entry;
		    }
		}

	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
		  if (start)
		    *start = map_sym->addr + obj_section_addr (sec);
		  return map_sym->entry;
		}
	    }
	}
    }

  return NULL;
}
2471
2472 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2473 instruction list from the ARM exception table entry ENTRY, allocate and
2474 return a prologue cache structure describing how to unwind this frame.
2475
2476 Return NULL if the unwinding instruction list contains a "spare",
2477 "reserved" or "refuse to unwind" instruction as defined in section
2478 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2479 for the ARM Architecture" document. */
2480
static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  CORE_ADDR vsp = 0;		/* Virtual stack pointer tracked by the
				   unwind instruction interpreter.  */
  int vsp_valid = 0;

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.  */
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  */
      insn = *entry++;

      if ((insn & 0xc0) == 0)
	{
	  /* 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      else if ((insn & 0xc0) == 0x40)
	{
	  /* 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4.  */
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  int reg = insn & 0xf;

	  /* Reserved cases.  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* "Finish".  We could only have updated PC by popping into
	     it; if so, it will show up as address.  Otherwise, copy
	     LR into PC.  */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* vsp = vsp + 0x204 + (uleb128 << 2); the offset is encoded
	     as a ULEB128 value (7 bits per byte, 0x80 continuation).  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if (insn == 0xc6)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}
2770
2771 /* Unwinding via ARM exception table entries. Note that the sniffer
2772 already computes a filled-in prologue cache, which is then used
2773 with the same arm_prologue_this_id and arm_prologue_prev_register
2774 routines also used for prologue-parsing based unwinding. */
2775
static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  LONGEST insn;

	  /* Check for a 16-bit Thumb "svc" just before the PC.  */
	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
					byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  LONGEST insn;

	  /* Check for a 32-bit ARM "svc" just before the PC.  */
	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
					byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2865
/* Frame unwinder based on ARM exception table entries.  The sniffer
   already fills in the prologue cache, which is then consumed by the
   same this_id/prev_register routines as the prologue unwinder.  */
struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2874
2875 static struct arm_prologue_cache *
2876 arm_make_stub_cache (struct frame_info *this_frame)
2877 {
2878 struct arm_prologue_cache *cache;
2879
2880 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2881 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2882
2883 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2884
2885 return cache;
2886 }
2887
2888 /* Our frame ID for a stub frame is the current SP and LR. */
2889
2890 static void
2891 arm_stub_this_id (struct frame_info *this_frame,
2892 void **this_cache,
2893 struct frame_id *this_id)
2894 {
2895 struct arm_prologue_cache *cache;
2896
2897 if (*this_cache == NULL)
2898 *this_cache = arm_make_stub_cache (this_frame);
2899 cache = *this_cache;
2900
2901 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2902 }
2903
2904 static int
2905 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2906 struct frame_info *this_frame,
2907 void **this_prologue_cache)
2908 {
2909 CORE_ADDR addr_in_block;
2910 char dummy[4];
2911
2912 addr_in_block = get_frame_address_in_block (this_frame);
2913 if (in_plt_section (addr_in_block, NULL)
2914 /* We also use the stub winder if the target memory is unreadable
2915 to avoid having the prologue unwinder trying to read it. */
2916 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2917 return 1;
2918
2919 return 0;
2920 }
2921
/* Frame unwinder for stub frames (PLT entries and unreadable code);
   shares prev_register with the prologue unwinder but uses its own
   frame-ID and sniffer routines.  */
struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2930
2931 static CORE_ADDR
2932 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2933 {
2934 struct arm_prologue_cache *cache;
2935
2936 if (*this_cache == NULL)
2937 *this_cache = arm_make_prologue_cache (this_frame);
2938 cache = *this_cache;
2939
2940 return cache->prev_sp - cache->framesize;
2941 }
2942
/* Frame base definition for the prologue unwinder; the same address
   serves as frame, args and locals base.  */
struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,
  arm_normal_frame_base,
  arm_normal_frame_base
};
2949
2950 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2951 dummy frame. The frame ID's base needs to match the TOS value
2952 saved by save_dummy_frame_tos() and returned from
2953 arm_push_dummy_call, and the PC needs to match the dummy frame's
2954 breakpoint. */
2955
2956 static struct frame_id
2957 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2958 {
2959 return frame_id_build (get_frame_register_unsigned (this_frame,
2960 ARM_SP_REGNUM),
2961 get_frame_pc (this_frame));
2962 }
2963
2964 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2965 be used to construct the previous frame's ID, after looking up the
2966 containing function). */
2967
2968 static CORE_ADDR
2969 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2970 {
2971 CORE_ADDR pc;
2972 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2973 return arm_addr_bits_remove (gdbarch, pc);
2974 }
2975
2976 static CORE_ADDR
2977 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2978 {
2979 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2980 }
2981
2982 static struct value *
2983 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2984 int regnum)
2985 {
2986 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2987 CORE_ADDR lr, cpsr;
2988 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2989
2990 switch (regnum)
2991 {
2992 case ARM_PC_REGNUM:
2993 /* The PC is normally copied from the return column, which
2994 describes saves of LR. However, that version may have an
2995 extra bit set to indicate Thumb state. The bit is not
2996 part of the PC. */
2997 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2998 return frame_unwind_got_constant (this_frame, regnum,
2999 arm_addr_bits_remove (gdbarch, lr));
3000
3001 case ARM_PS_REGNUM:
3002 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3003 cpsr = get_frame_register_unsigned (this_frame, regnum);
3004 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3005 if (IS_THUMB_ADDR (lr))
3006 cpsr |= t_bit;
3007 else
3008 cpsr &= ~t_bit;
3009 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3010
3011 default:
3012 internal_error (__FILE__, __LINE__,
3013 _("Unexpected register %d"), regnum);
3014 }
3015 }
3016
3017 static void
3018 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3019 struct dwarf2_frame_state_reg *reg,
3020 struct frame_info *this_frame)
3021 {
3022 switch (regnum)
3023 {
3024 case ARM_PC_REGNUM:
3025 case ARM_PS_REGNUM:
3026 reg->how = DWARF2_FRAME_REG_FN;
3027 reg->loc.fn = arm_dwarf2_prev_register;
3028 break;
3029 case ARM_SP_REGNUM:
3030 reg->how = DWARF2_FRAME_REG_CFA;
3031 break;
3032 }
3033 }
3034
3035 /* Return true if we are in the function's epilogue, i.e. after the
3036 instruction that destroyed the function's stack frame. */
3037
static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without function bounds we cannot scan; assume not in epilogue.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      /* Fetch the next 16-bit Thumb halfword; give up on read errors.  */
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (insn == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn & 0xfe00) == 0xbc00)  /* pop <registers> */
	{
	  found_stack_adjust = 1;
	  if (insn & 0x0100)  /* <registers> include PC.  */
	    found_return = 1;
	}
      else if ((insn & 0xe000) == 0xe000)  /* 32-bit Thumb-2 instruction */
	{
	  /* Read the second halfword of the 32-bit encoding.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    /* Any other 32-bit instruction is not part of an epilogue.  */
	    break;
	}
      else
	/* Any other 16-bit instruction is not part of an epilogue.  */
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* INSN is the halfword at PC-4 (first half of a possible 32-bit
	 insn); INSN2 is the halfword at PC-2 (a possible 16-bit insn,
	 or the second half of the 32-bit one).  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (insn2 == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xbc00)  /* pop <registers> without PC */
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}
3159
3160 /* Return true if we are in the function's epilogue, i.e. after the
3161 instruction that destroyed the function's stack frame. */
3162
static int
arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return, found_stack_adjust;
  CORE_ADDR func_start, func_end;

  /* Thumb code has its own epilogue scanner.  */
  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_in_function_epilogue_p (gdbarch, pc);

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  /* Skip unconditional (NV-coded) instructions; the patterns below
     only describe conditionally-executable encodings.  */
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	       && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  if (pc < func_start + 4)
    return 0;

  found_stack_adjust = 0;
  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0df0f000) == 0x0080d000)
	/* ADD SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0df0f000) == 0x0040d000)
	/* SUB SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0d000)
	/* MOV SP.  */
	found_stack_adjust = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000)
	/* POP (LDMIA).  */
	found_stack_adjust = 1;
    }

  /* In the epilogue only if both a return and a stack adjustment
     were recognized.  */
  if (found_stack_adjust)
    return 1;

  return 0;
}
3231
3232
3233 /* When arguments must be pushed onto the stack, they go on in reverse
3234 order. The code below implements a FILO (stack) to do this. */
3235
struct stack_item
{
  /* Number of bytes held in DATA.  */
  int len;
  /* Item pushed before this one; NULL at the bottom of the stack.  */
  struct stack_item *prev;
  /* Heap-allocated copy of the pushed contents, owned by this item
     and released by pop_stack_item.  */
  void *data;
};
3242
3243 static struct stack_item *
3244 push_stack_item (struct stack_item *prev, const void *contents, int len)
3245 {
3246 struct stack_item *si;
3247 si = xmalloc (sizeof (struct stack_item));
3248 si->data = xmalloc (len);
3249 si->len = len;
3250 si->prev = prev;
3251 memcpy (si->data, contents, len);
3252 return si;
3253 }
3254
3255 static struct stack_item *
3256 pop_stack_item (struct stack_item *si)
3257 {
3258 struct stack_item *dead = si;
3259 si = si->prev;
3260 xfree (dead->data);
3261 xfree (dead);
3262 return si;
3263 }
3264
3265
3266 /* Return the alignment (in bytes) of the given type. */
3267
3268 static int
3269 arm_type_align (struct type *t)
3270 {
3271 int n;
3272 int align;
3273 int falign;
3274
3275 t = check_typedef (t);
3276 switch (TYPE_CODE (t))
3277 {
3278 default:
3279 /* Should never happen. */
3280 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3281 return 4;
3282
3283 case TYPE_CODE_PTR:
3284 case TYPE_CODE_ENUM:
3285 case TYPE_CODE_INT:
3286 case TYPE_CODE_FLT:
3287 case TYPE_CODE_SET:
3288 case TYPE_CODE_RANGE:
3289 case TYPE_CODE_BITSTRING:
3290 case TYPE_CODE_REF:
3291 case TYPE_CODE_CHAR:
3292 case TYPE_CODE_BOOL:
3293 return TYPE_LENGTH (t);
3294
3295 case TYPE_CODE_ARRAY:
3296 case TYPE_CODE_COMPLEX:
3297 /* TODO: What about vector types? */
3298 return arm_type_align (TYPE_TARGET_TYPE (t));
3299
3300 case TYPE_CODE_STRUCT:
3301 case TYPE_CODE_UNION:
3302 align = 1;
3303 for (n = 0; n < TYPE_NFIELDS (t); n++)
3304 {
3305 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3306 if (falign > align)
3307 align = falign;
3308 }
3309 return align;
3310 }
3311 }
3312
3313 /* Possible base types for a candidate for passing and returning in
3314 VFP registers. */
3315
enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,	/* Classification not yet determined.  */
  VFP_CPRC_SINGLE,	/* 32-bit single-precision float (one 's' register).  */
  VFP_CPRC_DOUBLE,	/* 64-bit double-precision float (one 'd' register).  */
  VFP_CPRC_VEC64,	/* 64-bit vector (one 'd' register).  */
  VFP_CPRC_VEC128	/* 128-bit vector (one 'q' register).  */
};
3324
3325 /* The length of one element of base type B. */
3326
3327 static unsigned
3328 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3329 {
3330 switch (b)
3331 {
3332 case VFP_CPRC_SINGLE:
3333 return 4;
3334 case VFP_CPRC_DOUBLE:
3335 return 8;
3336 case VFP_CPRC_VEC64:
3337 return 8;
3338 case VFP_CPRC_VEC128:
3339 return 16;
3340 default:
3341 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3342 (int) b);
3343 }
3344 }
3345
3346 /* The character ('s', 'd' or 'q') for the type of VFP register used
3347 for passing base type B. */
3348
3349 static int
3350 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3351 {
3352 switch (b)
3353 {
3354 case VFP_CPRC_SINGLE:
3355 return 's';
3356 case VFP_CPRC_DOUBLE:
3357 return 'd';
3358 case VFP_CPRC_VEC64:
3359 return 'd';
3360 case VFP_CPRC_VEC128:
3361 return 'q';
3362 default:
3363 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3364 (int) b);
3365 }
3366 }
3367
3368 /* Determine whether T may be part of a candidate for passing and
3369 returning in VFP registers, ignoring the limit on the total number
3370 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3371 classification of the first valid component found; if it is not
3372 VFP_CPRC_UNKNOWN, all components must have the same classification
3373 as *BASE_TYPE. If it is found that T contains a type not permitted
3374 for passing and returning in VFP registers, a type differently
3375 classified from *BASE_TYPE, or two types differently classified
3376 from each other, return -1, otherwise return the total number of
3377 base-type elements found (possibly 0 in an empty structure or
3378 array). Vectors and complex types are not currently supported,
3379 matching the generic AAPCS support. */
3380
static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* Only 32-bit and 64-bit floats can be CPRC elements; the first
	 one seen fixes *BASE_TYPE, and later ones must match it.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	int count;
	unsigned unitlen;

	/* Classify the element type, then derive the element count
	   from the array's total length.  */
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    /* A zero-length array contributes no elements.  */
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	int count = 0;
	unsigned unitlen;
	int i;

	/* A struct's element count is the sum over its fields; all
	   fields must share the same base type.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	/* Reject padded layouts: total size must be exactly the
	   elements laid end to end.  */
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	int count = 0;
	unsigned unitlen;
	int i;

	/* A union's element count is the maximum over its members,
	   since the members overlap.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  /* Any other type code disqualifies the whole candidate.  */
  return -1;
}
3488
3489 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3490 if passed to or returned from a non-variadic function with the VFP
3491 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3492 *BASE_TYPE to the base type for T and *COUNT to the number of
3493 elements of that base type before returning. */
3494
3495 static int
3496 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3497 int *count)
3498 {
3499 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3500 int c = arm_vfp_cprc_sub_candidate (t, &b);
3501 if (c <= 0 || c > 4)
3502 return 0;
3503 *base_type = b;
3504 *count = c;
3505 return 1;
3506 }
3507
3508 /* Return 1 if the VFP ABI should be used for passing arguments to and
3509 returning values from a function of type FUNC_TYPE, 0
3510 otherwise. */
3511
3512 static int
3513 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3514 {
3515 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3516 /* Variadic functions always use the base ABI. Assume that functions
3517 without debug info are not variadic. */
3518 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3519 return 0;
3520 /* The VFP ABI is only supported as a variant of AAPCS. */
3521 if (tdep->arm_abi != ARM_ABI_AAPCS)
3522 return 0;
3523 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3524 }
3525
3526 /* We currently only support passing parameters in integer registers, which
3527 conforms with GCC's default model, and VFP argument passing following
3528 the VFP variant of AAPCS. Several other variants exist and
3529 we should probably support some of them based on the selected ABI. */
3530
static CORE_ADDR
arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp, int struct_return,
		     CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int argnum;
  int argreg;
  int nstack;
  struct stack_item *si = NULL;
  int use_vfp_abi;
  struct type *ftype;
  /* One bit per single-precision VFP register s0..s15; a set bit means
     the register is still free for argument passing.  */
  unsigned vfp_regs_free = (1 << 16) - 1;

  /* Determine the type of this function and whether the VFP ABI
     applies.  */
  ftype = check_typedef (value_type (function));
  if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
    ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
  use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);

  /* Set the return address.  For the ARM, the return breakpoint is
     always at BP_ADDR.  */
  if (arm_pc_is_thumb (gdbarch, bp_addr))
    bp_addr |= 1;
  regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);

  /* Walk through the list of args and determine how large a temporary
     stack is required.  Need to take care here as structs may be
     passed on the stack, and we have to push them.  */
  nstack = 0;

  argreg = ARM_A1_REGNUM;
  nstack = 0;

  /* The struct_return pointer occupies the first parameter
     passing register.  */
  if (struct_return)
    {
      if (arm_debug)
	fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
			    gdbarch_register_name (gdbarch, argreg),
			    paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
      argreg++;
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      int len;
      struct type *arg_type;
      struct type *target_type;
      enum type_code typecode;
      const bfd_byte *val;
      int align;
      enum arm_vfp_cprc_base_type vfp_base_type;
      int vfp_base_count;
      int may_use_core_reg = 1;

      arg_type = check_typedef (value_type (args[argnum]));
      len = TYPE_LENGTH (arg_type);
      target_type = TYPE_TARGET_TYPE (arg_type);
      typecode = TYPE_CODE (arg_type);
      val = value_contents (args[argnum]);

      align = arm_type_align (arg_type);
      /* Round alignment up to a whole number of words.  */
      align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
      /* Different ABIs have different maximum alignments.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
	{
	  /* The APCS ABI only requires word alignment.  */
	  align = INT_REGISTER_SIZE;
	}
      else
	{
	  /* The AAPCS requires at most doubleword alignment.  */
	  if (align > INT_REGISTER_SIZE * 2)
	    align = INT_REGISTER_SIZE * 2;
	}

      if (use_vfp_abi
	  && arm_vfp_call_candidate (arg_type, &vfp_base_type,
				     &vfp_base_count))
	{
	  int regno;
	  int unit_length;
	  int shift;
	  unsigned mask;

	  /* Because this is a CPRC it cannot go in a core register or
	     cause a core register to be skipped for alignment.
	     Either it goes in VFP registers and the rest of this loop
	     iteration is skipped for this argument, or it goes on the
	     stack (and the stack alignment code is correct for this
	     case).  */
	  may_use_core_reg = 0;

	  /* SHIFT is the candidate's element size in units of
	     single-precision registers; MASK covers all the s-regs the
	     whole candidate would occupy.  */
	  unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
	  shift = unit_length / 4;
	  mask = (1 << (shift * vfp_base_count)) - 1;
	  for (regno = 0; regno < 16; regno += shift)
	    if (((vfp_regs_free >> regno) & mask) == mask)
	      break;

	  if (regno < 16)
	    {
	      int reg_char;
	      int reg_scaled;
	      int i;

	      vfp_regs_free &= ~(mask << regno);
	      reg_scaled = regno / shift;
	      reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
	      for (i = 0; i < vfp_base_count; i++)
		{
		  /* Large enough for "s15"/"d15"/"q15" plus NUL, since
		     REG_SCALED + I stays below 16 here.  */
		  char name_buf[4];
		  int regnum;
		  if (reg_char == 'q')
		    arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
					 val + i * unit_length);
		  else
		    {
		      sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
		      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
							    strlen (name_buf));
		      regcache_cooked_write (regcache, regnum,
					     val + i * unit_length);
		    }
		}
	      continue;
	    }
	  else
	    {
	      /* This CPRC could not go in VFP registers, so all VFP
		 registers are now marked as used.  */
	      vfp_regs_free = 0;
	    }
	}

      /* Push stack padding for doubleword alignment.  */
      if (nstack & (align - 1))
	{
	  si = push_stack_item (si, val, INT_REGISTER_SIZE);
	  nstack += INT_REGISTER_SIZE;
	}

      /* Doubleword aligned quantities must go in even register pairs.  */
      if (may_use_core_reg
	  && argreg <= ARM_LAST_ARG_REGNUM
	  && align > INT_REGISTER_SIZE
	  && argreg & 1)
	argreg++;

      /* If the argument is a pointer to a function, and it is a
	 Thumb function, create a LOCAL copy of the value and set
	 the THUMB bit in it.  */
      if (TYPE_CODE_PTR == typecode
	  && target_type != NULL
	  && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
	{
	  CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
	  if (arm_pc_is_thumb (gdbarch, regval))
	    {
	      bfd_byte *copy = alloca (len);
	      store_unsigned_integer (copy, len, byte_order,
				      MAKE_THUMB_ADDR (regval));
	      val = copy;
	    }
	}

      /* Copy the argument to general registers or the stack in
	 register-sized pieces.  Large arguments are split between
	 registers and stack.  */
      while (len > 0)
	{
	  int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;

	  if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
	    {
	      /* The argument is being passed in a general purpose
		 register.  */
	      CORE_ADDR regval
		= extract_unsigned_integer (val, partial_len, byte_order);
	      if (byte_order == BFD_ENDIAN_BIG)
		regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
				    argnum,
				    gdbarch_register_name
				      (gdbarch, argreg),
				    phex (regval, INT_REGISTER_SIZE));
	      regcache_cooked_write_unsigned (regcache, argreg, regval);
	      argreg++;
	    }
	  else
	    {
	      /* Push the arguments onto the stack.  */
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
				    argnum, nstack);
	      si = push_stack_item (si, val, INT_REGISTER_SIZE);
	      nstack += INT_REGISTER_SIZE;
	    }

	  len -= partial_len;
	  val += partial_len;
	}
    }
  /* If we have an odd number of words to push, then decrement the stack
     by one word now, so first stack argument will be dword aligned.  */
  if (nstack & 4)
    sp -= 4;

  /* Drain the FILO: items were pushed in argument order, so writing
     while decrementing SP lays them out correctly in memory.  */
  while (si)
    {
      sp -= si->len;
      write_memory (sp, si->data, si->len);
      si = pop_stack_item (si);
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);

  return sp;
}
3758
3759
3760 /* Always align the frame to an 8-byte boundary. This is required on
3761 some platforms and harmless on the rest. */
3762
3763 static CORE_ADDR
3764 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3765 {
3766 /* Align the stack to eight bytes. */
3767 return sp & ~ (CORE_ADDR) 7;
3768 }
3769
/* Print the names of the FPA exception flags set in the low five bits
   of FLAGS, followed by a newline.  */

static void
print_fpu_flags (int flags)
{
  /* Flag names indexed by bit position, lowest bit first.  */
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int bit;

  for (bit = 0; bit < 5; bit++)
    if (flags & (1 << bit))
      fputs (flag_names[bit], stdout);
  putchar ('\n');
}
3785
3786 /* Print interesting information about the floating point processor
3787 (if present) or emulator. */
static void
arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
		      struct frame_info *frame, const char *args)
{
  unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
  int type;

  /* Bits 24..30 of the FPS register encode the FPU type; bit 31
     distinguishes hardware from software implementations.  */
  type = (status >> 24) & 127;
  if (status & (1 << 31))
    printf (_("Hardware FPU type %d\n"), type);
  else
    printf (_("Software FPU type %d\n"), type);
  /* NOTE(review): output goes to stdout rather than the FILE
     parameter — confirm whether this should use fprintf-style output
     through FILE like other gdbarch print hooks.  */
  /* i18n: [floating point unit] mask */
  fputs (_("mask: "), stdout);
  /* Exception mask bits live in the upper halfword of FPS.  */
  print_fpu_flags (status >> 16);
  /* i18n: [floating point unit] flags */
  fputs (_("flags: "), stdout);
  print_fpu_flags (status);
}
3807
3808 /* Construct the ARM extended floating point type. */
3809 static struct type *
3810 arm_ext_type (struct gdbarch *gdbarch)
3811 {
3812 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3813
3814 if (!tdep->arm_ext_type)
3815 tdep->arm_ext_type
3816 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3817 floatformats_arm_ext);
3818
3819 return tdep->arm_ext_type;
3820 }
3821
3822 static struct type *
3823 arm_neon_double_type (struct gdbarch *gdbarch)
3824 {
3825 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3826
3827 if (tdep->neon_double_type == NULL)
3828 {
3829 struct type *t, *elem;
3830
3831 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3832 TYPE_CODE_UNION);
3833 elem = builtin_type (gdbarch)->builtin_uint8;
3834 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3835 elem = builtin_type (gdbarch)->builtin_uint16;
3836 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3837 elem = builtin_type (gdbarch)->builtin_uint32;
3838 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3839 elem = builtin_type (gdbarch)->builtin_uint64;
3840 append_composite_type_field (t, "u64", elem);
3841 elem = builtin_type (gdbarch)->builtin_float;
3842 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3843 elem = builtin_type (gdbarch)->builtin_double;
3844 append_composite_type_field (t, "f64", elem);
3845
3846 TYPE_VECTOR (t) = 1;
3847 TYPE_NAME (t) = "neon_d";
3848 tdep->neon_double_type = t;
3849 }
3850
3851 return tdep->neon_double_type;
3852 }
3853
3854 /* FIXME: The vector types are not correctly ordered on big-endian
3855 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3856 bits of d0 - regardless of what unit size is being held in d0. So
3857 the offset of the first uint8 in d0 is 7, but the offset of the
3858 first float is 4. This code works as-is for little-endian
3859 targets. */
3860
3861 static struct type *
3862 arm_neon_quad_type (struct gdbarch *gdbarch)
3863 {
3864 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3865
3866 if (tdep->neon_quad_type == NULL)
3867 {
3868 struct type *t, *elem;
3869
3870 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3871 TYPE_CODE_UNION);
3872 elem = builtin_type (gdbarch)->builtin_uint8;
3873 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3874 elem = builtin_type (gdbarch)->builtin_uint16;
3875 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3876 elem = builtin_type (gdbarch)->builtin_uint32;
3877 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3878 elem = builtin_type (gdbarch)->builtin_uint64;
3879 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3880 elem = builtin_type (gdbarch)->builtin_float;
3881 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3882 elem = builtin_type (gdbarch)->builtin_double;
3883 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3884
3885 TYPE_VECTOR (t) = 1;
3886 TYPE_NAME (t) = "neon_q";
3887 tdep->neon_quad_type = t;
3888 }
3889
3890 return tdep->neon_quad_type;
3891 }
3892
3893 /* Return the GDB type object for the "standard" data type of data in
3894 register N. */
3895
static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* Pseudo registers come after the raw registers: 32 single-precision
     VFP views first, then 16 NEON quad views.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  /* No target description: fall back to the built-in register layout.  */
  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      /* Without FPA hardware the F registers are placeholders.  */
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
3942
3943 /* Map a DWARF register REGNUM onto the appropriate GDB register
3944 number. */
3945
3946 static int
3947 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3948 {
3949 /* Core integer regs. */
3950 if (reg >= 0 && reg <= 15)
3951 return reg;
3952
3953 /* Legacy FPA encoding. These were once used in a way which
3954 overlapped with VFP register numbering, so their use is
3955 discouraged, but GDB doesn't support the ARM toolchain
3956 which used them for VFP. */
3957 if (reg >= 16 && reg <= 23)
3958 return ARM_F0_REGNUM + reg - 16;
3959
3960 /* New assignments for the FPA registers. */
3961 if (reg >= 96 && reg <= 103)
3962 return ARM_F0_REGNUM + reg - 96;
3963
3964 /* WMMX register assignments. */
3965 if (reg >= 104 && reg <= 111)
3966 return ARM_WCGR0_REGNUM + reg - 104;
3967
3968 if (reg >= 112 && reg <= 127)
3969 return ARM_WR0_REGNUM + reg - 112;
3970
3971 if (reg >= 192 && reg <= 199)
3972 return ARM_WC0_REGNUM + reg - 192;
3973
3974 /* VFP v2 registers. A double precision value is actually
3975 in d1 rather than s2, but the ABI only defines numbering
3976 for the single precision registers. This will "just work"
3977 in GDB for little endian targets (we'll read eight bytes,
3978 starting in s0 and then progressing to s1), but will be
3979 reversed on big endian targets with VFP. This won't
3980 be a problem for the new Neon quad registers; you're supposed
3981 to use DW_OP_piece for those. */
3982 if (reg >= 64 && reg <= 95)
3983 {
3984 char name_buf[4];
3985
3986 sprintf (name_buf, "s%d", reg - 64);
3987 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3988 strlen (name_buf));
3989 }
3990
3991 /* VFP v3 / Neon registers. This range is also used for VFP v2
3992 registers, except that it now describes d0 instead of s0. */
3993 if (reg >= 256 && reg <= 287)
3994 {
3995 char name_buf[4];
3996
3997 sprintf (name_buf, "d%d", reg - 256);
3998 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3999 strlen (name_buf));
4000 }
4001
4002 return -1;
4003 }
4004
4005 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4006 static int
4007 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4008 {
4009 int reg = regnum;
4010 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4011
4012 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4013 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4014
4015 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4016 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4017
4018 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4019 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4020
4021 if (reg < NUM_GREGS)
4022 return SIM_ARM_R0_REGNUM + reg;
4023 reg -= NUM_GREGS;
4024
4025 if (reg < NUM_FREGS)
4026 return SIM_ARM_FP0_REGNUM + reg;
4027 reg -= NUM_FREGS;
4028
4029 if (reg < NUM_SREGS)
4030 return SIM_ARM_FPS_REGNUM + reg;
4031 reg -= NUM_SREGS;
4032
4033 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4034 }
4035
4036 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4037 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4038 It is thought that this is is the floating-point register format on
4039 little-endian systems. */
4040
4041 static void
4042 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4043 void *dbl, int endianess)
4044 {
4045 DOUBLEST d;
4046
4047 if (endianess == BFD_ENDIAN_BIG)
4048 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4049 else
4050 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4051 ptr, &d);
4052 floatformat_from_doublest (fmt, &d, dbl);
4053 }
4054
4055 static void
4056 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4057 int endianess)
4058 {
4059 DOUBLEST d;
4060
4061 floatformat_to_doublest (fmt, ptr, &d);
4062 if (endianess == BFD_ENDIAN_BIG)
4063 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4064 else
4065 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4066 &d, dbl);
4067 }
4068
4069 static int
4070 condition_true (unsigned long cond, unsigned long status_reg)
4071 {
4072 if (cond == INST_AL || cond == INST_NV)
4073 return 1;
4074
4075 switch (cond)
4076 {
4077 case INST_EQ:
4078 return ((status_reg & FLAG_Z) != 0);
4079 case INST_NE:
4080 return ((status_reg & FLAG_Z) == 0);
4081 case INST_CS:
4082 return ((status_reg & FLAG_C) != 0);
4083 case INST_CC:
4084 return ((status_reg & FLAG_C) == 0);
4085 case INST_MI:
4086 return ((status_reg & FLAG_N) != 0);
4087 case INST_PL:
4088 return ((status_reg & FLAG_N) == 0);
4089 case INST_VS:
4090 return ((status_reg & FLAG_V) != 0);
4091 case INST_VC:
4092 return ((status_reg & FLAG_V) == 0);
4093 case INST_HI:
4094 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4095 case INST_LS:
4096 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4097 case INST_GE:
4098 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4099 case INST_LT:
4100 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4101 case INST_GT:
4102 return (((status_reg & FLAG_Z) == 0)
4103 && (((status_reg & FLAG_N) == 0)
4104 == ((status_reg & FLAG_V) == 0)));
4105 case INST_LE:
4106 return (((status_reg & FLAG_Z) != 0)
4107 || (((status_reg & FLAG_N) == 0)
4108 != ((status_reg & FLAG_V) == 0)));
4109 }
4110 return 1;
4111 }
4112
/* Compute the shifted-register operand of the ARM instruction INST,
   as executed in FRAME.  CARRY is the current carry flag (used for
   RRX), PC_VAL is the prefetch-adjusted PC, and STATUS_REG is the
   CPSR.  The result is truncated to 32 bits.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift: the amount is the low byte of Rs.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount from bits 11:7.  */
    shift = bits (inst, 7, 11);

  /* When Rm is the PC it reads ahead of the instruction: +8, or +12
     when the shift amount comes from a register.  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Arithmetic shifts of 32 or more replicate the sign bit.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	/* ROR #0 encodes RRX: rotate right one bit through carry.  */
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  return res & 0xffffffff;
}
4162
/* Return number of 1-bits in VAL.  */

static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Kernighan's trick: each step clears the lowest set bit.  */
  while (val != 0)
    {
      val &= val - 1;
      count++;
    }

  return count;
}
4173
/* Return the size in bytes of the complete Thumb instruction whose
   first halfword is INST1.  */

static int
thumb_insn_size (unsigned short inst1)
{
  /* A first halfword whose top five bits are 0b11101, 0b11110 or
     0b11111 begins a 32-bit Thumb-2 encoding; any other halfword is
     a complete 16-bit instruction.  */
  return ((inst1 >> 11) >= 0x1d) ? 4 : 2;
}
4185
/* Advance the Thumb-2 ITSTATE value by one instruction: keep the base
   condition in bits 7:5, shift the per-instruction condition/count
   mask left one bit, and clear the whole state once the IT block is
   finished.  */

static int
thumb_advance_itstate (unsigned int itstate)
{
  unsigned int base = itstate & 0xe0;		/* IT[7:5]: condition.  */
  unsigned int mask = (itstate << 1) & 0x1f;	/* Shifted count/flags.  */

  /* A zero low nibble means no instructions remain in the block.  */
  return (mask & 0x0f) == 0 ? 0 : base | mask;
}
4199
/* Find the next PC after the current instruction executes.  In some
   cases we can not statically determine the answer (see the IT state
   handling in this function); in that case, a breakpoint may be
   inserted in addition to the returned PC, which will be used to set
   another breakpoint by our caller.  */

static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;		/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  /* Reassemble ITSTATE from its split CPSR fields: bits 15:10 land in
     itstate[7:2] and bits 26:25 in itstate[1:0].  */
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  /* Skip instructions whose condition will fail.  */
	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      arm_insert_single_step_breakpoint (gdbarch, aspace,
						 MAKE_THUMB_ADDR (pc));
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	{
	  /* Advance to the next instruction.  All the 32-bit
	     instructions share a common prefix.  */
	  if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
	    return MAKE_THUMB_ADDR (pc + 4);
	  else
	    return MAKE_THUMB_ADDR (pc + 2);
	}

      /* Otherwise, handle the instruction normally.  */
    }

  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
         all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)  /* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* Let the OS-specific hook predict the PC after the
	     system call, if one is registered.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      /* J1/J2 are stored inverted relative to the sign bit.  */
	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      /* The PC is loaded from the highest address covered by
		 the register list.  */
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    load_pc = 0;

	  if (load_pc)
	    {
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* PC-relative load: the base is the word-aligned PC.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      if (bit (inst2, 10))
		{
		  /* Pre-indexed addressing with 8-bit offset.  */
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      /* Register offset, optionally shifted.  */
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  /* Byte-sized table entries hold half the branch offset.  */
	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  /* Halfword-sized table entries hold half the branch offset.  */
	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	/* Rm is the PC.  */
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	/* Rm is the PC.  */
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      /* CBNZ (bit 11 set) branches on non-zero, CBZ on zero.  */
      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }
  return nextpc;
}
4576
/* Get the raw next address.  PC is the current program counter, in
   FRAME, which is assumed to be executing in ARM mode.

   The value returned has the execution state of the next instruction
   encoded in it.  Use IS_THUMB_ADDR () to see whether the instruction is
   in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
   address.  */

static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  /* The NV condition space holds unconditional instructions (BLX
     immediate, coprocessor transfers) rather than never-executed
     ones; handle those separately.  */
  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  */
	  nextpc = BranchDest (pc, this_instr);
	  /* The H bit (bit 24) supplies bit 1 of the Thumb target.  */
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:			/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only instructions that write the PC affect the next
	       PC; anything else falls through to the default.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		return nextpc;
	      }

	    /* Multiply into PC.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    operand1 = ((rn == ARM_PC_REGNUM)
			? (pc_val + 8)
			: get_frame_register_unsigned (frame, rn));

	    if (bit (this_instr, 25))
	      {
		/* Immediate operand: 8-bit value rotated right by
		   twice the rotate field.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    /* Emulate the data-processing opcode to compute the value
	       written to the PC.  */
	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* These only set flags; the PC is unchanged.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* byte write to PC */
		  rn = bits (this_instr, 16, 19);
		  base = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
		      (bit (this_instr, 25)
		       ? shifted_reg_val (frame, this_instr, c, pc_val, status)
		       : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
							    4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;

		  if (bit (this_instr, 23))
		    {
		      /* up */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  {
		    unsigned long rn_val =
		    get_frame_register_unsigned (frame,
						 bits (this_instr, 16, 19));
		    nextpc =
		      (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
								  + offset),
						       4, byte_order);
		  }
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    /* Let the OS-specific hook predict the PC after the
	       system call, if one is registered.  */
	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}
4847
4848 /* Determine next PC after current instruction executes. Will call either
4849 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4850 loop is detected. */
4851
4852 CORE_ADDR
4853 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4854 {
4855 CORE_ADDR nextpc;
4856
4857 if (arm_frame_is_thumb (frame))
4858 {
4859 nextpc = thumb_get_next_pc_raw (frame, pc);
4860 if (nextpc == MAKE_THUMB_ADDR (pc))
4861 error (_("Infinite loop detected"));
4862 }
4863 else
4864 {
4865 nextpc = arm_get_next_pc_raw (frame, pc);
4866 if (nextpc == pc)
4867 error (_("Infinite loop detected"));
4868 }
4869
4870 return nextpc;
4871 }
4872
/* Like insert_single_step_breakpoint, but make sure we use a breakpoint
   of the appropriate mode (as encoded in the PC value), even if this
   differs from what would be expected according to the symbol tables.  */

void
arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
				   struct address_space *aspace,
				   CORE_ADDR pc)
{
  /* Temporarily force the ARM/Thumb decision to follow the mode bit
     encoded in PC; the cleanup restores the previous override.  */
  struct cleanup *old_chain
    = make_cleanup_restore_integer (&arm_override_mode);

  arm_override_mode = IS_THUMB_ADDR (pc);
  /* Strip the mode bit so the breakpoint is placed at the plain
     memory address.  */
  pc = gdbarch_addr_bits_remove (gdbarch, pc);

  insert_single_step_breakpoint (gdbarch, aspace, pc);

  do_cleanups (old_chain);
}
4892
4893 /* single_step() is called just before we want to resume the inferior,
4894 if we want to single-step it but there is no hardware or kernel
4895 single-step support. We find the target of the coming instruction
4896 and breakpoint it. */
4897
4898 int
4899 arm_software_single_step (struct frame_info *frame)
4900 {
4901 struct gdbarch *gdbarch = get_frame_arch (frame);
4902 struct address_space *aspace = get_frame_address_space (frame);
4903 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4904
4905 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4906
4907 return 1;
4908 }
4909
4910 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4911 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4912 NULL if an error occurs. BUF is freed. */
4913
4914 static gdb_byte *
4915 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4916 int old_len, int new_len)
4917 {
4918 gdb_byte *new_buf, *middle;
4919 int bytes_to_read = new_len - old_len;
4920
4921 new_buf = xmalloc (new_len);
4922 memcpy (new_buf + bytes_to_read, buf, old_len);
4923 xfree (buf);
4924 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4925 {
4926 xfree (new_buf);
4927 return NULL;
4928 }
4929 return new_buf;
4930 }
4931
4932 /* An IT block is at most the 2-byte IT instruction followed by
4933 four 4-byte instructions. The furthest back we must search to
4934 find an IT block that affects the current instruction is thus
4935 2 + 3 * 4 == 14 bytes. */
4936 #define MAX_IT_BLOCK_PREFIX 14
4937
4938 /* Use a quick scan if there are more than this many bytes of
4939 code. */
4940 #define IT_SCAN_THRESHOLD 32
4941
4942 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4943 A breakpoint in an IT block may not be hit, depending on the
4944 condition flags. */
4945 static CORE_ADDR
4946 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4947 {
4948 gdb_byte *buf;
4949 char map_type;
4950 CORE_ADDR boundary, func_start;
4951 int buf_len, buf2_len;
4952 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4953 int i, any, last_it, last_it_count;
4954
4955 /* If we are using BKPT breakpoints, none of this is necessary. */
4956 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4957 return bpaddr;
4958
4959 /* ARM mode does not have this problem. */
4960 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4961 return bpaddr;
4962
4963 /* We are setting a breakpoint in Thumb code that could potentially
4964 contain an IT block. The first step is to find how much Thumb
4965 code there is; we do not need to read outside of known Thumb
4966 sequences. */
4967 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4968 if (map_type == 0)
4969 /* Thumb-2 code must have mapping symbols to have a chance. */
4970 return bpaddr;
4971
4972 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4973
4974 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4975 && func_start > boundary)
4976 boundary = func_start;
4977
4978 /* Search for a candidate IT instruction. We have to do some fancy
4979 footwork to distinguish a real IT instruction from the second
4980 half of a 32-bit instruction, but there is no need for that if
4981 there's no candidate. */
4982 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4983 if (buf_len == 0)
4984 /* No room for an IT instruction. */
4985 return bpaddr;
4986
4987 buf = xmalloc (buf_len);
4988 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4989 return bpaddr;
4990 any = 0;
4991 for (i = 0; i < buf_len; i += 2)
4992 {
4993 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4994 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4995 {
4996 any = 1;
4997 break;
4998 }
4999 }
5000 if (any == 0)
5001 {
5002 xfree (buf);
5003 return bpaddr;
5004 }
5005
5006 /* OK, the code bytes before this instruction contain at least one
5007 halfword which resembles an IT instruction. We know that it's
5008 Thumb code, but there are still two possibilities. Either the
5009 halfword really is an IT instruction, or it is the second half of
5010 a 32-bit Thumb instruction. The only way we can tell is to
5011 scan forwards from a known instruction boundary. */
5012 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5013 {
5014 int definite;
5015
5016 /* There's a lot of code before this instruction. Start with an
5017 optimistic search; it's easy to recognize halfwords that can
5018 not be the start of a 32-bit instruction, and use that to
5019 lock on to the instruction boundaries. */
5020 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5021 if (buf == NULL)
5022 return bpaddr;
5023 buf_len = IT_SCAN_THRESHOLD;
5024
5025 definite = 0;
5026 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5027 {
5028 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5029 if (thumb_insn_size (inst1) == 2)
5030 {
5031 definite = 1;
5032 break;
5033 }
5034 }
5035
5036 /* At this point, if DEFINITE, BUF[I] is the first place we
5037 are sure that we know the instruction boundaries, and it is far
5038 enough from BPADDR that we could not miss an IT instruction
5039 affecting BPADDR. If ! DEFINITE, give up - start from a
5040 known boundary. */
5041 if (! definite)
5042 {
5043 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5044 bpaddr - boundary);
5045 if (buf == NULL)
5046 return bpaddr;
5047 buf_len = bpaddr - boundary;
5048 i = 0;
5049 }
5050 }
5051 else
5052 {
5053 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5054 if (buf == NULL)
5055 return bpaddr;
5056 buf_len = bpaddr - boundary;
5057 i = 0;
5058 }
5059
5060 /* Scan forwards. Find the last IT instruction before BPADDR. */
5061 last_it = -1;
5062 last_it_count = 0;
5063 while (i < buf_len)
5064 {
5065 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5066 last_it_count--;
5067 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5068 {
5069 last_it = i;
5070 if (inst1 & 0x0001)
5071 last_it_count = 4;
5072 else if (inst1 & 0x0002)
5073 last_it_count = 3;
5074 else if (inst1 & 0x0004)
5075 last_it_count = 2;
5076 else
5077 last_it_count = 1;
5078 }
5079 i += thumb_insn_size (inst1);
5080 }
5081
5082 xfree (buf);
5083
5084 if (last_it == -1)
5085 /* There wasn't really an IT instruction after all. */
5086 return bpaddr;
5087
5088 if (last_it_count < 1)
5089 /* It was too far away. */
5090 return bpaddr;
5091
5092 /* This really is a trouble spot. Move the breakpoint to the IT
5093 instruction. */
5094 return bpaddr - buf_len + last_it;
5095 }
5096
5097 /* ARM displaced stepping support.
5098
5099 Generally ARM displaced stepping works as follows:
5100
5101 1. When an instruction is to be single-stepped, it is first decoded by
5102 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5103 Depending on the type of instruction, it is then copied to a scratch
5104 location, possibly in a modified form. The copy_* set of functions
5105 performs such modification, as necessary. A breakpoint is placed after
5106 the modified instruction in the scratch space to return control to GDB.
5107 Note in particular that instructions which modify the PC will no longer
5108 do so after modification.
5109
5110 2. The instruction is single-stepped, by setting the PC to the scratch
5111 location address, and resuming. Control returns to GDB when the
5112 breakpoint is hit.
5113
5114 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5115 function used for the current instruction. This function's job is to
5116 put the CPU/memory state back to what it would have been if the
5117 instruction had been executed unmodified in its original location. */
5118
5119 /* NOP instruction (mov r0, r0). */
5120 #define ARM_NOP 0xe1a00000
5121
5122 /* Helper for register reads for displaced stepping. In particular, this
5123 returns the PC as it would be seen by the instruction at its original
5124 location. */
5125
5126 ULONGEST
5127 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5128 int regno)
5129 {
5130 ULONGEST ret;
5131 CORE_ADDR from = dsc->insn_addr;
5132
5133 if (regno == ARM_PC_REGNUM)
5134 {
5135 /* Compute pipeline offset:
5136 - When executing an ARM instruction, PC reads as the address of the
5137 current instruction plus 8.
5138 - When executing a Thumb instruction, PC reads as the address of the
5139 current instruction plus 4. */
5140
5141 if (!dsc->is_thumb)
5142 from += 8;
5143 else
5144 from += 4;
5145
5146 if (debug_displaced)
5147 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5148 (unsigned long) from);
5149 return (ULONGEST) from;
5150 }
5151 else
5152 {
5153 regcache_cooked_read_unsigned (regs, regno, &ret);
5154 if (debug_displaced)
5155 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5156 regno, (unsigned long) ret);
5157 return ret;
5158 }
5159 }
5160
5161 static int
5162 displaced_in_arm_mode (struct regcache *regs)
5163 {
5164 ULONGEST ps;
5165 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5166
5167 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5168
5169 return (ps & t_bit) == 0;
5170 }
5171
5172 /* Write to the PC as from a branch instruction. */
5173
5174 static void
5175 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5176 ULONGEST val)
5177 {
5178 if (!dsc->is_thumb)
5179 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5180 architecture versions < 6. */
5181 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5182 val & ~(ULONGEST) 0x3);
5183 else
5184 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5185 val & ~(ULONGEST) 0x1);
5186 }
5187
5188 /* Write to the PC as from a branch-exchange instruction. */
5189
5190 static void
5191 bx_write_pc (struct regcache *regs, ULONGEST val)
5192 {
5193 ULONGEST ps;
5194 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5195
5196 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5197
5198 if ((val & 1) == 1)
5199 {
5200 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5201 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5202 }
5203 else if ((val & 2) == 0)
5204 {
5205 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5206 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5207 }
5208 else
5209 {
5210 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5211 mode, align dest to 4 bytes). */
5212 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5213 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5214 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5215 }
5216 }
5217
5218 /* Write to the PC as if from a load instruction. */
5219
5220 static void
5221 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5222 ULONGEST val)
5223 {
5224 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5225 bx_write_pc (regs, val);
5226 else
5227 branch_write_pc (regs, dsc, val);
5228 }
5229
5230 /* Write to the PC as if from an ALU instruction. */
5231
5232 static void
5233 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5234 ULONGEST val)
5235 {
5236 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5237 bx_write_pc (regs, val);
5238 else
5239 branch_write_pc (regs, dsc, val);
5240 }
5241
5242 /* Helper for writing to registers for displaced stepping. Writing to the PC
   has varying effects depending on the instruction which does the write:
5244 this is controlled by the WRITE_PC argument. */
5245
5246 void
5247 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5248 int regno, ULONGEST val, enum pc_write_style write_pc)
5249 {
5250 if (regno == ARM_PC_REGNUM)
5251 {
5252 if (debug_displaced)
5253 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5254 (unsigned long) val);
5255 switch (write_pc)
5256 {
5257 case BRANCH_WRITE_PC:
5258 branch_write_pc (regs, dsc, val);
5259 break;
5260
5261 case BX_WRITE_PC:
5262 bx_write_pc (regs, val);
5263 break;
5264
5265 case LOAD_WRITE_PC:
5266 load_write_pc (regs, dsc, val);
5267 break;
5268
5269 case ALU_WRITE_PC:
5270 alu_write_pc (regs, dsc, val);
5271 break;
5272
5273 case CANNOT_WRITE_PC:
5274 warning (_("Instruction wrote to PC in an unexpected way when "
5275 "single-stepping"));
5276 break;
5277
5278 default:
5279 internal_error (__FILE__, __LINE__,
5280 _("Invalid argument to displaced_write_reg"));
5281 }
5282
5283 dsc->wrote_to_pc = 1;
5284 }
5285 else
5286 {
5287 if (debug_displaced)
5288 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5289 regno, (unsigned long) val);
5290 regcache_cooked_write_unsigned (regs, regno, val);
5291 }
5292 }
5293
5294 /* This function is used to concisely determine if an instruction INSN
5295 references PC. Register fields of interest in INSN should have the
5296 corresponding fields of BITMASK set to 0b1111. The function
5297 returns return 1 if any of these fields in INSN reference the PC
5298 (also 0b1111, r15), else it returns 0. */
5299
/* Return 1 if any 4-bit register field of INSN selected by BITMASK
   (each field of interest marked with 0b1111 in BITMASK) names the PC
   (r15, encoded 0b1111); return 0 otherwise.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t remaining = bitmask;
  uint32_t probe = 1;

  while (remaining != 0)
    {
      uint32_t field;

      /* Advance PROBE to the lowest bit still set in REMAINING.  */
      while (probe != 0 && (remaining & probe) == 0)
	probe <<= 1;

      /* PROBE shifted out: no marked fields remain.  */
      if (probe == 0)
	break;

      /* A register field is four bits wide, starting at PROBE.  */
      field = probe * 0xf;

      /* All four bits set in INSN means this field encodes r15.  */
      if ((insn & field) == field)
	return 1;

      remaining &= ~field;
    }

  return 0;
}
5325
5326 /* The simplest copy function. Many instructions have the same effect no
5327 matter what address they are executed at: in those cases, use this. */
5328
5329 static int
5330 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5331 const char *iname, struct displaced_step_closure *dsc)
5332 {
5333 if (debug_displaced)
5334 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5335 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5336 iname);
5337
5338 dsc->modinsn[0] = insn;
5339
5340 return 0;
5341 }
5342
5343 /* Preload instructions with immediate offset. */
5344
5345 static void
5346 cleanup_preload (struct gdbarch *gdbarch,
5347 struct regcache *regs, struct displaced_step_closure *dsc)
5348 {
5349 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5350 if (!dsc->u.preload.immed)
5351 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5352 }
5353
static int
copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  ULONGEST rn_val;

  /* Only the Rn field (bits 16-19) can name the PC here; if it does not,
     the insn is position-independent and can be executed unmodified.  */
  if (!insn_references_pc (insn, 0x000f0000ul))
    return copy_unmodified (gdbarch, insn, "preload", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
			(unsigned long) insn);

  /* Preload instructions:

     {pli/pld} [rn, #+/-imm]
     ->
     {pli/pld} [r0, #+/-imm].  */

  /* Save r0, then materialize the PC value as seen at the original
     location into it.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  rn_val = displaced_read_reg (regs, dsc, rn);
  displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);

  /* Tell cleanup_preload that only r0 needs restoring.  */
  dsc->u.preload.immed = 1;

  /* Rewrite Rn (bits 16-19) to r0.  */
  dsc->modinsn[0] = insn & 0xfff0ffff;

  dsc->cleanup = &cleanup_preload;

  return 0;
}
5386
5387 /* Preload instructions with register offset. */
5388
static int
copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
		  struct regcache *regs,
		  struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  ULONGEST rn_val, rm_val;

  /* Only Rn (bits 16-19) or Rm (bits 0-3) can name the PC; otherwise
     the insn is position-independent.  */
  if (!insn_references_pc (insn, 0x000f000ful))
    return copy_unmodified (gdbarch, insn, "preload reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
			(unsigned long) insn);

  /* Preload register-offset instructions:

     {pli/pld} [rn, rm {, shift}]
     ->
     {pli/pld} [r0, r1 {, shift}].  */

  /* Save r0/r1 and materialize the original Rn/Rm values into them.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);

  /* Tell cleanup_preload to restore both r0 and r1.  */
  dsc->u.preload.immed = 0;

  /* Rewrite Rn (bits 16-19) to r0 and Rm (bits 0-3) to r1.  */
  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;

  dsc->cleanup = &cleanup_preload;

  return 0;
}
5426
5427 /* Copy/cleanup coprocessor load and store instructions. */
5428
5429 static void
5430 cleanup_copro_load_store (struct gdbarch *gdbarch,
5431 struct regcache *regs,
5432 struct displaced_step_closure *dsc)
5433 {
5434 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5435
5436 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5437
5438 if (dsc->u.ldst.writeback)
5439 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5440 }
5441
static int
copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
		       struct regcache *regs,
		       struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  ULONGEST rn_val;

  /* Only the base register Rn (bits 16-19) can name the PC.  */
  if (!insn_references_pc (insn, 0x000f0000ul))
    return copy_unmodified (gdbarch, insn, "copro load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
			"load/store insn %.8lx\n", (unsigned long) insn);

  /* Coprocessor load/store instructions:

     {stc/stc2} [<Rn>, #+/-imm]  (and other immediate addressing modes)
     ->
     {stc/stc2} [r0, #+/-imm].

     ldc/ldc2 are handled identically.  */

  /* Save r0, then put the original base value (here, the PC as read at
     the original location) into it.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  rn_val = displaced_read_reg (regs, dsc, rn);
  displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);

  /* NOTE(review): bit 25 is a fixed opcode bit (0) in the LDC/STC
     encoding space; the W (writeback) bit is bit 21 — confirm.  Latent
     in practice, as this path requires Rn == PC and base writeback to
     the PC is unpredictable anyway.  */
  dsc->u.ldst.writeback = bit (insn, 25);
  dsc->u.ldst.rn = rn;

  /* Rewrite Rn (bits 16-19) to r0.  */
  dsc->modinsn[0] = insn & 0xfff0ffff;

  dsc->cleanup = &cleanup_copro_load_store;

  return 0;
}
5478
5479 /* Clean up branch instructions (actually perform the branch, by setting
5480 PC). */
5481
static void
cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
		struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int branch_taken = condition_true (dsc->u.branch.cond, status);
  /* B(L)X variants exchange instruction set state; plain B/BL do not.  */
  enum pc_write_style write_pc = dsc->u.branch.exchange
				 ? BX_WRITE_PC : BRANCH_WRITE_PC;

  /* The branch was replaced by a NOP, so a failed condition means there
     is nothing to undo.  */
  if (!branch_taken)
    return;

  if (dsc->u.branch.link)
    {
      /* The value of LR should be the next insn of current one.  In order
	 not to confuse logic handling later insn `bx lr', if current insn mode
	 is Thumb, the bit 0 of LR value should be set to 1.  */
      ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;

      if (dsc->is_thumb)
	next_insn_addr |= 0x1;

      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
			   CANNOT_WRITE_PC);
    }

  /* Perform the branch itself, to the destination computed at copy
     time.  */
  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
}
5510
5511 /* Copy B/BL/BLX instructions with immediate destinations. */
5512
static int
copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
	       struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  /* BLX (immediate) uses the 0xf "condition" encoding and always
     switches to Thumb.  */
  int exchange = (cond == 0xf);
  int link = exchange || bit (insn, 24);
  CORE_ADDR from = dsc->insn_addr;
  long offset;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
			(unsigned long) insn);

  /* Implement "BL<cond> <label>" as:

     Preparation: cond <- instruction condition
     Insn: mov r0, r0 (nop)
     Cleanup: if (condition true) { r14 <- pc; pc <- label }.

     B<cond> similar, but don't set r14 in cleanup.  */

  if (exchange)
    /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
       then arrange the switch into Thumb mode.  Bit 24 (H) supplies bit 1
       of the halfword-aligned Thumb target.  */
    offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
  else
    offset = bits (insn, 0, 23) << 2;

  /* Sign-extend: bit 25 is the sign bit of the word-shifted 24-bit
     immediate.  */
  if (bit (offset, 25))
    offset = offset | ~0x3ffffff;

  dsc->u.branch.cond = cond;
  dsc->u.branch.link = link;
  dsc->u.branch.exchange = exchange;
  /* Target is relative to the PC as read, i.e. insn address + 8.  */
  dsc->u.branch.dest = from + 8 + offset;

  dsc->modinsn[0] = ARM_NOP;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
5557
5558 /* Copy BX/BLX with register-specified destinations. */
5559
static int
copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
		 struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  /* BX:  x12xxx1x
     BLX: x12xxx3x.  Bit 5 distinguishes BLX (link) from BX.  */
  int link = bit (insn, 5);
  unsigned int rm = bits (insn, 0, 3);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
			"%.8lx\n", (link) ? "blx" : "bx",
			(unsigned long) insn);

  /* Implement {BX,BLX}<cond> <reg>" as:

     Preparation: cond <- instruction condition
     Insn: mov r0, r0 (nop)
     Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.

     Don't set r14 in cleanup for BX.  */

  /* Read the destination now; cleanup_branch performs the actual jump
     with BX semantics (bit 0 selects Thumb).  */
  dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);

  dsc->u.branch.cond = cond;
  dsc->u.branch.link = link;
  dsc->u.branch.exchange = 1;

  dsc->modinsn[0] = ARM_NOP;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
5595
5596 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5597
5598 static void
5599 cleanup_alu_imm (struct gdbarch *gdbarch,
5600 struct regcache *regs, struct displaced_step_closure *dsc)
5601 {
5602 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5603 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5604 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5605 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5606 }
5607
static int
copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  ULONGEST rd_val, rn_val;

  /* Only Rd (bits 12-15) and Rn (bits 16-19) can name the PC.  */
  if (!insn_references_pc (insn, 0x000ff000ul))
    return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
			"%.8lx\n", is_mov ? "move" : "ALU",
			(unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Mask 0xfff00fff clears Rd and Rn, pointing both at r0; the non-MOV
     form additionally sets Rn to r1 (MOV has no Rn operand).  */
  if (is_mov)
    dsc->modinsn[0] = insn & 0xfff00fff;
  else
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
5655
5656 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5657
5658 static void
5659 cleanup_alu_reg (struct gdbarch *gdbarch,
5660 struct regcache *regs, struct displaced_step_closure *dsc)
5661 {
5662 ULONGEST rd_val;
5663 int i;
5664
5665 rd_val = displaced_read_reg (regs, dsc, 0);
5666
5667 for (i = 0; i < 3; i++)
5668 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5669
5670 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5671 }
5672
static int
copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  ULONGEST rd_val, rn_val, rm_val;

  /* Only Rd (12-15), Rn (16-19) or Rm (0-3) can name the PC.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return copy_unmodified (gdbarch, insn, "ALU reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
			is_mov ? "move" : "ALU", (unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm [, <shift>]

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
		  r0, r1, r2 <- rd, rn, rm
     Insn: <op><cond> r0, r1, r2 [, <shift>]
     Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Mask 0xfff00ff0 clears Rd, Rn and Rm; 0x2 points Rm at r2, and the
     non-MOV form also sets Rn to r1 (0x10000).  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
  else
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;

  dsc->cleanup = &cleanup_alu_reg;

  return 0;
}
5723
5724 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5725
5726 static void
5727 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5728 struct regcache *regs,
5729 struct displaced_step_closure *dsc)
5730 {
5731 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5732 int i;
5733
5734 for (i = 0; i < 4; i++)
5735 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5736
5737 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5738 }
5739
static int
copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int rs = bits (insn, 8, 11);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd), i;
  ULONGEST rd_val, rn_val, rm_val, rs_val;

  /* Only Rd (12-15), Rn (16-19), Rs (8-11) or Rm (0-3) can name the
     PC.  */
  if (!insn_references_pc (insn, 0x000fff0ful))
    return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
			"%.8lx\n", is_mov ? "move" : "ALU",
			(unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm, <shift> rs

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
		  r0, r1, r2, r3 <- rd, rn, rm, rs
     Insn: <op><cond> r0, r1, r2, <shift> r3
     Cleanup: tmp5 <- r0
	      r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
	      rd <- tmp5
  */

  for (i = 0; i < 4; i++)
    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  rs_val = displaced_read_reg (regs, dsc, rs);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Mask 0xfff000f0 clears Rd, Rn, Rs and Rm; 0x302 points Rs at r3 and
     Rm at r2; the non-MOV form also sets Rn to r1 (0x10000).  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
  else
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;

  dsc->cleanup = &cleanup_alu_shifted_reg;

  return 0;
}
5797
5798 /* Clean up load instructions. */
5799
static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* The modified insn loaded into r0 (and r1 for a doubleword transfer)
     and left the possibly written-back base address in r2.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore scratch registers: r0 and r2 always, r1 only for wide
     transfers, r3 only for the register-offset form.  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  Loading the PC uses load-write (BX-like
     on v5+) semantics.  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
5826
5827 /* Clean up store instructions. */
5828
5829 static void
5830 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5831 struct displaced_step_closure *dsc)
5832 {
5833 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
5834
5835 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5836 if (dsc->u.ldst.xfersize > 4)
5837 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5838 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5839 if (!dsc->u.ldst.immed)
5840 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5841 if (!dsc->u.ldst.restore_r4)
5842 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5843
5844 /* Writeback. */
5845 if (dsc->u.ldst.writeback)
5846 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5847 }
5848
5849 /* Copy "extra" load/store instructions. These are halfword/doubleword
5850 transfers, which have a different encoding to byte/word transfers. */
5851
5852 static int
5853 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
5854 struct regcache *regs, struct displaced_step_closure *dsc)
5855 {
5856 unsigned int op1 = bits (insn, 20, 24);
5857 unsigned int op2 = bits (insn, 5, 6);
5858 unsigned int rt = bits (insn, 12, 15);
5859 unsigned int rn = bits (insn, 16, 19);
5860 unsigned int rm = bits (insn, 0, 3);
5861 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5862 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
5863 int immed = (op1 & 0x4) != 0;
5864 int opcode;
5865 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
5866
5867 if (!insn_references_pc (insn, 0x000ff00ful))
5868 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
5869
5870 if (debug_displaced)
5871 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
5872 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
5873 (unsigned long) insn);
5874
5875 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
5876
5877 if (opcode < 0)
5878 internal_error (__FILE__, __LINE__,
5879 _("copy_extra_ld_st: instruction decode error"));
5880
5881 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5882 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5883 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5884 if (!immed)
5885 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
5886
5887 rt_val = displaced_read_reg (regs, dsc, rt);
5888 if (bytesize[opcode] == 8)
5889 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
5890 rn_val = displaced_read_reg (regs, dsc, rn);
5891 if (!immed)
5892 rm_val = displaced_read_reg (regs, dsc, rm);
5893
5894 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5895 if (bytesize[opcode] == 8)
5896 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
5897 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5898 if (!immed)
5899 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5900
5901 dsc->rd = rt;
5902 dsc->u.ldst.xfersize = bytesize[opcode];
5903 dsc->u.ldst.rn = rn;
5904 dsc->u.ldst.immed = immed;
5905 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5906 dsc->u.ldst.restore_r4 = 0;
5907
5908 if (immed)
5909 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5910 ->
5911 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5912 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5913 else
5914 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5915 ->
5916 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5917 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5918
5919 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
5920
5921 return 0;
5922 }
5923
5924 /* Copy byte/word loads and stores. */
5925
static int
copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			struct regcache *regs,
			struct displaced_step_closure *dsc, int load, int byte,
			int usermode)
{
  int immed = !bit (insn, 25);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Only Rt (12-15), Rn (16-19) or Rm (0-3) can name the PC.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
			load ? (byte ? "ldrb" : "ldr")
			: (byte ? "strb" : "str"), usermode ? "t" : "",
			(unsigned long) insn);

  /* Save the scratch registers the rewritten insn will clobber.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  /* Materialize the original operand values in the scratch registers.  */
  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  dsc->rd = rt;
  dsc->u.ldst.xfersize = byte ? 1 : 4;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  /* Post-indexed (P == 0) or pre-indexed with W set both write back.  */
  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from dispalced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
     = addr(Insn1) + offset - addr(Insn3) - 8
     = offset - 16
     Insn4: add r4, r4, #8 r4 = offset - 8
     Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
     = from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
6029
6030 /* Cleanup LDM instructions with fully-populated register list. This is an
6031 unfortunate corner case: it's impossible to implement correctly by modifying
6032 the instruction. The issue is as follows: we have an instruction,
6033
6034 ldm rN, {r0-r15}
6035
6036 which we must rewrite to avoid loading PC. A possible solution would be to
6037 do the load in two halves, something like (with suitable cleanup
6038 afterwards):
6039
6040 mov r8, rN
6041 ldm[id][ab] r8!, {r0-r7}
6042 str r7, <temp>
6043 ldm[id][ab] r8, {r7-r14}
6044 <bkpt>
6045
6046 but at present there's no suitable place for <temp>, since the scratch space
6047 is overwritten before the cleanup routine is called. For now, we simply
6048 emulate the instruction. */
6049
static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  /* Address bump applied before/after each word, per LDM{IA,IB,DA,DB}
     addressing mode.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Walk registers in ascending order for increment modes, descending
     otherwise, matching the order the hardware transfers them.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  /* Emulate the transfer word by word.  */
  while (regmask)
    {
      uint32_t memword;

      /* Find the next register named in the mask, in transfer order.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Emulate base register writeback.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
6108
6109 /* Clean up an STM which included the PC in the register list. */
6110
static void
cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int store_executed = condition_true (dsc->u.block.cond, status);
  /* NOTE(review): transferred_regs is a register count; CORE_ADDR is an
     odd choice of type but harmless — confirm.  */
  CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
  CORE_ADDR stm_insn_addr;
  uint32_t pc_val;
  long offset;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* If condition code fails, there's nothing else to do.  */
  if (!store_executed)
    return;

  /* Locate where the PC (the highest-numbered register, hence last in
     ascending transfer order) was stored by the displaced STM.  */
  if (dsc->u.block.increment)
    {
      pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;

      if (dsc->u.block.before)
	pc_stored_at += 4;
    }
  else
    {
      pc_stored_at = dsc->u.block.xfer_addr;

      if (dsc->u.block.before)
	pc_stored_at -= 4;
    }

  /* The displaced STM stored scratch-area PC + offset; the same
     architecture-dependent offset applies at the original location.  */
  pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
  stm_insn_addr = dsc->scratch_base;
  offset = pc_val - stm_insn_addr;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
			"STM instruction\n", offset);

  /* Rewrite the stored PC to the proper value for the non-displaced original
     instruction.  */
  write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
				 dsc->insn_addr + offset);
}
6155
/* Clean up an LDM which includes the PC in the register list.  We clumped all
   the registers in the transferred list into a contiguous range r0...rX (to
   avoid loading PC directly and losing control of the debugged program), so we
   must undo that here.  */

static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int load_executed = condition_true (dsc->u.block.cond, status), i;
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = bitcount (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  /* If the condition failed, the LDM never executed; nothing to undo.  */
  if (!load_executed)
    return;

  /* The scratch copy loaded into r0..r(N-1); CLOBBERED tracks which of
     those temporaries still hold a value destined for another register.  */
  clobbered = (1 << num_to_shuffle) - 1;

  /* Walk the original register list from the top (PC) downwards, moving each
     loaded value from its temporary slot to its real destination.  Going
     top-down ensures a temporary is consumed before it can be overwritten.  */
  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      if (debug_displaced)
		fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
				    "loaded register r%d to r%d\n"), read_reg,
				    write_reg);
	    }
	  else if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
				"r%d already in the right place\n"),
				write_reg);

	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  Their original contents were
     saved in dsc->tmp[] by copy_block_xfer before the transfer ran.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
				"clobbered register r%d\n"), write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually.  The writeback bit was cleared in
     the copy we executed, so its effect must be emulated here.  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}
6237
/* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
   in user-level code (in particular exception return, ldm rn, {...pc}^).  */

static int
copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		 struct displaced_step_closure *dsc)
{
  int load = bit (insn, 20);
  int user = bit (insn, 22);
  int increment = bit (insn, 23);
  int before = bit (insn, 24);
  int writeback = bit (insn, 21);
  int rn = bits (insn, 16, 19);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
    return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
			"%.8lx\n", (unsigned long) insn);

  /* Record everything the cleanup routines need in order to reconstruct
     the original instruction's effect.  */
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = user;
  dsc->u.block.increment = increment;
  dsc->u.block.before = before;
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = bits (insn, 28, 31);

  dsc->u.block.regmask = insn & 0xffff;

  if (load)
    {
      if ((insn & 0xffff) == 0xffff)
	{
	  /* LDM with a fully-populated register list.  This case is
	     particularly tricky.  Implement for now by fully emulating the
	     instruction (which might not behave perfectly in all cases, but
	     these instructions should be rare enough for that not to matter
	     too much).  */
	  dsc->modinsn[0] = ARM_NOP;

	  dsc->cleanup = &cleanup_block_load_all;
	}
      else
	{
	  /* LDM of a list of registers which includes PC.  Implement by
	     rewriting the list of registers to be transferred into a
	     contiguous chunk r0...rX before doing the transfer, then shuffling
	     registers into the correct places in the cleanup routine.  */
	  unsigned int regmask = insn & 0xffff;
	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
	  unsigned int to = 0, from = 0, i, new_rn;

	  /* Save the current contents of r0..r(N-1) so the cleanup routine
	     can restore those we clobber as temporaries.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Writeback makes things complicated.  We need to avoid clobbering
	     the base register with one of the registers in our modified
	     register list, but just using a different register can't work in
	     all cases, e.g.:

	       ldm r14!, {r0-r13,pc}

	     which would need to be rewritten as:

	       ldm rN!, {r0-r14}

	     but that can't work, because there's no free register for N.

	     Solve this by turning off the writeback bit, and emulating
	     writeback manually in the cleanup routine.  */

	  if (writeback)
	    insn &= ~(1 << 21);

	  new_regmask = (1 << num_in_list) - 1;

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
				"{..., pc}: original reg list %.4x, modified "
				"list %.4x\n"), rn, writeback ? "!" : "",
				(int) insn & 0xffff, new_regmask);

	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      /* STM of a list of registers which includes PC.  Run the instruction
	 as-is, but out of line: this will store the wrong value for the PC,
	 so we must manually fix up the memory in the cleanup routine.
	 Doing things this way has the advantage that we can auto-detect
	 the offset of the PC write (which is architecture-dependent) in
	 the cleanup routine.  */
      dsc->modinsn[0] = insn;

      dsc->cleanup = &cleanup_block_store_pc;
    }

  return 0;
}
6353
6354 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6355 for Linux, where some SVC instructions must be treated specially. */
6356
6357 static void
6358 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6359 struct displaced_step_closure *dsc)
6360 {
6361 CORE_ADDR resume_addr = dsc->insn_addr + 4;
6362
6363 if (debug_displaced)
6364 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6365 "%.8lx\n", (unsigned long) resume_addr);
6366
6367 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6368 }
6369
6370 static int
6371 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6372 struct regcache *regs, struct displaced_step_closure *dsc)
6373 {
6374 /* Allow OS-specific code to override SVC handling. */
6375 if (dsc->u.svc.copy_svc_os)
6376 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
6377
6378 if (debug_displaced)
6379 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6380 (unsigned long) insn);
6381
6382 /* Preparation: none.
6383 Insn: unmodified svc.
6384 Cleanup: pc <- insn_addr + 4. */
6385
6386 dsc->modinsn[0] = insn;
6387
6388 dsc->cleanup = &cleanup_svc;
6389 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6390 instruction. */
6391 dsc->wrote_to_pc = 1;
6392
6393 return 0;
6394 }
6395
6396 /* Copy undefined instructions. */
6397
6398 static int
6399 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6400 struct displaced_step_closure *dsc)
6401 {
6402 if (debug_displaced)
6403 fprintf_unfiltered (gdb_stdlog,
6404 "displaced: copying undefined insn %.8lx\n",
6405 (unsigned long) insn);
6406
6407 dsc->modinsn[0] = insn;
6408
6409 return 0;
6410 }
6411
6412 /* Copy unpredictable instructions. */
6413
6414 static int
6415 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6416 struct displaced_step_closure *dsc)
6417 {
6418 if (debug_displaced)
6419 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
6420 "%.8lx\n", (unsigned long) insn);
6421
6422 dsc->modinsn[0] = insn;
6423
6424 return 0;
6425 }
6426
/* The decode_* functions are instruction decoding helpers.  They mostly follow
   the presentation in the ARM ARM.  */

/* Decode the miscellaneous, memory-hint and Advanced SIMD group of the
   unconditional instruction space.  */

static int
decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  /* CPS and SETEND are distinguished by insn bit 16 (the low bit of the RN
     field): clear for CPS, set for SETEND.  The old mask of 0xe made the
     SETEND test dead code ((rn & 0xe) is always even, so it could never
     equal 0x1) and SETEND was therefore logged as "cps"; both forms are
     copied unmodified either way, so only the debug label was wrong.  */
  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0x1) == 0x0)
    return copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0x1) == 0x1)
    return copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
  else if ((op1 & 0x77) == 0x41)
    return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      if (rn != 0xf)
	return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return copy_unpred (gdbarch, insn, dsc);
      default:
	return copy_undef (gdbarch, insn, dsc);
      }
  else
    return copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
6488
/* Decode instructions in the unconditional space (condition field 0xf) and
   dispatch to the appropriate copy routine.  */

static int
decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* Bits 16-19 == 0xf selects the literal (PC-relative) form.  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? copy_undef (gdbarch, insn, dsc)
			: copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
			: copy_undef (gdbarch, insn, dsc);

	  default:
	    return copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return copy_undef (gdbarch, insn, dsc);
    }
}
6571
/* Decode miscellaneous instructions in dp/misc encoding space.  */

static int
decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);
  unsigned int op1 = bits (insn, 16, 19);	/* Currently unused.  */

  switch (op2)
    {
    case 0x0:
      return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return copy_bx_blx_reg (gdbarch, insn,
				regs, dsc);  /* blx register.  */
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return copy_unmodified (gdbarch, insn, "smc", dsc);
      /* NOTE(review): if OP is 0x0 or 0x2 neither branch above returns and
	 control falls through into the default label below, treating the
	 instruction as undefined.  This looks intentional but is implicit;
	 confirm before restructuring.  */

    default:
      return copy_undef (gdbarch, insn, dsc);
    }
}
6624
/* Decode the data-processing / miscellaneous instruction space and dispatch
   to the appropriate copy routine.  Bit 25 selects the immediate forms.  */

static int
decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				 dsc);
    }

  /* Should be unreachable.  A nonzero return is reported as a decode error
     by arm_process_displaced_insn.  */
  return 1;
}
6669
/* Decode load/store word and unsigned byte instructions.  The three trailing
   flags passed to copy_ldr_str_ldrb_strb appear to select load vs. store,
   byte vs. word, and the unprivileged (T) variant respectively — confirm
   against copy_ldr_str_ldrb_strb's parameter list (defined elsewhere).  */

static int
decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			 struct regcache *regs,
			 struct displaced_step_closure *dsc)
{
  /* A (bit 25) selects the register-offset form; B (bit 4) must be clear
     for the register forms (set would make it a media instruction).  */
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
6707
/* Decode media instructions (parallel add/sub, pack/unpack, bit-field ops).
   None of these can reference the PC in a way that needs fixing up, so each
   recognized form is copied unmodified.  */

static int
decode_media (struct gdbarch *gdbarch, uint32_t insn,
	      struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return copy_unmodified (gdbarch, insn,
			      "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  if (bits (insn, 12, 15) == 0xf)
	    return copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  if (bits (insn, 0, 3) == 0xf)
	    return copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  Opcodes with no case label above (0x10-0x17,
     0x19) fall out of the switch and are reported as a decode error.  */
  return 1;
}
6763
/* Decode branch (B/BL immediate) versus block-transfer (LDM/STM)
   instructions, distinguished by bit 25, and dispatch accordingly.

   INSN is now uint32_t, consistent with every other decode_* helper in this
   file; the previous int32_t parameter made the uint32_t -> int32_t
   conversion of opcodes with bit 31 set (any condition code >= 0x8)
   implementation-defined.  */

static int
decode_b_bl_ldmstm (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs, struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    return copy_b_bl_blx (gdbarch, insn, regs, dsc);
  else
    return copy_block_xfer (gdbarch, insn, regs, dsc);
}
6773
/* Decode VFP/Neon extension-register load/store and 64-bit transfer
   instructions.  Only the vstr/vldr forms can be PC-relative and thus need
   the coprocessor load/store copy routine; everything else runs as-is.  */

static int
decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6804
/* Decode the SVC and coprocessor instruction space.  Coprocessor numbers
   10/11 (coproc & 0xe == 0xa) are VFP/Neon and are routed separately from
   generic coprocessor instructions.  */

static int
decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
		  struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 25);
  int op = bit (insn, 4);
  unsigned int coproc = bits (insn, 8, 11);
  unsigned int rn = bits (insn, 16, 19);

  if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
    return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* stc/stc2.  */
    return copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* ldc/ldc2 imm/lit.  */
    return copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x3e) == 0x00)
    return copy_undef (gdbarch, insn, dsc);
  else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
    return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
  else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
    return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
  else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
  else if ((op1 & 0x30) == 0x20 && !op)
    {
      if ((coproc & 0xe) == 0xa)
	return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
    }
  else if ((op1 & 0x30) == 0x20 && op)
    return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
  else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
    return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
  else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
    return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
  else if ((op1 & 0x30) == 0x30)
    return copy_svc (gdbarch, insn, to, regs, dsc);
  else
    return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
}
6850
/* Displaced stepping of Thumb instructions is not implemented here; report
   an error so the caller falls back to other stepping strategies.  */

static void
thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			      CORE_ADDR to, struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  error (_("Displaced stepping is only supported in ARM mode"));
}
6858
/* Decode the instruction at FROM and fill in DSC with the modified
   instruction(s) to execute at scratch address TO, plus any cleanup routine
   needed afterwards.  Top-level dispatcher for ARM-mode displaced
   stepping.  */

void
arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  int err = 0;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn;

  /* Most displaced instructions use a 1-instruction scratch space, so set this
     here and override below if/when necessary.  */
  dsc->numinsns = 1;
  dsc->insn_addr = from;
  dsc->scratch_base = to;
  dsc->cleanup = NULL;
  dsc->wrote_to_pc = 0;

  /* Thumb mode is handled (currently: rejected) separately.  */
  if (!displaced_in_arm_mode (regs))
    return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);

  dsc->is_thumb = 0;
  dsc->insn_size = 4;
  insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
			"at %.8lx\n", (unsigned long) insn,
			(unsigned long) from);

  /* Dispatch on the condition field first (0xf is the unconditional space),
     then on bit 4 and bits 25-27 of the instruction.  */
  if ((insn & 0xf0000000) == 0xf0000000)
    err = decode_unconditional (gdbarch, insn, regs, dsc);
  else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
    {
    case 0x0: case 0x1: case 0x2: case 0x3:
      err = decode_dp_misc (gdbarch, insn, regs, dsc);
      break;

    case 0x4: case 0x5: case 0x6:
      err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
      break;

    case 0x7:
      err = decode_media (gdbarch, insn, dsc);
      break;

    case 0x8: case 0x9: case 0xa: case 0xb:
      err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
      break;

    case 0xc: case 0xd: case 0xe: case 0xf:
      err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
      break;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("arm_process_displaced_insn: Instruction decode error"));
}
6916
/* Actually set up the scratch space for a displaced instruction.  Writes the
   modified instruction(s) recorded in DSC to TO, followed by the breakpoint
   sequence appropriate for the original instruction's mode.  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct displaced_step_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Thumb instructions written to scratch are 2 bytes, ARM 4 bytes.  */
  int size = dsc->is_thumb? 2 : 4;
  const unsigned char *bkp_insn;

  offset = 0;
  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
	  if (size == 4)
	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
				dsc->modinsn[i]);
	  else if (size == 2)
	    fprintf_unfiltered (gdb_stdlog, "%.4x",
				(unsigned short)dsc->modinsn[i]);

	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
			      (unsigned long) to + offset);

	}
      write_memory_unsigned_integer (to + offset, size,
				     byte_order_for_code,
				     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  /* NOTE(review): deliberately no trailing newline — the generic displaced-
     stepping code presumably continues this log line; confirm.  */
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			paddress (gdbarch, from), paddress (gdbarch, to));
}
6972
6973 /* Entry point for copying an instruction into scratch space for displaced
6974 stepping. */
6975
6976 struct displaced_step_closure *
6977 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
6978 CORE_ADDR from, CORE_ADDR to,
6979 struct regcache *regs)
6980 {
6981 struct displaced_step_closure *dsc
6982 = xmalloc (sizeof (struct displaced_step_closure));
6983 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
6984 arm_displaced_init_closure (gdbarch, from, to, dsc);
6985
6986 return dsc;
6987 }
6988
6989 /* Entry point for cleaning things up after a displaced instruction has been
6990 single-stepped. */
6991
6992 void
6993 arm_displaced_step_fixup (struct gdbarch *gdbarch,
6994 struct displaced_step_closure *dsc,
6995 CORE_ADDR from, CORE_ADDR to,
6996 struct regcache *regs)
6997 {
6998 if (dsc->cleanup)
6999 dsc->cleanup (gdbarch, regs, dsc);
7000
7001 if (!dsc->wrote_to_pc)
7002 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
7003 dsc->insn_addr + dsc->insn_size);
7004
7005 }
7006
7007 #include "bfd-in2.h"
7008 #include "libcoff.h"
7009
/* Disassembler callback: print the instruction at MEMADDR, selecting Thumb
   or ARM decoding based on the address's Thumb bit / symbol information.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Static: the fake symbol is built once and reused for every
	 disassembly request.  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit before handing the address to opcodes.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
7051
7052 /* The following define instruction sequences that will cause ARM
7053 cpu's to take an undefined instruction trap. These are used to
7054 signal a breakpoint to GDB.
7055
7056 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
7057 modes. A different instruction is required for each mode. The ARM
7058 cpu's can also be big or little endian. Thus four different
7059 instructions are needed to support all cases.
7060
7061 Note: ARMv4 defines several new instructions that will take the
7062 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7063 not in fact add the new instructions. The new undefined
7064 instructions in ARMv4 are all instructions that had no defined
7065 behaviour in earlier chips. There is no guarantee that they will
7066 raise an exception, but may be treated as NOP's. In practice, it
   may only be safe to rely on instructions matching:
7068
7069 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7070 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7071 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7072
   Even this may only be true if the condition predicate is true.  The
7074 following use a condition predicate of ALWAYS so it is always TRUE.
7075
7076 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7077 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.  This can be handled by the
7079 abi-specific code during establishment of the gdbarch vector. */
7080
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

/* Default breakpoint sequences, one per mode and byte order (see the
   comment above for the rationale).  ABI-specific code may replace these
   when the gdbarch vector is established.  */
static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7090
/* Determine the type and size of breakpoint to insert at PCPTR.  Uses
   the program counter value to determine whether a 16-bit or 32-bit
   breakpoint should be used.  It returns a pointer to a string of
   bytes that encode a breakpoint instruction, stores the length of
   the string to *lenptr, and adjusts the program counter (if
   necessary) to point to the actual memory location where the
   breakpoint should be inserted.  */

static const unsigned char *
arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  if (arm_pc_is_thumb (gdbarch, *pcptr))
    {
      /* Clear the Thumb bit so the breakpoint lands on the real address.  */
      *pcptr = UNMAKE_THUMB_ADDR (*pcptr);

      /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
	 check whether we are replacing a 32-bit instruction.  */
      if (tdep->thumb2_breakpoint != NULL)
	{
	  gdb_byte buf[2];
	  if (target_read_memory (*pcptr, buf, 2) == 0)
	    {
	      unsigned short inst1;
	      inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
	      /* The first halfword of a 32-bit Thumb-2 instruction matches
		 0b111xx (with xx != 00) in its top five bits.  */
	      if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
		{
		  *lenptr = tdep->thumb2_breakpoint_size;
		  return tdep->thumb2_breakpoint;
		}
	    }
	}

      *lenptr = tdep->thumb_breakpoint_size;
      return tdep->thumb_breakpoint;
    }
  else
    {
      *lenptr = tdep->arm_breakpoint_size;
      return tdep->arm_breakpoint;
    }
}
7135
7136 static void
7137 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
7138 int *kindptr)
7139 {
7140 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7141
7142 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
7143
7144 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
7145 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
7146 that this is not confused with a 32-bit ARM breakpoint. */
7147 *kindptr = 3;
7148 }
7149
7150 /* Extract from an array REGBUF containing the (raw) register state a
7151 function return value of type TYPE, and copy that, in virtual
7152 format, into VALBUF. */
7153
7154 static void
7155 arm_extract_return_value (struct type *type, struct regcache *regs,
7156 gdb_byte *valbuf)
7157 {
7158 struct gdbarch *gdbarch = get_regcache_arch (regs);
7159 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7160
7161 if (TYPE_CODE_FLT == TYPE_CODE (type))
7162 {
7163 switch (gdbarch_tdep (gdbarch)->fp_model)
7164 {
7165 case ARM_FLOAT_FPA:
7166 {
7167 /* The value is in register F0 in internal format. We need to
7168 extract the raw value and then convert it to the desired
7169 internal type. */
7170 bfd_byte tmpbuf[FP_REGISTER_SIZE];
7171
7172 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
7173 convert_from_extended (floatformat_from_type (type), tmpbuf,
7174 valbuf, gdbarch_byte_order (gdbarch));
7175 }
7176 break;
7177
7178 case ARM_FLOAT_SOFT_FPA:
7179 case ARM_FLOAT_SOFT_VFP:
7180 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7181 not using the VFP ABI code. */
7182 case ARM_FLOAT_VFP:
7183 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
7184 if (TYPE_LENGTH (type) > 4)
7185 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
7186 valbuf + INT_REGISTER_SIZE);
7187 break;
7188
7189 default:
7190 internal_error (__FILE__, __LINE__,
7191 _("arm_extract_return_value: "
7192 "Floating point model not supported"));
7193 break;
7194 }
7195 }
7196 else if (TYPE_CODE (type) == TYPE_CODE_INT
7197 || TYPE_CODE (type) == TYPE_CODE_CHAR
7198 || TYPE_CODE (type) == TYPE_CODE_BOOL
7199 || TYPE_CODE (type) == TYPE_CODE_PTR
7200 || TYPE_CODE (type) == TYPE_CODE_REF
7201 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7202 {
7203 /* If the type is a plain integer, then the access is
7204 straight-forward. Otherwise we have to play around a bit
7205 more. */
7206 int len = TYPE_LENGTH (type);
7207 int regno = ARM_A1_REGNUM;
7208 ULONGEST tmp;
7209
7210 while (len > 0)
7211 {
7212 /* By using store_unsigned_integer we avoid having to do
7213 anything special for small big-endian values. */
7214 regcache_cooked_read_unsigned (regs, regno++, &tmp);
7215 store_unsigned_integer (valbuf,
7216 (len > INT_REGISTER_SIZE
7217 ? INT_REGISTER_SIZE : len),
7218 byte_order, tmp);
7219 len -= INT_REGISTER_SIZE;
7220 valbuf += INT_REGISTER_SIZE;
7221 }
7222 }
7223 else
7224 {
7225 /* For a structure or union the behaviour is as if the value had
7226 been stored to word-aligned memory and then loaded into
7227 registers with 32-bit load instruction(s). */
7228 int len = TYPE_LENGTH (type);
7229 int regno = ARM_A1_REGNUM;
7230 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7231
7232 while (len > 0)
7233 {
7234 regcache_cooked_read (regs, regno++, tmpbuf);
7235 memcpy (valbuf, tmpbuf,
7236 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7237 len -= INT_REGISTER_SIZE;
7238 valbuf += INT_REGISTER_SIZE;
7239 }
7240 }
7241 }
7242
7243
7244 /* Will a function return an aggregate type in memory or in a
7245 register? Return 0 if an aggregate type can be returned in a
7246 register, 1 if it must be returned in memory. */
7247
7248 static int
7249 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
7250 {
7251 int nRc;
7252 enum type_code code;
7253
7254 CHECK_TYPEDEF (type);
7255
7256 /* In the ARM ABI, "integer" like aggregate types are returned in
7257 registers. For an aggregate type to be integer like, its size
7258 must be less than or equal to INT_REGISTER_SIZE and the
7259 offset of each addressable subfield must be zero. Note that bit
7260 fields are not addressable, and all addressable subfields of
7261 unions always start at offset zero.
7262
7263 This function is based on the behaviour of GCC 2.95.1.
7264 See: gcc/arm.c: arm_return_in_memory() for details.
7265
7266 Note: All versions of GCC before GCC 2.95.2 do not set up the
7267 parameters correctly for a function returning the following
7268 structure: struct { float f;}; This should be returned in memory,
7269 not a register. Richard Earnshaw sent me a patch, but I do not
7270 know of any way to detect if a function like the above has been
7271 compiled with the correct calling convention. */
7272
7273 /* All aggregate types that won't fit in a register must be returned
7274 in memory. */
7275 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
7276 {
7277 return 1;
7278 }
7279
7280 /* The AAPCS says all aggregates not larger than a word are returned
7281 in a register. */
7282 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
7283 return 0;
7284
7285 /* The only aggregate types that can be returned in a register are
7286 structs and unions. Arrays must be returned in memory. */
7287 code = TYPE_CODE (type);
7288 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
7289 {
7290 return 1;
7291 }
7292
7293 /* Assume all other aggregate types can be returned in a register.
7294 Run a check for structures, unions and arrays. */
7295 nRc = 0;
7296
7297 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
7298 {
7299 int i;
7300 /* Need to check if this struct/union is "integer" like. For
7301 this to be true, its size must be less than or equal to
7302 INT_REGISTER_SIZE and the offset of each addressable
7303 subfield must be zero. Note that bit fields are not
7304 addressable, and unions always start at offset zero. If any
7305 of the subfields is a floating point type, the struct/union
7306 cannot be an integer type. */
7307
7308 /* For each field in the object, check:
7309 1) Is it FP? --> yes, nRc = 1;
7310 2) Is it addressable (bitpos != 0) and
7311 not packed (bitsize == 0)?
7312 --> yes, nRc = 1
7313 */
7314
7315 for (i = 0; i < TYPE_NFIELDS (type); i++)
7316 {
7317 enum type_code field_type_code;
7318 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
7319 i)));
7320
7321 /* Is it a floating point type field? */
7322 if (field_type_code == TYPE_CODE_FLT)
7323 {
7324 nRc = 1;
7325 break;
7326 }
7327
7328 /* If bitpos != 0, then we have to care about it. */
7329 if (TYPE_FIELD_BITPOS (type, i) != 0)
7330 {
7331 /* Bitfields are not addressable. If the field bitsize is
7332 zero, then the field is not packed. Hence it cannot be
7333 a bitfield or any other packed type. */
7334 if (TYPE_FIELD_BITSIZE (type, i) == 0)
7335 {
7336 nRc = 1;
7337 break;
7338 }
7339 }
7340 }
7341 }
7342
7343 return nRc;
7344 }
7345
7346 /* Write into appropriate registers a function return value of type
7347 TYPE, given in virtual format. */
7348
7349 static void
7350 arm_store_return_value (struct type *type, struct regcache *regs,
7351 const gdb_byte *valbuf)
7352 {
7353 struct gdbarch *gdbarch = get_regcache_arch (regs);
7354 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7355
7356 if (TYPE_CODE (type) == TYPE_CODE_FLT)
7357 {
7358 char buf[MAX_REGISTER_SIZE];
7359
7360 switch (gdbarch_tdep (gdbarch)->fp_model)
7361 {
7362 case ARM_FLOAT_FPA:
7363
7364 convert_to_extended (floatformat_from_type (type), buf, valbuf,
7365 gdbarch_byte_order (gdbarch));
7366 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
7367 break;
7368
7369 case ARM_FLOAT_SOFT_FPA:
7370 case ARM_FLOAT_SOFT_VFP:
7371 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7372 not using the VFP ABI code. */
7373 case ARM_FLOAT_VFP:
7374 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
7375 if (TYPE_LENGTH (type) > 4)
7376 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
7377 valbuf + INT_REGISTER_SIZE);
7378 break;
7379
7380 default:
7381 internal_error (__FILE__, __LINE__,
7382 _("arm_store_return_value: Floating "
7383 "point model not supported"));
7384 break;
7385 }
7386 }
7387 else if (TYPE_CODE (type) == TYPE_CODE_INT
7388 || TYPE_CODE (type) == TYPE_CODE_CHAR
7389 || TYPE_CODE (type) == TYPE_CODE_BOOL
7390 || TYPE_CODE (type) == TYPE_CODE_PTR
7391 || TYPE_CODE (type) == TYPE_CODE_REF
7392 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7393 {
7394 if (TYPE_LENGTH (type) <= 4)
7395 {
7396 /* Values of one word or less are zero/sign-extended and
7397 returned in r0. */
7398 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7399 LONGEST val = unpack_long (type, valbuf);
7400
7401 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
7402 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
7403 }
7404 else
7405 {
7406 /* Integral values greater than one word are stored in consecutive
7407 registers starting with r0. This will always be a multiple of
7408 the regiser size. */
7409 int len = TYPE_LENGTH (type);
7410 int regno = ARM_A1_REGNUM;
7411
7412 while (len > 0)
7413 {
7414 regcache_cooked_write (regs, regno++, valbuf);
7415 len -= INT_REGISTER_SIZE;
7416 valbuf += INT_REGISTER_SIZE;
7417 }
7418 }
7419 }
7420 else
7421 {
7422 /* For a structure or union the behaviour is as if the value had
7423 been stored to word-aligned memory and then loaded into
7424 registers with 32-bit load instruction(s). */
7425 int len = TYPE_LENGTH (type);
7426 int regno = ARM_A1_REGNUM;
7427 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7428
7429 while (len > 0)
7430 {
7431 memcpy (tmpbuf, valbuf,
7432 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7433 regcache_cooked_write (regs, regno++, tmpbuf);
7434 len -= INT_REGISTER_SIZE;
7435 valbuf += INT_REGISTER_SIZE;
7436 }
7437 }
7438 }
7439
7440
7441 /* Handle function return values. */
7442
7443 static enum return_value_convention
7444 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
7445 struct type *valtype, struct regcache *regcache,
7446 gdb_byte *readbuf, const gdb_byte *writebuf)
7447 {
7448 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7449 enum arm_vfp_cprc_base_type vfp_base_type;
7450 int vfp_base_count;
7451
7452 if (arm_vfp_abi_for_function (gdbarch, func_type)
7453 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
7454 {
7455 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
7456 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
7457 int i;
7458 for (i = 0; i < vfp_base_count; i++)
7459 {
7460 if (reg_char == 'q')
7461 {
7462 if (writebuf)
7463 arm_neon_quad_write (gdbarch, regcache, i,
7464 writebuf + i * unit_length);
7465
7466 if (readbuf)
7467 arm_neon_quad_read (gdbarch, regcache, i,
7468 readbuf + i * unit_length);
7469 }
7470 else
7471 {
7472 char name_buf[4];
7473 int regnum;
7474
7475 sprintf (name_buf, "%c%d", reg_char, i);
7476 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7477 strlen (name_buf));
7478 if (writebuf)
7479 regcache_cooked_write (regcache, regnum,
7480 writebuf + i * unit_length);
7481 if (readbuf)
7482 regcache_cooked_read (regcache, regnum,
7483 readbuf + i * unit_length);
7484 }
7485 }
7486 return RETURN_VALUE_REGISTER_CONVENTION;
7487 }
7488
7489 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
7490 || TYPE_CODE (valtype) == TYPE_CODE_UNION
7491 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
7492 {
7493 if (tdep->struct_return == pcc_struct_return
7494 || arm_return_in_memory (gdbarch, valtype))
7495 return RETURN_VALUE_STRUCT_CONVENTION;
7496 }
7497
7498 if (writebuf)
7499 arm_store_return_value (valtype, regcache, writebuf);
7500
7501 if (readbuf)
7502 arm_extract_return_value (valtype, regcache, readbuf);
7503
7504 return RETURN_VALUE_REGISTER_CONVENTION;
7505 }
7506
7507
7508 static int
7509 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
7510 {
7511 struct gdbarch *gdbarch = get_frame_arch (frame);
7512 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7513 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7514 CORE_ADDR jb_addr;
7515 char buf[INT_REGISTER_SIZE];
7516
7517 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
7518
7519 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
7520 INT_REGISTER_SIZE))
7521 return 0;
7522
7523 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
7524 return 1;
7525 }
7526
7527 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
7528 return the target PC. Otherwise return 0. */
7529
7530 CORE_ADDR
7531 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
7532 {
7533 char *name;
7534 int namelen;
7535 CORE_ADDR start_addr;
7536
7537 /* Find the starting address and name of the function containing the PC. */
7538 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
7539 return 0;
7540
7541 /* If PC is in a Thumb call or return stub, return the address of the
7542 target PC, which is in a register. The thunk functions are called
7543 _call_via_xx, where x is the register name. The possible names
7544 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
7545 functions, named __ARM_call_via_r[0-7]. */
7546 if (strncmp (name, "_call_via_", 10) == 0
7547 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
7548 {
7549 /* Use the name suffix to determine which register contains the
7550 target PC. */
7551 static char *table[15] =
7552 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
7553 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
7554 };
7555 int regno;
7556 int offset = strlen (name) - 2;
7557
7558 for (regno = 0; regno <= 14; regno++)
7559 if (strcmp (&name[offset], table[regno]) == 0)
7560 return get_frame_register_unsigned (frame, regno);
7561 }
7562
7563 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
7564 non-interworking calls to foo. We could decode the stubs
7565 to find the target but it's easier to use the symbol table. */
7566 namelen = strlen (name);
7567 if (name[0] == '_' && name[1] == '_'
7568 && ((namelen > 2 + strlen ("_from_thumb")
7569 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
7570 strlen ("_from_thumb")) == 0)
7571 || (namelen > 2 + strlen ("_from_arm")
7572 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
7573 strlen ("_from_arm")) == 0)))
7574 {
7575 char *target_name;
7576 int target_len = namelen - 2;
7577 struct minimal_symbol *minsym;
7578 struct objfile *objfile;
7579 struct obj_section *sec;
7580
7581 if (name[namelen - 1] == 'b')
7582 target_len -= strlen ("_from_thumb");
7583 else
7584 target_len -= strlen ("_from_arm");
7585
7586 target_name = alloca (target_len + 1);
7587 memcpy (target_name, name + 2, target_len);
7588 target_name[target_len] = '\0';
7589
7590 sec = find_pc_section (pc);
7591 objfile = (sec == NULL) ? NULL : sec->objfile;
7592 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
7593 if (minsym != NULL)
7594 return SYMBOL_VALUE_ADDRESS (minsym);
7595 else
7596 return 0;
7597 }
7598
7599 return 0; /* not a stub */
7600 }
7601
7602 static void
7603 set_arm_command (char *args, int from_tty)
7604 {
7605 printf_unfiltered (_("\
7606 \"set arm\" must be followed by an apporpriate subcommand.\n"));
7607 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
7608 }
7609
7610 static void
7611 show_arm_command (char *args, int from_tty)
7612 {
7613 cmd_show_list (showarmcmdlist, from_tty, "");
7614 }
7615
7616 static void
7617 arm_update_current_architecture (void)
7618 {
7619 struct gdbarch_info info;
7620
7621 /* If the current architecture is not ARM, we have nothing to do. */
7622 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
7623 return;
7624
7625 /* Update the architecture. */
7626 gdbarch_info_init (&info);
7627
7628 if (!gdbarch_update_p (info))
7629 internal_error (__FILE__, __LINE__, _("could not update architecture"));
7630 }
7631
7632 static void
7633 set_fp_model_sfunc (char *args, int from_tty,
7634 struct cmd_list_element *c)
7635 {
7636 enum arm_float_model fp_model;
7637
7638 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
7639 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
7640 {
7641 arm_fp_model = fp_model;
7642 break;
7643 }
7644
7645 if (fp_model == ARM_FLOAT_LAST)
7646 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
7647 current_fp_model);
7648
7649 arm_update_current_architecture ();
7650 }
7651
7652 static void
7653 show_fp_model (struct ui_file *file, int from_tty,
7654 struct cmd_list_element *c, const char *value)
7655 {
7656 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7657
7658 if (arm_fp_model == ARM_FLOAT_AUTO
7659 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7660 fprintf_filtered (file, _("\
7661 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
7662 fp_model_strings[tdep->fp_model]);
7663 else
7664 fprintf_filtered (file, _("\
7665 The current ARM floating point model is \"%s\".\n"),
7666 fp_model_strings[arm_fp_model]);
7667 }
7668
7669 static void
7670 arm_set_abi (char *args, int from_tty,
7671 struct cmd_list_element *c)
7672 {
7673 enum arm_abi_kind arm_abi;
7674
7675 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
7676 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
7677 {
7678 arm_abi_global = arm_abi;
7679 break;
7680 }
7681
7682 if (arm_abi == ARM_ABI_LAST)
7683 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
7684 arm_abi_string);
7685
7686 arm_update_current_architecture ();
7687 }
7688
7689 static void
7690 arm_show_abi (struct ui_file *file, int from_tty,
7691 struct cmd_list_element *c, const char *value)
7692 {
7693 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7694
7695 if (arm_abi_global == ARM_ABI_AUTO
7696 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7697 fprintf_filtered (file, _("\
7698 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
7699 arm_abi_strings[tdep->arm_abi]);
7700 else
7701 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
7702 arm_abi_string);
7703 }
7704
7705 static void
7706 arm_show_fallback_mode (struct ui_file *file, int from_tty,
7707 struct cmd_list_element *c, const char *value)
7708 {
7709 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7710
7711 fprintf_filtered (file,
7712 _("The current execution mode assumed "
7713 "(when symbols are unavailable) is \"%s\".\n"),
7714 arm_fallback_mode_string);
7715 }
7716
7717 static void
7718 arm_show_force_mode (struct ui_file *file, int from_tty,
7719 struct cmd_list_element *c, const char *value)
7720 {
7721 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7722
7723 fprintf_filtered (file,
7724 _("The current execution mode assumed "
7725 "(even when symbols are available) is \"%s\".\n"),
7726 arm_force_mode_string);
7727 }
7728
7729 /* If the user changes the register disassembly style used for info
7730 register and other commands, we have to also switch the style used
7731 in opcodes for disassembly output. This function is run in the "set
7732 arm disassembly" command, and does that. */
7733
7734 static void
7735 set_disassembly_style_sfunc (char *args, int from_tty,
7736 struct cmd_list_element *c)
7737 {
7738 set_disassembly_style ();
7739 }
7740 \f
7741 /* Return the ARM register name corresponding to register I. */
7742 static const char *
7743 arm_register_name (struct gdbarch *gdbarch, int i)
7744 {
7745 const int num_regs = gdbarch_num_regs (gdbarch);
7746
7747 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
7748 && i >= num_regs && i < num_regs + 32)
7749 {
7750 static const char *const vfp_pseudo_names[] = {
7751 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
7752 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
7753 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
7754 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
7755 };
7756
7757 return vfp_pseudo_names[i - num_regs];
7758 }
7759
7760 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
7761 && i >= num_regs + 32 && i < num_regs + 32 + 16)
7762 {
7763 static const char *const neon_pseudo_names[] = {
7764 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
7765 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
7766 };
7767
7768 return neon_pseudo_names[i - num_regs - 32];
7769 }
7770
7771 if (i >= ARRAY_SIZE (arm_register_names))
7772 /* These registers are only supported on targets which supply
7773 an XML description. */
7774 return "";
7775
7776 return arm_register_names[i];
7777 }
7778
7779 static void
7780 set_disassembly_style (void)
7781 {
7782 int current;
7783
7784 /* Find the style that the user wants. */
7785 for (current = 0; current < num_disassembly_options; current++)
7786 if (disassembly_style == valid_disassembly_styles[current])
7787 break;
7788 gdb_assert (current < num_disassembly_options);
7789
7790 /* Synchronize the disassembler. */
7791 set_arm_regname_option (current);
7792 }
7793
7794 /* Test whether the coff symbol specific value corresponds to a Thumb
7795 function. */
7796
7797 static int
7798 coff_sym_is_thumb (int val)
7799 {
7800 return (val == C_THUMBEXT
7801 || val == C_THUMBSTAT
7802 || val == C_THUMBEXTFUNC
7803 || val == C_THUMBSTATFUNC
7804 || val == C_THUMBLABEL);
7805 }
7806
7807 /* arm_coff_make_msymbol_special()
7808 arm_elf_make_msymbol_special()
7809
7810 These functions test whether the COFF or ELF symbol corresponds to
7811 an address in thumb code, and set a "special" bit in a minimal
7812 symbol to indicate that it does. */
7813
7814 static void
7815 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
7816 {
7817 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
7818 == ST_BRANCH_TO_THUMB)
7819 MSYMBOL_SET_SPECIAL (msym);
7820 }
7821
7822 static void
7823 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
7824 {
7825 if (coff_sym_is_thumb (val))
7826 MSYMBOL_SET_SPECIAL (msym);
7827 }
7828
7829 static void
7830 arm_objfile_data_free (struct objfile *objfile, void *arg)
7831 {
7832 struct arm_per_objfile *data = arg;
7833 unsigned int i;
7834
7835 for (i = 0; i < objfile->obfd->section_count; i++)
7836 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
7837 }
7838
7839 static void
7840 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
7841 asymbol *sym)
7842 {
7843 const char *name = bfd_asymbol_name (sym);
7844 struct arm_per_objfile *data;
7845 VEC(arm_mapping_symbol_s) **map_p;
7846 struct arm_mapping_symbol new_map_sym;
7847
7848 gdb_assert (name[0] == '$');
7849 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
7850 return;
7851
7852 data = objfile_data (objfile, arm_objfile_data_key);
7853 if (data == NULL)
7854 {
7855 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
7856 struct arm_per_objfile);
7857 set_objfile_data (objfile, arm_objfile_data_key, data);
7858 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
7859 objfile->obfd->section_count,
7860 VEC(arm_mapping_symbol_s) *);
7861 }
7862 map_p = &data->section_maps[bfd_get_section (sym)->index];
7863
7864 new_map_sym.value = sym->value;
7865 new_map_sym.type = name[1];
7866
7867 /* Assume that most mapping symbols appear in order of increasing
7868 value. If they were randomly distributed, it would be faster to
7869 always push here and then sort at first use. */
7870 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
7871 {
7872 struct arm_mapping_symbol *prev_map_sym;
7873
7874 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
7875 if (prev_map_sym->value >= sym->value)
7876 {
7877 unsigned int idx;
7878 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
7879 arm_compare_mapping_symbols);
7880 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
7881 return;
7882 }
7883 }
7884
7885 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
7886 }
7887
7888 static void
7889 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7890 {
7891 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7892 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7893
7894 /* If necessary, set the T bit. */
7895 if (arm_apcs_32)
7896 {
7897 ULONGEST val, t_bit;
7898 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7899 t_bit = arm_psr_thumb_bit (gdbarch);
7900 if (arm_pc_is_thumb (gdbarch, pc))
7901 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7902 val | t_bit);
7903 else
7904 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7905 val & ~t_bit);
7906 }
7907 }
7908
7909 /* Read the contents of a NEON quad register, by reading from two
7910 double registers. This is used to implement the quad pseudo
7911 registers, and for argument passing in case the quad registers are
7912 missing; vectors are passed in quad registers when using the VFP
7913 ABI, even if a NEON unit is not present. REGNUM is the index of
7914 the quad register, in [0, 15]. */
7915
7916 static enum register_status
7917 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
7918 int regnum, gdb_byte *buf)
7919 {
7920 char name_buf[4];
7921 gdb_byte reg_buf[8];
7922 int offset, double_regnum;
7923 enum register_status status;
7924
7925 sprintf (name_buf, "d%d", regnum << 1);
7926 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7927 strlen (name_buf));
7928
7929 /* d0 is always the least significant half of q0. */
7930 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7931 offset = 8;
7932 else
7933 offset = 0;
7934
7935 status = regcache_raw_read (regcache, double_regnum, reg_buf);
7936 if (status != REG_VALID)
7937 return status;
7938 memcpy (buf + offset, reg_buf, 8);
7939
7940 offset = 8 - offset;
7941 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
7942 if (status != REG_VALID)
7943 return status;
7944 memcpy (buf + offset, reg_buf, 8);
7945
7946 return REG_VALID;
7947 }
7948
7949 static enum register_status
7950 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
7951 int regnum, gdb_byte *buf)
7952 {
7953 const int num_regs = gdbarch_num_regs (gdbarch);
7954 char name_buf[4];
7955 gdb_byte reg_buf[8];
7956 int offset, double_regnum;
7957
7958 gdb_assert (regnum >= num_regs);
7959 regnum -= num_regs;
7960
7961 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7962 /* Quad-precision register. */
7963 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
7964 else
7965 {
7966 enum register_status status;
7967
7968 /* Single-precision register. */
7969 gdb_assert (regnum < 32);
7970
7971 /* s0 is always the least significant half of d0. */
7972 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7973 offset = (regnum & 1) ? 0 : 4;
7974 else
7975 offset = (regnum & 1) ? 4 : 0;
7976
7977 sprintf (name_buf, "d%d", regnum >> 1);
7978 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7979 strlen (name_buf));
7980
7981 status = regcache_raw_read (regcache, double_regnum, reg_buf);
7982 if (status == REG_VALID)
7983 memcpy (buf, reg_buf + offset, 4);
7984 return status;
7985 }
7986 }
7987
7988 /* Store the contents of BUF to a NEON quad register, by writing to
7989 two double registers. This is used to implement the quad pseudo
7990 registers, and for argument passing in case the quad registers are
7991 missing; vectors are passed in quad registers when using the VFP
7992 ABI, even if a NEON unit is not present. REGNUM is the index
7993 of the quad register, in [0, 15]. */
7994
7995 static void
7996 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
7997 int regnum, const gdb_byte *buf)
7998 {
7999 char name_buf[4];
8000 gdb_byte reg_buf[8];
8001 int offset, double_regnum;
8002
8003 sprintf (name_buf, "d%d", regnum << 1);
8004 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8005 strlen (name_buf));
8006
8007 /* d0 is always the least significant half of q0. */
8008 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8009 offset = 8;
8010 else
8011 offset = 0;
8012
8013 regcache_raw_write (regcache, double_regnum, buf + offset);
8014 offset = 8 - offset;
8015 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
8016 }
8017
8018 static void
8019 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
8020 int regnum, const gdb_byte *buf)
8021 {
8022 const int num_regs = gdbarch_num_regs (gdbarch);
8023 char name_buf[4];
8024 gdb_byte reg_buf[8];
8025 int offset, double_regnum;
8026
8027 gdb_assert (regnum >= num_regs);
8028 regnum -= num_regs;
8029
8030 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8031 /* Quad-precision register. */
8032 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
8033 else
8034 {
8035 /* Single-precision register. */
8036 gdb_assert (regnum < 32);
8037
8038 /* s0 is always the least significant half of d0. */
8039 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8040 offset = (regnum & 1) ? 0 : 4;
8041 else
8042 offset = (regnum & 1) ? 4 : 0;
8043
8044 sprintf (name_buf, "d%d", regnum >> 1);
8045 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8046 strlen (name_buf));
8047
8048 regcache_raw_read (regcache, double_regnum, reg_buf);
8049 memcpy (reg_buf + offset, buf, 4);
8050 regcache_raw_write (regcache, double_regnum, reg_buf);
8051 }
8052 }
8053
8054 static struct value *
8055 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
8056 {
8057 const int *reg_p = baton;
8058 return value_of_register (*reg_p, frame);
8059 }
8060 \f
8061 static enum gdb_osabi
8062 arm_elf_osabi_sniffer (bfd *abfd)
8063 {
8064 unsigned int elfosabi;
8065 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
8066
8067 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
8068
8069 if (elfosabi == ELFOSABI_ARM)
8070 /* GNU tools use this value. Check note sections in this case,
8071 as well. */
8072 bfd_map_over_sections (abfd,
8073 generic_elf_osabi_sniff_abi_tag_sections,
8074 &osabi);
8075
8076 /* Anything else will be handled by the generic ELF sniffer. */
8077 return osabi;
8078 }
8079
8080 static int
8081 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8082 struct reggroup *group)
8083 {
8084 /* FPS register's type is INT, but belongs to float_reggroup. Beside
8085 this, FPS register belongs to save_regroup, restore_reggroup, and
8086 all_reggroup, of course. */
8087 if (regnum == ARM_FPS_REGNUM)
8088 return (group == float_reggroup
8089 || group == save_reggroup
8090 || group == restore_reggroup
8091 || group == all_reggroup);
8092 else
8093 return default_register_reggroup_p (gdbarch, regnum, group);
8094 }
8095
8096 \f
8097 /* Initialize the current architecture based on INFO. If possible,
8098 re-use an architecture from ARCHES, which is a list of
8099 architectures already created during this debugging session.
8100
8101 Called e.g. at program startup, when reading a core file, and when
8102 reading a binary file. */
8103
static struct gdbarch *
arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  enum arm_abi_kind arm_abi = arm_abi_global;
  enum arm_float_model fp_model = arm_fp_model;
  struct tdesc_arch_data *tdesc_data = NULL;
  /* is_m: nonzero if the target was identified as M-profile, either
     from the target description or from ELF build attributes.  */
  int i, is_m = 0;
  int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
  int have_neon = 0;
  int have_fpa_registers = 1;
  const struct target_desc *tdesc = info.target_desc;

  /* If we have an object to base this architecture on, try to determine
     its ABI.  */

  if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
    {
      int ei_osabi, e_flags;

      switch (bfd_get_flavour (info.abfd))
	{
	case bfd_target_aout_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_coff_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  /* XXX WinCE?  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_elf_flavour:
	  ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
	  e_flags = elf_elfheader (info.abfd)->e_flags;

	  if (ei_osabi == ELFOSABI_ARM)
	    {
	      /* GNU tools used to use this value, but do not for EABI
		 objects.  There's nowhere to tag an EABI version
		 anyway, so assume APCS.  */
	      arm_abi = ARM_ABI_APCS;
	    }
	  else if (ei_osabi == ELFOSABI_NONE)
	    {
	      int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
	      int attr_arch, attr_profile;

	      switch (eabi_ver)
		{
		case EF_ARM_EABI_UNKNOWN:
		  /* Assume GNU tools.  */
		  arm_abi = ARM_ABI_APCS;
		  break;

		case EF_ARM_EABI_VER4:
		case EF_ARM_EABI_VER5:
		  arm_abi = ARM_ABI_AAPCS;
		  /* EABI binaries default to VFP float ordering.
		     They may also contain build attributes that can
		     be used to identify if the VFP argument-passing
		     ABI is in use.  */
		  if (fp_model == ARM_FLOAT_AUTO)
		    {
#ifdef HAVE_ELF
		      switch (bfd_elf_get_obj_attr_int (info.abfd,
							OBJ_ATTR_PROC,
							Tag_ABI_VFP_args))
			{
			case 0:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, base
			     variant".  */
			  fp_model = ARM_FLOAT_SOFT_VFP;
			  break;
			case 1:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, VFP
			     variant".  */
			  fp_model = ARM_FLOAT_VFP;
			  break;
			case 2:
			  /* "The user intended FP parameter/result
			     passing to conform to tool chain-specific
			     conventions" - we don't know any such
			     conventions, so leave it as "auto".  */
			  break;
			default:
			  /* Attribute value not mentioned in the
			     October 2008 ABI, so leave it as
			     "auto".  */
			  break;
			}
#else
		      /* Without ELF attribute support, assume the base
			 (soft-VFP) calling convention.  */
		      fp_model = ARM_FLOAT_SOFT_VFP;
#endif
		    }
		  break;

		default:
		  /* Leave it as "auto".  */
		  warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
		  break;
		}

#ifdef HAVE_ELF
	      /* Detect M-profile programs.  This only works if the
		 executable file includes build attributes; GCC does
		 copy them to the executable, but e.g. RealView does
		 not.  */
	      attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
						    Tag_CPU_arch);
	      attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
						       OBJ_ATTR_PROC,
						       Tag_CPU_arch_profile);
	      /* GCC specifies the profile for v6-M; RealView only
		 specifies the profile for architectures starting with
		 V7 (as opposed to architectures with a tag
		 numerically greater than TAG_CPU_ARCH_V7).  */
	      if (!tdesc_has_registers (tdesc)
		  && (attr_arch == TAG_CPU_ARCH_V6_M
		      || attr_arch == TAG_CPU_ARCH_V6S_M
		      || attr_profile == 'M'))
		tdesc = tdesc_arm_with_m;
#endif
	    }

	  if (fp_model == ARM_FLOAT_AUTO)
	    {
	      int e_flags = elf_elfheader (info.abfd)->e_flags;

	      switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
		{
		case 0:
		  /* Leave it as "auto".  Strictly speaking this case
		     means FPA, but almost nobody uses that now, and
		     many toolchains fail to set the appropriate bits
		     for the floating-point model they use.  */
		  break;
		case EF_ARM_SOFT_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_FPA;
		  break;
		case EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_VFP;
		  break;
		case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_VFP;
		  break;
		}
	    }

	  /* BE8 executables contain little-endian code even when data
	     is big-endian, so record that for instruction access.  */
	  if (e_flags & EF_ARM_BE8)
	    info.byte_order_for_code = BFD_ENDIAN_LITTLE;

	  break;

	default:
	  /* Leave it as "auto".  */
	  break;
	}
    }

  /* Check any target description for validity.  */
  if (tdesc_has_registers (tdesc))
    {
      /* For most registers we require GDB's default names; but also allow
	 the numeric names for sp / lr / pc, as a convenience.  */
      static const char *const arm_sp_names[] = { "r13", "sp", NULL };
      static const char *const arm_lr_names[] = { "r14", "lr", NULL };
      static const char *const arm_pc_names[] = { "r15", "pc", NULL };

      const struct tdesc_feature *feature;
      int valid_p;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.core");
      if (feature == NULL)
	{
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.m-profile");
	  if (feature == NULL)
	    return NULL;
	  else
	    is_m = 1;
	}

      tdesc_data = tdesc_data_alloc ();

      valid_p = 1;
      for (i = 0; i < ARM_SP_REGNUM; i++)
	valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
					    arm_register_names[i]);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_SP_REGNUM,
						  arm_sp_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_LR_REGNUM,
						  arm_lr_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_PC_REGNUM,
						  arm_pc_names);
      /* M-profile targets expose "xpsr" instead of "cpsr".  */
      if (is_m)
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "xpsr");
      else
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "cpsr");

      if (!valid_p)
	{
	  tdesc_data_cleanup (tdesc_data);
	  return NULL;
	}

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.fpa");
      if (feature != NULL)
	{
	  valid_p = 1;
	  for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
	    valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
						arm_register_names[i]);
	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}
      else
	have_fpa_registers = 0;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.xscale.iwmmxt");
      if (feature != NULL)
	{
	  static const char *const iwmmxt_names[] = {
	    "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
	    "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
	    "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
	    "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
	  };

	  valid_p = 1;
	  for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  /* Check for the control registers, but do not fail if they
	     are missing.  */
	  for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
	    tdesc_numbered_register (feature, tdesc_data, i,
				     iwmmxt_names[i - ARM_WR0_REGNUM]);

	  for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}

      /* If we have a VFP unit, check whether the single precision registers
	 are present.  If not, then we will synthesize them as pseudo
	 registers.  */
      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.vfp");
      if (feature != NULL)
	{
	  static const char *const vfp_double_names[] = {
	    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
	    "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
	    "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
	    "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
	  };

	  /* Require the double precision registers.  There must be either
	     16 or 32.  */
	  valid_p = 1;
	  for (i = 0; i < 32; i++)
	    {
	      valid_p &= tdesc_numbered_register (feature, tdesc_data,
						  ARM_D0_REGNUM + i,
						  vfp_double_names[i]);
	      if (!valid_p)
		break;
	    }
	  /* Exactly 16 double registers (a D16 unit) is also valid.  */
	  if (!valid_p && i == 16)
	    valid_p = 1;

	  /* Also require FPSCR.  */
	  valid_p &= tdesc_numbered_register (feature, tdesc_data,
					      ARM_FPSCR_REGNUM, "fpscr");
	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }

	  /* If the description does not name "s0", the single-precision
	     views must be synthesized as pseudo registers.  */
	  if (tdesc_unnumbered_register (feature, "s0") == 0)
	    have_vfp_pseudos = 1;

	  have_vfp_registers = 1;

	  /* If we have VFP, also check for NEON.  The architecture allows
	     NEON without VFP (integer vector operations only), but GDB
	     does not support that.  */
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.neon");
	  if (feature != NULL)
	    {
	      /* NEON requires 32 double-precision registers.  */
	      if (i != 32)
		{
		  tdesc_data_cleanup (tdesc_data);
		  return NULL;
		}

	      /* If there are quad registers defined by the stub, use
		 their type; otherwise (normally) provide them with
		 the default type.  */
	      if (tdesc_unnumbered_register (feature, "q0") == 0)
		have_neon_pseudos = 1;

	      have_neon = 1;
	    }
	}
    }

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      if (arm_abi != ARM_ABI_AUTO
	  && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
	continue;

      if (fp_model != ARM_FLOAT_AUTO
	  && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
	continue;

      /* There are various other properties in tdep that we do not
	 need to check here: those derived from a target description,
	 since gdbarches with a different target description are
	 automatically disqualified.  */

      /* Do check is_m, though, since it might come from the binary.  */
      if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
	continue;

      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  /* No reusable candidate: allocate and populate a new gdbarch.  */
  tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Record additional information about the architecture we are defining.
     These are gdbarch discriminators, like the OSABI.  */
  tdep->arm_abi = arm_abi;
  tdep->fp_model = fp_model;
  tdep->is_m = is_m;
  tdep->have_fpa_registers = have_fpa_registers;
  tdep->have_vfp_registers = have_vfp_registers;
  tdep->have_vfp_pseudos = have_vfp_pseudos;
  tdep->have_neon_pseudos = have_neon_pseudos;
  tdep->have_neon = have_neon;

  /* Breakpoints.  */
  switch (info.byte_order_for_code)
    {
    case BFD_ENDIAN_BIG:
      tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);

      break;

    case BFD_ENDIAN_LITTLE:
      tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);

      break;

    default:
      internal_error (__FILE__, __LINE__,
		      _("arm_gdbarch_init: bad byte order for float format"));
    }

  /* On ARM targets char defaults to unsigned.  */
  set_gdbarch_char_signed (gdbarch, 0);

  /* Note: for displaced stepping, this includes the breakpoint, and one word
     of additional scratch space.  This setting isn't used for anything beside
     displaced stepping at present.  */
  set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;	/* Longjump support not enabled by default.  */

  /* The default, for both APCS and AAPCS, is to return small
     structures in registers.  */
  tdep->struct_return = reg_struct_return;

  set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, arm_frame_align);

  set_gdbarch_write_pc (gdbarch, arm_write_pc);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);

  frame_base_set_default (gdbarch, &arm_normal_base);

  /* Address manipulation.  */
  set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
  set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);

  /* Detect whether PC is in function epilogue.  */
  set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);

  /* Skip trampolines.  */
  set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
  set_gdbarch_remote_breakpoint_from_pc (gdbarch,
					 arm_remote_breakpoint_from_pc);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
  set_gdbarch_register_type (gdbarch, arm_register_type);
  set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);

  /* This "info float" is FPA-specific.  Use the generic version if we
     do not have FPA.  */
  if (gdbarch_tdep (gdbarch)->have_fpa_registers)
    set_gdbarch_print_float_info (gdbarch, arm_print_float_info);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
  set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);

  set_gdbarch_register_name (gdbarch, arm_register_name);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, arm_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);

  /* Minsymbol frobbing.  */
  set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
  set_gdbarch_coff_make_msymbol_special (gdbarch,
					 arm_coff_make_msymbol_special);
  set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);

  /* Thumb-2 IT block support.  */
  set_gdbarch_adjust_breakpoint_address (gdbarch,
					 arm_adjust_breakpoint_address);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
  frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  /* If the ABI is not otherwise marked, assume the old GNU APCS.  EABI
     binaries are always marked.  */
  if (tdep->arm_abi == ARM_ABI_AUTO)
    tdep->arm_abi = ARM_ABI_APCS;

  /* Watchpoints are not steppable.  */
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);

  /* We used to default to FPA for generic ARM, but almost nobody
     uses that now, and we now provide a way for the user to force
     the model.  So default to the most useful variant.  */
  if (tdep->fp_model == ARM_FLOAT_AUTO)
    tdep->fp_model = ARM_FLOAT_SOFT_FPA;

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);

  /* Floating point sizes and format.  */
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
    {
      /* FPA doubles are stored with the two 32-bit words swapped
	 relative to IEEE little-endian layout.  */
      set_gdbarch_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
      set_gdbarch_long_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
    }
  else
    {
      set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
      set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
    }

  if (have_vfp_pseudos)
    {
      /* NOTE: These are the only pseudo registers used by
	 the ARM target at the moment.  If more are added, a
	 little more care in numbering will be needed.  */

      int num_pseudos = 32;
      if (have_neon_pseudos)
	num_pseudos += 16;
      set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
      set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
      set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
    }

  if (tdesc_data)
    {
      set_tdesc_pseudo_register_name (gdbarch, arm_register_name);

      tdesc_use_registers (gdbarch, tdesc, tdesc_data);

      /* Override tdesc_register_type to adjust the types of VFP
	 registers for NEON.  */
      set_gdbarch_register_type (gdbarch, arm_register_type);
    }

  /* Add standard register aliases.  We add aliases even for those
     names which are used by the current architecture - it's simpler,
     and does no harm, since nothing ever lists user registers.  */
  for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
    user_reg_add (gdbarch, arm_register_aliases[i].name,
		  value_of_arm_user_reg, &arm_register_aliases[i].regnum);

  return gdbarch;
}
8677
8678 static void
8679 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
8680 {
8681 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8682
8683 if (tdep == NULL)
8684 return;
8685
8686 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
8687 (unsigned long) tdep->lowest_pc);
8688 }
8689
8690 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
8691
8692 void
8693 _initialize_arm_tdep (void)
8694 {
8695 struct ui_file *stb;
8696 long length;
8697 struct cmd_list_element *new_set, *new_show;
8698 const char *setname;
8699 const char *setdesc;
8700 const char *const *regnames;
8701 int numregs, i, j;
8702 static char *helptext;
8703 char regdesc[1024], *rdptr = regdesc;
8704 size_t rest = sizeof (regdesc);
8705
8706 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
8707
8708 arm_objfile_data_key
8709 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
8710
8711 /* Add ourselves to objfile event chain. */
8712 observer_attach_new_objfile (arm_exidx_new_objfile);
8713 arm_exidx_data_key
8714 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
8715
8716 /* Register an ELF OS ABI sniffer for ARM binaries. */
8717 gdbarch_register_osabi_sniffer (bfd_arch_arm,
8718 bfd_target_elf_flavour,
8719 arm_elf_osabi_sniffer);
8720
8721 /* Initialize the standard target descriptions. */
8722 initialize_tdesc_arm_with_m ();
8723
8724 /* Get the number of possible sets of register names defined in opcodes. */
8725 num_disassembly_options = get_arm_regname_num_options ();
8726
8727 /* Add root prefix command for all "set arm"/"show arm" commands. */
8728 add_prefix_cmd ("arm", no_class, set_arm_command,
8729 _("Various ARM-specific commands."),
8730 &setarmcmdlist, "set arm ", 0, &setlist);
8731
8732 add_prefix_cmd ("arm", no_class, show_arm_command,
8733 _("Various ARM-specific commands."),
8734 &showarmcmdlist, "show arm ", 0, &showlist);
8735
8736 /* Sync the opcode insn printer with our register viewer. */
8737 parse_arm_disassembler_option ("reg-names-std");
8738
8739 /* Initialize the array that will be passed to
8740 add_setshow_enum_cmd(). */
8741 valid_disassembly_styles
8742 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
8743 for (i = 0; i < num_disassembly_options; i++)
8744 {
8745 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
8746 valid_disassembly_styles[i] = setname;
8747 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
8748 rdptr += length;
8749 rest -= length;
8750 /* When we find the default names, tell the disassembler to use
8751 them. */
8752 if (!strcmp (setname, "std"))
8753 {
8754 disassembly_style = setname;
8755 set_arm_regname_option (i);
8756 }
8757 }
8758 /* Mark the end of valid options. */
8759 valid_disassembly_styles[num_disassembly_options] = NULL;
8760
8761 /* Create the help text. */
8762 stb = mem_fileopen ();
8763 fprintf_unfiltered (stb, "%s%s%s",
8764 _("The valid values are:\n"),
8765 regdesc,
8766 _("The default is \"std\"."));
8767 helptext = ui_file_xstrdup (stb, NULL);
8768 ui_file_delete (stb);
8769
8770 add_setshow_enum_cmd("disassembler", no_class,
8771 valid_disassembly_styles, &disassembly_style,
8772 _("Set the disassembly style."),
8773 _("Show the disassembly style."),
8774 helptext,
8775 set_disassembly_style_sfunc,
8776 NULL, /* FIXME: i18n: The disassembly style is
8777 \"%s\". */
8778 &setarmcmdlist, &showarmcmdlist);
8779
8780 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
8781 _("Set usage of ARM 32-bit mode."),
8782 _("Show usage of ARM 32-bit mode."),
8783 _("When off, a 26-bit PC will be used."),
8784 NULL,
8785 NULL, /* FIXME: i18n: Usage of ARM 32-bit
8786 mode is %s. */
8787 &setarmcmdlist, &showarmcmdlist);
8788
8789 /* Add a command to allow the user to force the FPU model. */
8790 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
8791 _("Set the floating point type."),
8792 _("Show the floating point type."),
8793 _("auto - Determine the FP typefrom the OS-ABI.\n\
8794 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
8795 fpa - FPA co-processor (GCC compiled).\n\
8796 softvfp - Software FP with pure-endian doubles.\n\
8797 vfp - VFP co-processor."),
8798 set_fp_model_sfunc, show_fp_model,
8799 &setarmcmdlist, &showarmcmdlist);
8800
8801 /* Add a command to allow the user to force the ABI. */
8802 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
8803 _("Set the ABI."),
8804 _("Show the ABI."),
8805 NULL, arm_set_abi, arm_show_abi,
8806 &setarmcmdlist, &showarmcmdlist);
8807
8808 /* Add two commands to allow the user to force the assumed
8809 execution mode. */
8810 add_setshow_enum_cmd ("fallback-mode", class_support,
8811 arm_mode_strings, &arm_fallback_mode_string,
8812 _("Set the mode assumed when symbols are unavailable."),
8813 _("Show the mode assumed when symbols are unavailable."),
8814 NULL, NULL, arm_show_fallback_mode,
8815 &setarmcmdlist, &showarmcmdlist);
8816 add_setshow_enum_cmd ("force-mode", class_support,
8817 arm_mode_strings, &arm_force_mode_string,
8818 _("Set the mode assumed even when symbols are available."),
8819 _("Show the mode assumed even when symbols are available."),
8820 NULL, NULL, arm_show_force_mode,
8821 &setarmcmdlist, &showarmcmdlist);
8822
8823 /* Debugging flag. */
8824 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
8825 _("Set ARM debugging."),
8826 _("Show ARM debugging."),
8827 _("When on, arm-specific debugging is enabled."),
8828 NULL,
8829 NULL, /* FIXME: i18n: "ARM debugging is %s. */
8830 &setdebuglist, &showdebuglist);
8831 }
This page took 0.248156 seconds and 4 git commands to generate.