gdb/arm-tdep.c (deliverable/binutils-gdb.git)
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper () */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
45
46 #include "arm-tdep.h"
47 #include "gdb/sim-arm.h"
48
49 #include "elf-bfd.h"
50 #include "coff/internal.h"
51 #include "elf/arm.h"
52
53 #include "gdb_assert.h"
54 #include "vec.h"
55
56 #include "features/arm-with-m.c"
57
58 static int arm_debug;
59
60 /* Macros for setting and testing a bit in a minimal symbol that marks
  61    it as a Thumb function.  The MSB of the minimal symbol's "info" field
62 is used for this purpose.
63
64 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
65 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
66
67 #define MSYMBOL_SET_SPECIAL(msym) \
68 MSYMBOL_TARGET_FLAG_1 (msym) = 1
69
70 #define MSYMBOL_IS_SPECIAL(msym) \
71 MSYMBOL_TARGET_FLAG_1 (msym)
72
73 /* Per-objfile data used for mapping symbols. */
74 static const struct objfile_data *arm_objfile_data_key;
75
76 struct arm_mapping_symbol
77 {
78 bfd_vma value;
79 char type;
80 };
81 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
82 DEF_VEC_O(arm_mapping_symbol_s);
83
84 struct arm_per_objfile
85 {
86 VEC(arm_mapping_symbol_s) **section_maps;
87 };
88
89 /* The list of available "set arm ..." and "show arm ..." commands. */
90 static struct cmd_list_element *setarmcmdlist = NULL;
91 static struct cmd_list_element *showarmcmdlist = NULL;
92
93 /* The type of floating-point to use. Keep this in sync with enum
94 arm_float_model, and the help string in _initialize_arm_tdep. */
95 static const char *fp_model_strings[] =
96 {
97 "auto",
98 "softfpa",
99 "fpa",
100 "softvfp",
101 "vfp",
102 NULL
103 };
104
105 /* A variable that can be configured by the user. */
106 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
107 static const char *current_fp_model = "auto";
108
109 /* The ABI to use. Keep this in sync with arm_abi_kind. */
110 static const char *arm_abi_strings[] =
111 {
112 "auto",
113 "APCS",
114 "AAPCS",
115 NULL
116 };
117
118 /* A variable that can be configured by the user. */
119 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
120 static const char *arm_abi_string = "auto";
121
122 /* The execution mode to assume. */
123 static const char *arm_mode_strings[] =
124 {
125 "auto",
126 "arm",
127 "thumb",
128 NULL
129 };
130
131 static const char *arm_fallback_mode_string = "auto";
132 static const char *arm_force_mode_string = "auto";
133
134 /* Number of different reg name sets (options). */
135 static int num_disassembly_options;
136
137 /* The standard register names, and all the valid aliases for them. */
138 static const struct
139 {
140 const char *name;
141 int regnum;
142 } arm_register_aliases[] = {
143 /* Basic register numbers. */
144 { "r0", 0 },
145 { "r1", 1 },
146 { "r2", 2 },
147 { "r3", 3 },
148 { "r4", 4 },
149 { "r5", 5 },
150 { "r6", 6 },
151 { "r7", 7 },
152 { "r8", 8 },
153 { "r9", 9 },
154 { "r10", 10 },
155 { "r11", 11 },
156 { "r12", 12 },
157 { "r13", 13 },
158 { "r14", 14 },
159 { "r15", 15 },
160 /* Synonyms (argument and variable registers). */
161 { "a1", 0 },
162 { "a2", 1 },
163 { "a3", 2 },
164 { "a4", 3 },
165 { "v1", 4 },
166 { "v2", 5 },
167 { "v3", 6 },
168 { "v4", 7 },
169 { "v5", 8 },
170 { "v6", 9 },
171 { "v7", 10 },
172 { "v8", 11 },
173 /* Other platform-specific names for r9. */
174 { "sb", 9 },
175 { "tr", 9 },
176 /* Special names. */
177 { "ip", 12 },
178 { "sp", 13 },
179 { "lr", 14 },
180 { "pc", 15 },
181 /* Names used by GCC (not listed in the ARM EABI). */
182 { "sl", 10 },
183 { "fp", 11 },
184 /* A special name from the older ATPCS. */
185 { "wr", 7 },
186 };
187
188 static const char *const arm_register_names[] =
189 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
190 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
191 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
192 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
193 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
194 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
195 "fps", "cpsr" }; /* 24 25 */
196
197 /* Valid register name styles. */
198 static const char **valid_disassembly_styles;
199
200 /* Disassembly style to use. Default to "std" register names. */
201 static const char *disassembly_style;
202
203 /* This is used to keep the bfd arch_info in sync with the disassembly
204 style. */
205 static void set_disassembly_style_sfunc(char *, int,
206 struct cmd_list_element *);
207 static void set_disassembly_style (void);
208
209 static void convert_from_extended (const struct floatformat *, const void *,
210 void *, int);
211 static void convert_to_extended (const struct floatformat *, void *,
212 const void *, int);
213
214 static void arm_neon_quad_read (struct gdbarch *gdbarch,
215 struct regcache *regcache,
216 int regnum, gdb_byte *buf);
217 static void arm_neon_quad_write (struct gdbarch *gdbarch,
218 struct regcache *regcache,
219 int regnum, const gdb_byte *buf);
220
221 struct arm_prologue_cache
222 {
223 /* The stack pointer at the time this frame was created; i.e. the
224 caller's stack pointer when this function was called. It is used
225 to identify this frame. */
226 CORE_ADDR prev_sp;
227
228 /* The frame base for this frame is just prev_sp - frame size.
229 FRAMESIZE is the distance from the frame pointer to the
230 initial stack pointer. */
231
232 int framesize;
233
234 /* The register used to hold the frame pointer for this frame. */
235 int framereg;
236
237 /* Saved register offsets. */
238 struct trad_frame_saved_reg *saved_regs;
239 };
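/* Illustrative example (hypothetical values, not from the original
   source): for a typical GCC Thumb prologue

       push {r7, lr}
       sub  sp, #8
       add  r7, sp, #0

   the prologue scanner records FRAMEREG == THUMB_FP_REGNUM (r7) and
   FRAMESIZE == 16, so PREV_SP is the value of r7 plus 16, and the
   saved LR and r7 end up at PREV_SP - 4 and PREV_SP - 8.  */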
240
241 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
242 CORE_ADDR prologue_start,
243 CORE_ADDR prologue_end,
244 struct arm_prologue_cache *cache);
245
 246 /* Architecture version for displaced stepping.  This affects the behaviour of
247 certain instructions, and really should not be hard-wired. */
248
249 #define DISPLACED_STEPPING_ARCH_VERSION 5
250
 251 /* Addresses for calling Thumb functions have bit 0 set.
252 Here are some macros to test, set, or clear bit 0 of addresses. */
253 #define IS_THUMB_ADDR(addr) ((addr) & 1)
254 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
255 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
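/* Worked example (illustrative, not from the original source): for a
   Thumb function whose symbol table value is 0x8000, the address used
   for calls is MAKE_THUMB_ADDR (0x8000) == 0x8001; conversely
   IS_THUMB_ADDR (0x8001) is true and UNMAKE_THUMB_ADDR (0x8001)
   recovers 0x8000.  */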
256
257 /* Set to true if the 32-bit mode is in use. */
258
259 int arm_apcs_32 = 1;
260
261 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
262
263 static int
264 arm_psr_thumb_bit (struct gdbarch *gdbarch)
265 {
266 if (gdbarch_tdep (gdbarch)->is_m)
267 return XPSR_T;
268 else
269 return CPSR_T;
270 }
271
272 /* Determine if FRAME is executing in Thumb mode. */
273
274 int
275 arm_frame_is_thumb (struct frame_info *frame)
276 {
277 CORE_ADDR cpsr;
278 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
279
280 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
281 directly (from a signal frame or dummy frame) or by interpreting
282 the saved LR (from a prologue or DWARF frame). So consult it and
283 trust the unwinders. */
284 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
285
286 return (cpsr & t_bit) != 0;
287 }
288
289 /* Callback for VEC_lower_bound. */
290
291 static inline int
292 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
293 const struct arm_mapping_symbol *rhs)
294 {
295 return lhs->value < rhs->value;
296 }
297
298 /* Search for the mapping symbol covering MEMADDR. If one is found,
299 return its type. Otherwise, return 0. If START is non-NULL,
300 set *START to the location of the mapping symbol. */
301
302 static char
303 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
304 {
305 struct obj_section *sec;
306
307 /* If there are mapping symbols, consult them. */
308 sec = find_pc_section (memaddr);
309 if (sec != NULL)
310 {
311 struct arm_per_objfile *data;
312 VEC(arm_mapping_symbol_s) *map;
313 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
314 0 };
315 unsigned int idx;
316
317 data = objfile_data (sec->objfile, arm_objfile_data_key);
318 if (data != NULL)
319 {
320 map = data->section_maps[sec->the_bfd_section->index];
321 if (!VEC_empty (arm_mapping_symbol_s, map))
322 {
323 struct arm_mapping_symbol *map_sym;
324
325 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
326 arm_compare_mapping_symbols);
327
328 /* VEC_lower_bound finds the earliest ordered insertion
329 point. If the following symbol starts at this exact
330 address, we use that; otherwise, the preceding
331 mapping symbol covers this address. */
332 if (idx < VEC_length (arm_mapping_symbol_s, map))
333 {
334 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
335 if (map_sym->value == map_key.value)
336 {
337 if (start)
338 *start = map_sym->value + obj_section_addr (sec);
339 return map_sym->type;
340 }
341 }
342
343 if (idx > 0)
344 {
345 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
346 if (start)
347 *start = map_sym->value + obj_section_addr (sec);
348 return map_sym->type;
349 }
350 }
351 }
352 }
353
354 return 0;
355 }
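/* Illustrative example (not from the original source): for a section
   whose mapping symbols are $a at offset 0x0, $t at 0x40 and $d at
   0x80, a query for an address at section offset 0x44 finds the $d
   entry as the lower bound (the first symbol not below 0x44), backs
   up one slot, and returns 't' with *START set to the address of the
   $t symbol -- so the address is treated as Thumb code.  */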
356
357 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
358 CORE_ADDR pc, int insert_bkpt);
359
360 /* Determine if the program counter specified in MEMADDR is in a Thumb
361 function. This function should be called for addresses unrelated to
362 any executing frame; otherwise, prefer arm_frame_is_thumb. */
363
364 static int
365 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
366 {
367 struct obj_section *sec;
368 struct minimal_symbol *sym;
369 char type;
370
371 /* If bit 0 of the address is set, assume this is a Thumb address. */
372 if (IS_THUMB_ADDR (memaddr))
373 return 1;
374
 375   /* If the user wants to override the symbol table, let them.  */
376 if (strcmp (arm_force_mode_string, "arm") == 0)
377 return 0;
378 if (strcmp (arm_force_mode_string, "thumb") == 0)
379 return 1;
380
381 /* ARM v6-M and v7-M are always in Thumb mode. */
382 if (gdbarch_tdep (gdbarch)->is_m)
383 return 1;
384
385 /* If there are mapping symbols, consult them. */
386 type = arm_find_mapping_symbol (memaddr, NULL);
387 if (type)
388 return type == 't';
389
390 /* Thumb functions have a "special" bit set in minimal symbols. */
391 sym = lookup_minimal_symbol_by_pc (memaddr);
392 if (sym)
393 return (MSYMBOL_IS_SPECIAL (sym));
394
395 /* If the user wants to override the fallback mode, let them. */
396 if (strcmp (arm_fallback_mode_string, "arm") == 0)
397 return 0;
398 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
399 return 1;
400
401 /* If we couldn't find any symbol, but we're talking to a running
402 target, then trust the current value of $cpsr. This lets
403 "display/i $pc" always show the correct mode (though if there is
404 a symbol table we will not reach here, so it still may not be
405 displayed in the mode it will be executed).
406
 407      As a further heuristic, if we detect that we are doing a single-step,
 408      we check which state executing the current instruction will leave
 409      us in.  */
410 if (target_has_registers)
411 {
412 struct frame_info *current_frame = get_current_frame ();
413 CORE_ADDR current_pc = get_frame_pc (current_frame);
414 int is_thumb = arm_frame_is_thumb (current_frame);
415 CORE_ADDR next_pc;
416 if (memaddr == current_pc)
417 return is_thumb;
418 else
419 {
420 struct gdbarch *gdbarch = get_frame_arch (current_frame);
421 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
422 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
423 return IS_THUMB_ADDR (next_pc);
424 else
425 return is_thumb;
426 }
427 }
428
429 /* Otherwise we're out of luck; we assume ARM. */
430 return 0;
431 }
432
433 /* Remove useless bits from addresses in a running program. */
434 static CORE_ADDR
435 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
436 {
437 if (arm_apcs_32)
438 return UNMAKE_THUMB_ADDR (val);
439 else
440 return (val & 0x03fffffc);
441 }
442
443 /* When reading symbols, we need to zap the low bit of the address,
444 which may be set to 1 for Thumb functions. */
445 static CORE_ADDR
446 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
447 {
448 return val & ~1;
449 }
450
451 /* Return 1 if PC is the start of a compiler helper function which
452 can be safely ignored during prologue skipping. */
453 static int
454 skip_prologue_function (CORE_ADDR pc)
455 {
456 struct minimal_symbol *msym;
457 const char *name;
458
459 msym = lookup_minimal_symbol_by_pc (pc);
460 if (msym == NULL || SYMBOL_VALUE_ADDRESS (msym) != pc)
461 return 0;
462
463 name = SYMBOL_LINKAGE_NAME (msym);
464 if (name == NULL)
465 return 0;
466
467 /* The GNU linker's Thumb call stub to foo is named
468 __foo_from_thumb. */
469 if (strstr (name, "_from_thumb") != NULL)
470 name += 2;
471
472 /* On soft-float targets, __truncdfsf2 is called to convert promoted
473 arguments to their argument types in non-prototyped
474 functions. */
475 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
476 return 1;
477 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
478 return 1;
479
480 /* Internal functions related to thread-local storage. */
481 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
482 return 1;
483 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
484 return 1;
485
486 return 0;
487 }
488
489 /* Support routines for instruction parsing. */
490 #define submask(x) ((1L << ((x) + 1)) - 1)
491 #define bit(obj,st) (((obj) >> (st)) & 1)
492 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
493 #define sbits(obj,st,fn) \
494 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
495 #define BranchDest(addr,instr) \
496 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
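/* Illustrative sketch (hypothetical helper, not part of the original
   file, and not wired into anything): decoding an ARM branch with the
   macros above.  The branch-to-self encoding 0xeafffffe has a signed
   24-bit offset of -2, which BranchDest shifts left by two and adds to
   the instruction's address plus the 8-byte pipeline offset.  */

static void
branch_dest_example (void)
{
  CORE_ADDR addr = 0x8000;
  unsigned long instr = 0xeafffffe;	/* b .  (branch to self) */

  gdb_assert (sbits (instr, 0, 23) == -2);
  gdb_assert (BranchDest (addr, instr) == addr);
}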
497
498 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
499
500 static unsigned int
501 thumb_expand_immediate (unsigned int imm)
502 {
503 unsigned int count = imm >> 7;
504
505 if (count < 8)
506 switch (count / 2)
507 {
508 case 0:
509 return imm & 0xff;
510 case 1:
511 return (imm & 0xff) | ((imm & 0xff) << 16);
512 case 2:
513 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
514 case 3:
515 return (imm & 0xff) | ((imm & 0xff) << 8)
516 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
517 }
518
519 return (0x80 | (imm & 0x7f)) << (32 - count);
520 }
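/* Illustrative self-check sketch (hypothetical helper, not part of the
   original file, and not wired into anything): a few encodings and the
   values the function above expands them to.  0x0ab is the plain 8-bit
   case, 0x1ab duplicates the byte into both halfwords, and 0x4ff takes
   the rotate path (0xff rotated right by 9).  */

static void
thumb_expand_immediate_example (void)
{
  gdb_assert (thumb_expand_immediate (0x0ab) == 0x000000ab);
  gdb_assert (thumb_expand_immediate (0x1ab) == 0x00ab00ab);
  gdb_assert (thumb_expand_immediate (0x4ff) == 0x7f800000);
}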
521
522 /* Return 1 if the 16-bit Thumb instruction INST might change
523 control flow, 0 otherwise. */
524
525 static int
526 thumb_instruction_changes_pc (unsigned short inst)
527 {
528 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
529 return 1;
530
531 if ((inst & 0xf000) == 0xd000) /* conditional branch */
532 return 1;
533
534 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
535 return 1;
536
537 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
538 return 1;
539
540 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
541 return 1;
542
543 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
544 return 1;
545
546 return 0;
547 }
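/* For example (illustrative): "bx lr" (0x4770) matches the 0x4700 test
   above and is reported as changing the PC, while "movs r0, #7"
   (0x2007) matches none of the patterns and is not.  */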
548
549 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
550 might change control flow, 0 otherwise. */
551
552 static int
553 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
554 {
555 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
556 {
557 /* Branches and miscellaneous control instructions. */
558
559 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
560 {
561 /* B, BL, BLX. */
562 return 1;
563 }
564 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
565 {
566 /* SUBS PC, LR, #imm8. */
567 return 1;
568 }
569 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
570 {
571 /* Conditional branch. */
572 return 1;
573 }
574
575 return 0;
576 }
577
578 if ((inst1 & 0xfe50) == 0xe810)
579 {
580 /* Load multiple or RFE. */
581
582 if (bit (inst1, 7) && !bit (inst1, 8))
583 {
584 /* LDMIA or POP */
585 if (bit (inst2, 15))
586 return 1;
587 }
588 else if (!bit (inst1, 7) && bit (inst1, 8))
589 {
590 /* LDMDB */
591 if (bit (inst2, 15))
592 return 1;
593 }
594 else if (bit (inst1, 7) && bit (inst1, 8))
595 {
596 /* RFEIA */
597 return 1;
598 }
599 else if (!bit (inst1, 7) && !bit (inst1, 8))
600 {
601 /* RFEDB */
602 return 1;
603 }
604
605 return 0;
606 }
607
608 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
609 {
610 /* MOV PC or MOVS PC. */
611 return 1;
612 }
613
614 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
615 {
616 /* LDR PC. */
617 if (bits (inst1, 0, 3) == 15)
618 return 1;
619 if (bit (inst1, 7))
620 return 1;
621 if (bit (inst2, 11))
622 return 1;
623 if ((inst2 & 0x0fc0) == 0x0000)
624 return 1;
625
626 return 0;
627 }
628
629 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
630 {
631 /* TBB. */
632 return 1;
633 }
634
635 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
636 {
637 /* TBH. */
638 return 1;
639 }
640
641 return 0;
642 }
643
644 /* Analyze a Thumb prologue, looking for a recognizable stack frame
645 and frame pointer. Scan until we encounter a store that could
646 clobber the stack frame unexpectedly, or an unknown instruction.
647 Return the last address which is definitely safe to skip for an
648 initial breakpoint. */
649
650 static CORE_ADDR
651 thumb_analyze_prologue (struct gdbarch *gdbarch,
652 CORE_ADDR start, CORE_ADDR limit,
653 struct arm_prologue_cache *cache)
654 {
655 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
656 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
657 int i;
658 pv_t regs[16];
659 struct pv_area *stack;
660 struct cleanup *back_to;
661 CORE_ADDR offset;
662 CORE_ADDR unrecognized_pc = 0;
663
664 for (i = 0; i < 16; i++)
665 regs[i] = pv_register (i, 0);
666 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
667 back_to = make_cleanup_free_pv_area (stack);
668
669 while (start < limit)
670 {
671 unsigned short insn;
672
673 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
674
675 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
676 {
677 int regno;
678 int mask;
679
680 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
681 break;
682
683 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
684 whether to save LR (R14). */
685 mask = (insn & 0xff) | ((insn & 0x100) << 6);
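	  /* Worked example (illustrative): "push {r4-r7, lr}" is encoded
	     as 0xb5f0, so the mask becomes 0x40f0 -- bits 4-7 for r4-r7
	     plus bit 14 for LR -- and the loop below stores LR at SP-4,
	     r7 at SP-8, and so on down to r4 at SP-20.  */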
686
687 /* Calculate offsets of saved R0-R7 and LR. */
688 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
689 if (mask & (1 << regno))
690 {
691 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
692 -4);
693 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
694 }
695 }
696 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
697 sub sp, #simm */
698 {
699 offset = (insn & 0x7f) << 2; /* get scaled offset */
700 if (insn & 0x80) /* Check for SUB. */
701 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
702 -offset);
703 else
704 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
705 offset);
706 }
707 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
708 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
709 (insn & 0xff) << 2);
710 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
711 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
712 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
713 bits (insn, 6, 8));
714 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
715 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
716 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
717 bits (insn, 0, 7));
718 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
719 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
720 && pv_is_constant (regs[bits (insn, 3, 5)]))
721 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
722 regs[bits (insn, 6, 8)]);
723 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
724 && pv_is_constant (regs[bits (insn, 3, 6)]))
725 {
726 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
727 int rm = bits (insn, 3, 6);
728 regs[rd] = pv_add (regs[rd], regs[rm]);
729 }
730 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
731 {
732 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
733 int src_reg = (insn & 0x78) >> 3;
734 regs[dst_reg] = regs[src_reg];
735 }
736 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
737 {
738 /* Handle stores to the stack. Normally pushes are used,
739 but with GCC -mtpcs-frame, there may be other stores
740 in the prologue to create the frame. */
741 int regno = (insn >> 8) & 0x7;
742 pv_t addr;
743
744 offset = (insn & 0xff) << 2;
745 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
746
747 if (pv_area_store_would_trash (stack, addr))
748 break;
749
750 pv_area_store (stack, addr, 4, regs[regno]);
751 }
752 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
753 {
754 int rd = bits (insn, 0, 2);
755 int rn = bits (insn, 3, 5);
756 pv_t addr;
757
758 offset = bits (insn, 6, 10) << 2;
759 addr = pv_add_constant (regs[rn], offset);
760
761 if (pv_area_store_would_trash (stack, addr))
762 break;
763
764 pv_area_store (stack, addr, 4, regs[rd]);
765 }
766 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
767 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
768 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
769 /* Ignore stores of argument registers to the stack. */
770 ;
771 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
772 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
773 /* Ignore block loads from the stack, potentially copying
774 parameters from memory. */
775 ;
776 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
777 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
778 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
779 /* Similarly ignore single loads from the stack. */
780 ;
781 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
782 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
783 /* Skip register copies, i.e. saves to another register
784 instead of the stack. */
785 ;
786 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
787 /* Recognize constant loads; even with small stacks these are necessary
788 on Thumb. */
789 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
790 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
791 {
792 /* Constant pool loads, for the same reason. */
793 unsigned int constant;
794 CORE_ADDR loc;
795
796 loc = start + 4 + bits (insn, 0, 7) * 4;
797 constant = read_memory_unsigned_integer (loc, 4, byte_order);
798 regs[bits (insn, 8, 10)] = pv_constant (constant);
799 }
800 else if ((insn & 0xe000) == 0xe000)
801 {
802 unsigned short inst2;
803
804 inst2 = read_memory_unsigned_integer (start + 2, 2,
805 byte_order_for_code);
806
807 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
808 {
809 /* BL, BLX. Allow some special function calls when
810 skipping the prologue; GCC generates these before
811 storing arguments to the stack. */
812 CORE_ADDR nextpc;
813 int j1, j2, imm1, imm2;
814
815 imm1 = sbits (insn, 0, 10);
816 imm2 = bits (inst2, 0, 10);
817 j1 = bit (inst2, 13);
818 j2 = bit (inst2, 11);
819
820 offset = ((imm1 << 12) + (imm2 << 1));
821 offset ^= ((!j2) << 22) | ((!j1) << 23);
822
823 nextpc = start + 4 + offset;
824 /* For BLX make sure to clear the low bits. */
825 if (bit (inst2, 12) == 0)
826 nextpc = nextpc & 0xfffffffc;
827
828 if (!skip_prologue_function (nextpc))
829 break;
830 }
831
832 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!}, { registers } */
833 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
834 {
835 pv_t addr = regs[bits (insn, 0, 3)];
836 int regno;
837
838 if (pv_area_store_would_trash (stack, addr))
839 break;
840
841 /* Calculate offsets of saved registers. */
842 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
843 if (inst2 & (1 << regno))
844 {
845 addr = pv_add_constant (addr, -4);
846 pv_area_store (stack, addr, 4, regs[regno]);
847 }
848
849 if (insn & 0x0020)
850 regs[bits (insn, 0, 3)] = addr;
851 }
852
853 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2, [Rn, #+/-imm]{!} */
854 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
855 {
856 int regno1 = bits (inst2, 12, 15);
857 int regno2 = bits (inst2, 8, 11);
858 pv_t addr = regs[bits (insn, 0, 3)];
859
860 offset = inst2 & 0xff;
861 if (insn & 0x0080)
862 addr = pv_add_constant (addr, offset);
863 else
864 addr = pv_add_constant (addr, -offset);
865
866 if (pv_area_store_would_trash (stack, addr))
867 break;
868
869 pv_area_store (stack, addr, 4, regs[regno1]);
870 pv_area_store (stack, pv_add_constant (addr, 4),
871 4, regs[regno2]);
872
873 if (insn & 0x0020)
874 regs[bits (insn, 0, 3)] = addr;
875 }
876
877 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
878 && (inst2 & 0x0c00) == 0x0c00
879 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
880 {
881 int regno = bits (inst2, 12, 15);
882 pv_t addr = regs[bits (insn, 0, 3)];
883
884 offset = inst2 & 0xff;
885 if (inst2 & 0x0200)
886 addr = pv_add_constant (addr, offset);
887 else
888 addr = pv_add_constant (addr, -offset);
889
890 if (pv_area_store_would_trash (stack, addr))
891 break;
892
893 pv_area_store (stack, addr, 4, regs[regno]);
894
895 if (inst2 & 0x0100)
896 regs[bits (insn, 0, 3)] = addr;
897 }
898
899 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
900 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
901 {
902 int regno = bits (inst2, 12, 15);
903 pv_t addr;
904
905 offset = inst2 & 0xfff;
906 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
907
908 if (pv_area_store_would_trash (stack, addr))
909 break;
910
911 pv_area_store (stack, addr, 4, regs[regno]);
912 }
913
914 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
915 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
916 /* Ignore stores of argument registers to the stack. */
917 ;
918
919 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
920 && (inst2 & 0x0d00) == 0x0c00
921 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
922 /* Ignore stores of argument registers to the stack. */
923 ;
924
925 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!], { registers } */
926 && (inst2 & 0x8000) == 0x0000
927 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
928 /* Ignore block loads from the stack, potentially copying
929 parameters from memory. */
930 ;
931
932 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2, [Rn, #+/-imm] */
933 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
934 /* Similarly ignore dual loads from the stack. */
935 ;
936
937 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
938 && (inst2 & 0x0d00) == 0x0c00
939 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
940 /* Similarly ignore single loads from the stack. */
941 ;
942
943 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
944 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
945 /* Similarly ignore single loads from the stack. */
946 ;
947
948 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
949 && (inst2 & 0x8000) == 0x0000)
950 {
951 unsigned int imm = ((bits (insn, 10, 10) << 11)
952 | (bits (inst2, 12, 14) << 8)
953 | bits (inst2, 0, 7));
954
955 regs[bits (inst2, 8, 11)]
956 = pv_add_constant (regs[bits (insn, 0, 3)],
957 thumb_expand_immediate (imm));
958 }
959
960 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
961 && (inst2 & 0x8000) == 0x0000)
962 {
963 unsigned int imm = ((bits (insn, 10, 10) << 11)
964 | (bits (inst2, 12, 14) << 8)
965 | bits (inst2, 0, 7));
966
967 regs[bits (inst2, 8, 11)]
968 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
969 }
970
971 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
972 && (inst2 & 0x8000) == 0x0000)
973 {
974 unsigned int imm = ((bits (insn, 10, 10) << 11)
975 | (bits (inst2, 12, 14) << 8)
976 | bits (inst2, 0, 7));
977
978 regs[bits (inst2, 8, 11)]
979 = pv_add_constant (regs[bits (insn, 0, 3)],
980 - (CORE_ADDR) thumb_expand_immediate (imm));
981 }
982
983 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
984 && (inst2 & 0x8000) == 0x0000)
985 {
986 unsigned int imm = ((bits (insn, 10, 10) << 11)
987 | (bits (inst2, 12, 14) << 8)
988 | bits (inst2, 0, 7));
989
990 regs[bits (inst2, 8, 11)]
991 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
992 }
993
994 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
995 {
996 unsigned int imm = ((bits (insn, 10, 10) << 11)
997 | (bits (inst2, 12, 14) << 8)
998 | bits (inst2, 0, 7));
999
1000 regs[bits (inst2, 8, 11)]
1001 = pv_constant (thumb_expand_immediate (imm));
1002 }
1003
1004 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1005 {
1006 unsigned int imm = ((bits (insn, 0, 3) << 12)
1007 | (bits (insn, 10, 10) << 11)
1008 | (bits (inst2, 12, 14) << 8)
1009 | bits (inst2, 0, 7));
1010
1011 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1012 }
1013
1014 else if (insn == 0xea5f /* mov.w Rd,Rm */
1015 && (inst2 & 0xf0f0) == 0)
1016 {
1017 int dst_reg = (inst2 & 0x0f00) >> 8;
1018 int src_reg = inst2 & 0xf;
1019 regs[dst_reg] = regs[src_reg];
1020 }
1021
1022 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1023 {
1024 /* Constant pool loads. */
1025 unsigned int constant;
1026 CORE_ADDR loc;
1027
1028 offset = bits (insn, 0, 11);
1029 if (insn & 0x0080)
1030 loc = start + 4 + offset;
1031 else
1032 loc = start + 4 - offset;
1033
1034 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1035 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1036 }
1037
1038 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1039 {
1040 /* Constant pool loads. */
1041 unsigned int constant;
1042 CORE_ADDR loc;
1043
1044 offset = bits (insn, 0, 7) << 2;
1045 if (insn & 0x0080)
1046 loc = start + 4 + offset;
1047 else
1048 loc = start + 4 - offset;
1049
1050 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1051 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1052
1053 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1054 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1055 }
1056
1057 else if (thumb2_instruction_changes_pc (insn, inst2))
1058 {
1059 /* Don't scan past anything that might change control flow. */
1060 break;
1061 }
1062 else
1063 {
1064 /* The optimizer might shove anything into the prologue,
1065 so we just skip what we don't recognize. */
1066 unrecognized_pc = start;
1067 }
1068
1069 start += 2;
1070 }
1071 else if (thumb_instruction_changes_pc (insn))
1072 {
1073 /* Don't scan past anything that might change control flow. */
1074 break;
1075 }
1076 else
1077 {
1078 /* The optimizer might shove anything into the prologue,
1079 so we just skip what we don't recognize. */
1080 unrecognized_pc = start;
1081 }
1082
1083 start += 2;
1084 }
1085
1086 if (arm_debug)
1087 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1088 paddress (gdbarch, start));
1089
1090 if (unrecognized_pc == 0)
1091 unrecognized_pc = start;
1092
1093 if (cache == NULL)
1094 {
1095 do_cleanups (back_to);
1096 return unrecognized_pc;
1097 }
1098
1099 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1100 {
1101 /* Frame pointer is fp. Frame size is constant. */
1102 cache->framereg = ARM_FP_REGNUM;
1103 cache->framesize = -regs[ARM_FP_REGNUM].k;
1104 }
1105 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1106 {
1107 /* Frame pointer is r7. Frame size is constant. */
1108 cache->framereg = THUMB_FP_REGNUM;
1109 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1110 }
1111 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1112 {
1113 /* Try the stack pointer... this is a bit desperate. */
1114 cache->framereg = ARM_SP_REGNUM;
1115 cache->framesize = -regs[ARM_SP_REGNUM].k;
1116 }
1117 else
1118 {
1119 /* We're just out of luck. We don't know where the frame is. */
1120 cache->framereg = -1;
1121 cache->framesize = 0;
1122 }
1123
1124 for (i = 0; i < 16; i++)
1125 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1126 cache->saved_regs[i].addr = offset;
1127
1128 do_cleanups (back_to);
1129 return unrecognized_pc;
1130 }
1131
1132 /* Advance the PC across any function entry prologue instructions to
1133 reach some "real" code.
1134
1135 The APCS (ARM Procedure Call Standard) defines the following
1136 prologue:
1137
1138 mov ip, sp
1139 [stmfd sp!, {a1,a2,a3,a4}]
1140 stmfd sp!, {...,fp,ip,lr,pc}
1141 [stfe f7, [sp, #-12]!]
1142 [stfe f6, [sp, #-12]!]
1143 [stfe f5, [sp, #-12]!]
1144 [stfe f4, [sp, #-12]!]
1145 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn */
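/* Worked through (illustrative): IP holds the entry SP, and the saved
   PC is the highest word pushed by the stmfd, i.e. at entry-SP - 4
   without the argument push or entry-SP - 20 with it; the final
   "sub fp, ip, #nn" therefore leaves FP pointing at the saved-PC slot
   in both cases.  */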
1146
1147 static CORE_ADDR
1148 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1149 {
1150 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1151 unsigned long inst;
1152 CORE_ADDR skip_pc;
1153 CORE_ADDR func_addr, limit_pc;
1154 struct symtab_and_line sal;
1155
1156 /* See if we can determine the end of the prologue via the symbol table.
1157 If so, then return either PC, or the PC after the prologue, whichever
1158 is greater. */
1159 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1160 {
1161 CORE_ADDR post_prologue_pc
1162 = skip_prologue_using_sal (gdbarch, func_addr);
1163 struct symtab *s = find_pc_symtab (func_addr);
1164
1165 /* GCC always emits a line note before the prologue and another
1166 one after, even if the two are at the same address or on the
1167 same line. Take advantage of this so that we do not need to
1168 know every instruction that might appear in the prologue. We
1169 will have producer information for most binaries; if it is
1170 	 missing (e.g. for -gstabs), assume the GNU tools.  */
1171 if (post_prologue_pc
1172 && (s == NULL
1173 || s->producer == NULL
1174 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1175 return post_prologue_pc;
1176
1177 if (post_prologue_pc != 0)
1178 {
1179 CORE_ADDR analyzed_limit;
1180
1181 /* For non-GCC compilers, make sure the entire line is an
1182 acceptable prologue; GDB will round this function's
1183 return value up to the end of the following line so we
1184 can not skip just part of a line (and we do not want to).
1185
1186 RealView does not treat the prologue specially, but does
1187 associate prologue code with the opening brace; so this
1188 lets us skip the first line if we think it is the opening
1189 brace. */
1190 if (arm_pc_is_thumb (gdbarch, func_addr))
1191 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1192 post_prologue_pc, NULL);
1193 else
1194 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1195 post_prologue_pc, NULL);
1196
1197 if (analyzed_limit != post_prologue_pc)
1198 return func_addr;
1199
1200 return post_prologue_pc;
1201 }
1202 }
1203
1204 /* Can't determine prologue from the symbol table, need to examine
1205 instructions. */
1206
1207 /* Find an upper limit on the function prologue using the debug
1208 information. If the debug information could not be used to provide
1209 that bound, then use an arbitrary large number as the upper bound. */
1210 /* Like arm_scan_prologue, stop no later than pc + 64. */
1211 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1212 if (limit_pc == 0)
1213 limit_pc = pc + 64; /* Magic. */
1214
1215
1216 /* Check if this is Thumb code. */
1217 if (arm_pc_is_thumb (gdbarch, pc))
1218 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1219
1220 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1221 {
1222 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1223
1224 /* "mov ip, sp" is no longer a required part of the prologue. */
1225 if (inst == 0xe1a0c00d) /* mov ip, sp */
1226 continue;
1227
1228 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1229 continue;
1230
1231 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1232 continue;
1233
1234 /* Some prologues begin with "str lr, [sp, #-4]!". */
1235 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1236 continue;
1237
1238 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1239 continue;
1240
1241 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1242 continue;
1243
1244 /* Any insns after this point may float into the code, if it makes
1245 for better instruction scheduling, so we skip them only if we
1246 find them, but still consider the function to be frame-ful. */
1247
1248 /* We may have either one sfmfd instruction here, or several stfe
1249 insns, depending on the version of floating point code we
1250 support. */
1251 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1252 continue;
1253
1254 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1255 continue;
1256
1257 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1258 continue;
1259
1260 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1261 continue;
1262
1263 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1264 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1265 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1266 continue;
1267
1268 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1269 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1270 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1271 continue;
1272
1273 /* Un-recognized instruction; stop scanning. */
1274 break;
1275 }
1276
1277 return skip_pc; /* End of prologue */
1278 }
1279
1280 /* *INDENT-OFF* */
1281 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1282 This function decodes a Thumb function prologue to determine:
1283 1) the size of the stack frame
1284 2) which registers are saved on it
1285 3) the offsets of saved regs
1286 4) the offset from the stack pointer to the frame pointer
1287
1288 A typical Thumb function prologue would create this stack frame
1289 (offsets relative to FP)
1290 old SP -> 24 stack parameters
1291 20 LR
1292 16 R7
1293 R7 -> 0 local variables (16 bytes)
1294 SP -> -12 additional stack space (12 bytes)
1295 The frame size would thus be 36 bytes, and the frame offset would be
1296 12 bytes. The frame register is R7.
1297
1298    The comments for thumb_analyze_prologue() describe the algorithm we
1299    use to detect the end of the prologue.  */
1300 /* *INDENT-ON* */
1301
1302 static void
1303 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1304 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1305 {
1306 CORE_ADDR prologue_start;
1307 CORE_ADDR prologue_end;
1308 CORE_ADDR current_pc;
1309
1310 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1311 &prologue_end))
1312 {
1313 /* See comment in arm_scan_prologue for an explanation of
1314 	 this heuristic.  */
1315 if (prologue_end > prologue_start + 64)
1316 {
1317 prologue_end = prologue_start + 64;
1318 }
1319 }
1320 else
1321 /* We're in the boondocks: we have no idea where the start of the
1322 function is. */
1323 return;
1324
1325 prologue_end = min (prologue_end, prev_pc);
1326
1327 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1328 }
1329
1330 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1331
1332 static int
1333 arm_instruction_changes_pc (uint32_t this_instr)
1334 {
1335 if (bits (this_instr, 28, 31) == INST_NV)
1336 /* Unconditional instructions. */
1337 switch (bits (this_instr, 24, 27))
1338 {
1339 case 0xa:
1340 case 0xb:
1341 /* Branch with Link and change to Thumb. */
1342 return 1;
1343 case 0xc:
1344 case 0xd:
1345 case 0xe:
1346 /* Coprocessor register transfer. */
1347 if (bits (this_instr, 12, 15) == 15)
1348 error (_("Invalid update to pc in instruction"));
1349 return 0;
1350 default:
1351 return 0;
1352 }
1353 else
1354 switch (bits (this_instr, 25, 27))
1355 {
1356 case 0x0:
1357 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1358 {
1359 /* Multiplies and extra load/stores. */
1360 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1361 	    /* Neither multiplies nor extra load/stores are allowed
1362 to modify PC. */
1363 return 0;
1364
1365 /* Otherwise, miscellaneous instructions. */
1366
1367 /* BX <reg>, BXJ <reg>, BLX <reg> */
1368 if (bits (this_instr, 4, 27) == 0x12fff1
1369 || bits (this_instr, 4, 27) == 0x12fff2
1370 || bits (this_instr, 4, 27) == 0x12fff3)
1371 return 1;
1372
1373 /* Other miscellaneous instructions are unpredictable if they
1374 modify PC. */
1375 return 0;
1376 }
1377 /* Data processing instruction. Fall through. */
1378
1379 case 0x1:
1380 if (bits (this_instr, 12, 15) == 15)
1381 return 1;
1382 else
1383 return 0;
1384
1385 case 0x2:
1386 case 0x3:
1387 /* Media instructions and architecturally undefined instructions. */
1388 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1389 return 0;
1390
1391 /* Stores. */
1392 if (bit (this_instr, 20) == 0)
1393 return 0;
1394
1395 /* Loads. */
1396 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1397 return 1;
1398 else
1399 return 0;
1400
1401 case 0x4:
1402 /* Load/store multiple. */
1403 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1404 return 1;
1405 else
1406 return 0;
1407
1408 case 0x5:
1409 /* Branch and branch with link. */
1410 return 1;
1411
1412 case 0x6:
1413 case 0x7:
1414 /* Coprocessor transfers or SWIs can not affect PC. */
1415 return 0;
1416
1417 default:
1418 internal_error (__FILE__, __LINE__, "bad value in switch");
1419 }
1420 }
1421
1422 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1423 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1424 fill it in. Return the first address not recognized as a prologue
1425 instruction.
1426
1427 We recognize all the instructions typically found in ARM prologues,
1428 plus harmless instructions which can be skipped (either for analysis
1429 purposes, or a more restrictive set that can be skipped when finding
1430 the end of the prologue). */
1431
1432 static CORE_ADDR
1433 arm_analyze_prologue (struct gdbarch *gdbarch,
1434 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1435 struct arm_prologue_cache *cache)
1436 {
1437 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1438 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1439 int regno;
1440 CORE_ADDR offset, current_pc;
1441 pv_t regs[ARM_FPS_REGNUM];
1442 struct pv_area *stack;
1443 struct cleanup *back_to;
1444 int framereg, framesize;
1445 CORE_ADDR unrecognized_pc = 0;
1446
1447 /* Search the prologue looking for instructions that set up the
1448 frame pointer, adjust the stack pointer, and save registers.
1449
1450 Be careful, however, and if it doesn't look like a prologue,
1451 don't try to scan it. If, for instance, a frameless function
1452 begins with stmfd sp!, then we will tell ourselves there is
1453 a frame, which will confuse stack traceback, as well as "finish"
1454 and other operations that rely on a knowledge of the stack
1455 traceback. */
1456
1457 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1458 regs[regno] = pv_register (regno, 0);
1459 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1460 back_to = make_cleanup_free_pv_area (stack);
1461
1462 for (current_pc = prologue_start;
1463 current_pc < prologue_end;
1464 current_pc += 4)
1465 {
1466 unsigned int insn
1467 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1468
1469 if (insn == 0xe1a0c00d) /* mov ip, sp */
1470 {
1471 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1472 continue;
1473 }
1474 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1475 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1476 {
1477 unsigned imm = insn & 0xff; /* immediate value */
1478 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1479 int rd = bits (insn, 12, 15);
1480 imm = (imm >> rot) | (imm << (32 - rot));
1481 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1482 continue;
1483 }
1484 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1485 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1486 {
1487 unsigned imm = insn & 0xff; /* immediate value */
1488 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1489 int rd = bits (insn, 12, 15);
1490 imm = (imm >> rot) | (imm << (32 - rot));
1491 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1492 continue;
1493 }
1494 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd, [sp, #-4]! */
1495 {
1496 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1497 break;
1498 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1499 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1500 regs[bits (insn, 12, 15)]);
1501 continue;
1502 }
1503 else if ((insn & 0xffff0000) == 0xe92d0000)
1504 /* stmfd sp!, {..., fp, ip, lr, pc}
1505 or
1506 stmfd sp!, {a1, a2, a3, a4} */
1507 {
1508 int mask = insn & 0xffff;
1509
1510 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1511 break;
1512
1513 /* Calculate offsets of saved registers. */
1514 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1515 if (mask & (1 << regno))
1516 {
1517 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1518 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1519 }
1520 }
1521 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1522 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1523 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1524 {
1525 /* No need to add this to saved_regs -- it's just an arg reg. */
1526 continue;
1527 }
1528 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1529 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1530 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1531 {
1532 /* No need to add this to saved_regs -- it's just an arg reg. */
1533 continue;
1534 }
1535 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn, { registers } */
1536 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1537 {
1538 /* No need to add this to saved_regs -- it's just arg regs. */
1539 continue;
1540 }
1541 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1542 {
1543 unsigned imm = insn & 0xff; /* immediate value */
1544 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1545 imm = (imm >> rot) | (imm << (32 - rot));
1546 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1547 }
1548 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1549 {
1550 unsigned imm = insn & 0xff; /* immediate value */
1551 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1552 imm = (imm >> rot) | (imm << (32 - rot));
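	  /* Worked example (illustrative): "sub sp, sp, #1024" is
	     encoded as 0xe24ddb01, so imm is 0x01 and rot is 22, and
	     the line above reconstructs 1 ROR 22 == 0x400.  */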
1553 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1554 }
1555 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?, [sp, -#c]! */
1556 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1557 {
1558 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1559 break;
1560
1561 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1562 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1563 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1564 }
1565 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4, [sp!] */
1566 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1567 {
1568 int n_saved_fp_regs;
1569 unsigned int fp_start_reg, fp_bound_reg;
1570
1571 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1572 break;
1573
1574 if ((insn & 0x800) == 0x800) /* N0 is set */
1575 {
1576 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1577 n_saved_fp_regs = 3;
1578 else
1579 n_saved_fp_regs = 1;
1580 }
1581 else
1582 {
1583 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1584 n_saved_fp_regs = 2;
1585 else
1586 n_saved_fp_regs = 4;
1587 }
1588
1589 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1590 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1591 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1592 {
1593 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1594 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1595 regs[fp_start_reg++]);
1596 }
1597 }
1598 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1599 {
1600 /* Allow some special function calls when skipping the
1601 prologue; GCC generates these before storing arguments to
1602 the stack. */
1603 CORE_ADDR dest = BranchDest (current_pc, insn);
1604
1605 if (skip_prologue_function (dest))
1606 continue;
1607 else
1608 break;
1609 }
1610 else if ((insn & 0xf0000000) != 0xe0000000)
1611 break; /* Condition not true, exit early */
1612 else if (arm_instruction_changes_pc (insn))
1613 /* Don't scan past anything that might change control flow. */
1614 break;
1615 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1616 {
1617 /* Ignore block loads from the stack, potentially copying
1618 parameters from memory. */
1619 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1620 continue;
1621 else
1622 break;
1623 }
1624 else if ((insn & 0xfc500000) == 0xe4100000)
1625 {
1626 /* Similarly ignore single loads from the stack. */
1627 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1628 continue;
1629 else
1630 break;
1631 }
1632 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1633 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1634 register instead of the stack. */
1635 continue;
1636 else
1637 {
1638 /* The optimizer might shove anything into the prologue,
1639 so we just skip what we don't recognize. */
1640 unrecognized_pc = current_pc;
1641 continue;
1642 }
1643 }
1644
1645 if (unrecognized_pc == 0)
1646 unrecognized_pc = current_pc;
1647
1648 /* The frame size is just the distance from the frame register
1649 to the original stack pointer. */
1650 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1651 {
1652 /* Frame pointer is fp. */
1653 framereg = ARM_FP_REGNUM;
1654 framesize = -regs[ARM_FP_REGNUM].k;
1655 }
1656 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1657 {
1658 /* Try the stack pointer... this is a bit desperate. */
1659 framereg = ARM_SP_REGNUM;
1660 framesize = -regs[ARM_SP_REGNUM].k;
1661 }
1662 else
1663 {
1664 /* We're just out of luck. We don't know where the frame is. */
1665 framereg = -1;
1666 framesize = 0;
1667 }
1668
1669 if (cache)
1670 {
1671 cache->framereg = framereg;
1672 cache->framesize = framesize;
1673
1674 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1675 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1676 cache->saved_regs[regno].addr = offset;
1677 }
1678
1679 if (arm_debug)
1680 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1681 paddress (gdbarch, unrecognized_pc));
1682
1683 do_cleanups (back_to);
1684 return unrecognized_pc;
1685 }
1686
1687 static void
1688 arm_scan_prologue (struct frame_info *this_frame,
1689 struct arm_prologue_cache *cache)
1690 {
1691 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1692 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1693 int regno;
1694 CORE_ADDR prologue_start, prologue_end, current_pc;
1695 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1696 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1697 pv_t regs[ARM_FPS_REGNUM];
1698 struct pv_area *stack;
1699 struct cleanup *back_to;
1700 CORE_ADDR offset;
1701
1702 /* Assume there is no frame until proven otherwise. */
1703 cache->framereg = ARM_SP_REGNUM;
1704 cache->framesize = 0;
1705
1706 /* Check for Thumb prologue. */
1707 if (arm_frame_is_thumb (this_frame))
1708 {
1709 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1710 return;
1711 }
1712
1713 /* Find the function prologue. If we can't find the function in
1714 the symbol table, peek in the stack frame to find the PC. */
1715 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1716 &prologue_end))
1717 {
1718 /* One way to find the end of the prologue (which works well
1719 for unoptimized code) is to do the following:
1720
1721 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1722
1723 if (sal.line == 0)
1724 prologue_end = prev_pc;
1725 else if (sal.end < prologue_end)
1726 prologue_end = sal.end;
1727
1728 This mechanism is very accurate so long as the optimizer
1729 doesn't move any instructions from the function body into the
1730 prologue. If this happens, sal.end will be the last
1731 instruction in the first hunk of prologue code just before
1732 the first instruction that the scheduler has moved from
1733 the body to the prologue.
1734
1735 In order to make sure that we scan all of the prologue
1736 instructions, we use a slightly less accurate mechanism which
1737 may scan more than necessary. To help compensate for this
1738 lack of accuracy, the prologue scanning loop below contains
1739      several clauses that will cause the loop to terminate early if
1740 an implausible prologue instruction is encountered.
1741
1742 The expression
1743
1744 prologue_start + 64
1745
1746 is a suitable endpoint since it accounts for the largest
1747 possible prologue plus up to five instructions inserted by
1748 the scheduler. */
1749
1750 if (prologue_end > prologue_start + 64)
1751 {
1752 prologue_end = prologue_start + 64; /* See above. */
1753 }
1754 }
1755 else
1756 {
1757 /* We have no symbol information. Our only option is to assume this
1758 function has a standard stack frame and the normal frame register.
1759 Then, we can find the value of our frame pointer on entrance to
1760 the callee (or at the present moment if this is the innermost frame).
1761 The value stored there should be the address of the stmfd + 8. */
1762 CORE_ADDR frame_loc;
1763 LONGEST return_value;
1764
1765 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1766 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1767 return;
1768 else
1769 {
1770 prologue_start = gdbarch_addr_bits_remove
1771 (gdbarch, return_value) - 8;
1772 prologue_end = prologue_start + 64; /* See above. */
1773 }
1774 }
1775
1776 if (prev_pc < prologue_end)
1777 prologue_end = prev_pc;
1778
1779 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1780 }
1781
1782 static struct arm_prologue_cache *
1783 arm_make_prologue_cache (struct frame_info *this_frame)
1784 {
1785 int reg;
1786 struct arm_prologue_cache *cache;
1787 CORE_ADDR unwound_fp;
1788
1789 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1790 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1791
1792 arm_scan_prologue (this_frame, cache);
1793
1794 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1795 if (unwound_fp == 0)
1796 return cache;
1797
1798 cache->prev_sp = unwound_fp + cache->framesize;
1799
1800 /* Calculate actual addresses of saved registers using offsets
1801 determined by arm_scan_prologue. */
1802 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1803 if (trad_frame_addr_p (cache->saved_regs, reg))
1804 cache->saved_regs[reg].addr += cache->prev_sp;
1805
1806 return cache;
1807 }
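/* Illustrative example (hypothetical values): if the frame register is
   r7 with FRAMESIZE == 16 and r7 unwinds to 0xbefff760, then PREV_SP is
   0xbefff770 and a saved LR recorded at offset -4 is rebased to the
   address 0xbefff76c.  */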
1808
1809 /* Our frame ID for a normal frame is the current function's starting PC
1810 and the caller's SP when we were called. */
1811
1812 static void
1813 arm_prologue_this_id (struct frame_info *this_frame,
1814 void **this_cache,
1815 struct frame_id *this_id)
1816 {
1817 struct arm_prologue_cache *cache;
1818 struct frame_id id;
1819 CORE_ADDR pc, func;
1820
1821 if (*this_cache == NULL)
1822 *this_cache = arm_make_prologue_cache (this_frame);
1823 cache = *this_cache;
1824
1825 /* This is meant to halt the backtrace at "_start". */
1826 pc = get_frame_pc (this_frame);
1827 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1828 return;
1829
1830 /* If we've hit a wall, stop. */
1831 if (cache->prev_sp == 0)
1832 return;
1833
1834 func = get_frame_func (this_frame);
1835 id = frame_id_build (cache->prev_sp, func);
1836 *this_id = id;
1837 }
1838
1839 static struct value *
1840 arm_prologue_prev_register (struct frame_info *this_frame,
1841 void **this_cache,
1842 int prev_regnum)
1843 {
1844 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1845 struct arm_prologue_cache *cache;
1846
1847 if (*this_cache == NULL)
1848 *this_cache = arm_make_prologue_cache (this_frame);
1849 cache = *this_cache;
1850
1851 /* If we are asked to unwind the PC, then we need to return the LR
1852 instead. The prologue may save PC, but it will point into this
1853 frame's prologue, not the next frame's resume location. Also
1854 strip the saved T bit. A valid LR may have the low bit set, but
1855 a valid PC never does. */
1856 if (prev_regnum == ARM_PC_REGNUM)
1857 {
1858 CORE_ADDR lr;
1859
1860 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1861 return frame_unwind_got_constant (this_frame, prev_regnum,
1862 arm_addr_bits_remove (gdbarch, lr));
1863 }
1864
1865 /* SP is generally not saved to the stack, but this frame is
1866 identified by the next frame's stack pointer at the time of the call.
1867 The value was already reconstructed into PREV_SP. */
1868 if (prev_regnum == ARM_SP_REGNUM)
1869 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1870
1871 /* The CPSR may have been changed by the call instruction and by the
1872 called function. The only bit we can reconstruct is the T bit,
1873 by checking the low bit of LR as of the call. This is a reliable
1874 indicator of Thumb-ness except for some ARM v4T pre-interworking
1875 Thumb code, which could get away with a clear low bit as long as
1876 the called function did not use bx. Guess that all other
1877 bits are unchanged; the condition flags are presumably lost,
1878 but the processor status is likely valid. */
1879 if (prev_regnum == ARM_PS_REGNUM)
1880 {
1881 CORE_ADDR lr, cpsr;
1882 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
1883
1884 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1885 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1886 if (IS_THUMB_ADDR (lr))
1887 cpsr |= t_bit;
1888 else
1889 cpsr &= ~t_bit;
1890 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1891 }
1892
1893 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1894 prev_regnum);
1895 }
1896
1897 struct frame_unwind arm_prologue_unwind = {
1898 NORMAL_FRAME,
1899 arm_prologue_this_id,
1900 arm_prologue_prev_register,
1901 NULL,
1902 default_frame_sniffer
1903 };
1904
1905 static struct arm_prologue_cache *
1906 arm_make_stub_cache (struct frame_info *this_frame)
1907 {
1908 struct arm_prologue_cache *cache;
1909
1910 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1911 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1912
1913 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
1914
1915 return cache;
1916 }
1917
1918 /* Our frame ID for a stub frame is the current SP and LR. */
1919
1920 static void
1921 arm_stub_this_id (struct frame_info *this_frame,
1922 void **this_cache,
1923 struct frame_id *this_id)
1924 {
1925 struct arm_prologue_cache *cache;
1926
1927 if (*this_cache == NULL)
1928 *this_cache = arm_make_stub_cache (this_frame);
1929 cache = *this_cache;
1930
1931 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1932 }
1933
1934 static int
1935 arm_stub_unwind_sniffer (const struct frame_unwind *self,
1936 struct frame_info *this_frame,
1937 void **this_prologue_cache)
1938 {
1939 CORE_ADDR addr_in_block;
1940 char dummy[4];
1941
1942 addr_in_block = get_frame_address_in_block (this_frame);
1943 if (in_plt_section (addr_in_block, NULL)
1944 /* We also use the stub unwinder if the target memory is unreadable,
1945 to avoid having the prologue unwinder try to read it. */
1946 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1947 return 1;
1948
1949 return 0;
1950 }
1951
1952 struct frame_unwind arm_stub_unwind = {
1953 NORMAL_FRAME,
1954 arm_stub_this_id,
1955 arm_prologue_prev_register,
1956 NULL,
1957 arm_stub_unwind_sniffer
1958 };
1959
1960 static CORE_ADDR
1961 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1962 {
1963 struct arm_prologue_cache *cache;
1964
1965 if (*this_cache == NULL)
1966 *this_cache = arm_make_prologue_cache (this_frame);
1967 cache = *this_cache;
1968
1969 return cache->prev_sp - cache->framesize;
1970 }
1971
1972 struct frame_base arm_normal_base = {
1973 &arm_prologue_unwind,
1974 arm_normal_frame_base,
1975 arm_normal_frame_base,
1976 arm_normal_frame_base
1977 };
1978
1979 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1980 dummy frame. The frame ID's base needs to match the TOS value
1981 saved by save_dummy_frame_tos() and returned from
1982 arm_push_dummy_call, and the PC needs to match the dummy frame's
1983 breakpoint. */
1984
1985 static struct frame_id
1986 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1987 {
1988 return frame_id_build (get_frame_register_unsigned (this_frame, ARM_SP_REGNUM),
1989 get_frame_pc (this_frame));
1990 }
1991
1992 /* Given THIS_FRAME, find the previous frame's resume PC (which will
1993 be used to construct the previous frame's ID, after looking up the
1994 containing function). */
1995
1996 static CORE_ADDR
1997 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1998 {
1999 CORE_ADDR pc;
2000 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2001 return arm_addr_bits_remove (gdbarch, pc);
2002 }
2003
2004 static CORE_ADDR
2005 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2006 {
2007 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2008 }
2009
2010 static struct value *
2011 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2012 int regnum)
2013 {
2014 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2015 CORE_ADDR lr, cpsr;
2016 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2017
2018 switch (regnum)
2019 {
2020 case ARM_PC_REGNUM:
2021 /* The PC is normally copied from the return column, which
2022 describes saves of LR. However, that version may have an
2023 extra bit set to indicate Thumb state. The bit is not
2024 part of the PC. */
2025 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2026 return frame_unwind_got_constant (this_frame, regnum,
2027 arm_addr_bits_remove (gdbarch, lr));
2028
2029 case ARM_PS_REGNUM:
2030 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2031 cpsr = get_frame_register_unsigned (this_frame, regnum);
2032 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2033 if (IS_THUMB_ADDR (lr))
2034 cpsr |= t_bit;
2035 else
2036 cpsr &= ~t_bit;
2037 return frame_unwind_got_constant (this_frame, regnum, cpsr);
2038
2039 default:
2040 internal_error (__FILE__, __LINE__,
2041 _("Unexpected register %d"), regnum);
2042 }
2043 }
2044
2045 static void
2046 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
2047 struct dwarf2_frame_state_reg *reg,
2048 struct frame_info *this_frame)
2049 {
2050 switch (regnum)
2051 {
2052 case ARM_PC_REGNUM:
2053 case ARM_PS_REGNUM:
2054 reg->how = DWARF2_FRAME_REG_FN;
2055 reg->loc.fn = arm_dwarf2_prev_register;
2056 break;
2057 case ARM_SP_REGNUM:
2058 reg->how = DWARF2_FRAME_REG_CFA;
2059 break;
2060 }
2061 }
2062
2063 /* Return true if we are in the function's epilogue, i.e. after the
2064 instruction that destroyed the function's stack frame. */
2065
2066 static int
2067 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2068 {
2069 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2070 unsigned int insn, insn2;
2071 int found_return = 0, found_stack_adjust = 0;
2072 CORE_ADDR func_start, func_end;
2073 CORE_ADDR scan_pc;
2074 gdb_byte buf[4];
2075
2076 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2077 return 0;
2078
2079 /* The epilogue is a sequence of instructions along the following lines:
2080
2081 - add stack frame size to SP or FP
2082 - [if frame pointer used] restore SP from FP
2083 - restore registers from SP [may include PC]
2084 - a return-type instruction [if PC wasn't already restored]
2085
2086 In a first pass, we scan forward from the current PC and verify the
2087 instructions we find as compatible with this sequence, ending in a
2088 return instruction.
2089
2090 However, this is not sufficient to distinguish indirect function calls
2091 within a function from indirect tail calls in the epilogue in some cases.
2092 Therefore, if we didn't already find any SP-changing instruction during
2093 forward scan, we add a backward scanning heuristic to ensure we actually
2094 are in the epilogue. */
2095
2096 scan_pc = pc;
2097 while (scan_pc < func_end && !found_return)
2098 {
2099 if (target_read_memory (scan_pc, buf, 2))
2100 break;
2101
2102 scan_pc += 2;
2103 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2104
2105 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2106 found_return = 1;
2107 else if (insn == 0x46f7) /* mov pc, lr */
2108 found_return = 1;
2109 else if (insn == 0x46bd) /* mov sp, r7 */
2110 found_stack_adjust = 1;
2111 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2112 found_stack_adjust = 1;
2113 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
2114 {
2115 found_stack_adjust = 1;
2116 if (insn & 0x0100) /* <registers> include PC. */
2117 found_return = 1;
2118 }
2119 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
2120 {
2121 if (target_read_memory (scan_pc, buf, 2))
2122 break;
2123
2124 scan_pc += 2;
2125 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2126
2127 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2128 {
2129 found_stack_adjust = 1;
2130 if (insn2 & 0x8000) /* <registers> include PC. */
2131 found_return = 1;
2132 }
2133 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2134 && (insn2 & 0x0fff) == 0x0b04)
2135 {
2136 found_stack_adjust = 1;
2137 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
2138 found_return = 1;
2139 }
2140 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2141 && (insn2 & 0x0e00) == 0x0a00)
2142 found_stack_adjust = 1;
2143 else
2144 break;
2145 }
2146 else
2147 break;
2148 }
2149
2150 if (!found_return)
2151 return 0;
2152
2153 /* Since any instruction in the epilogue sequence, with the possible
2154 exception of return itself, updates the stack pointer, we need to
2155 scan backwards for at most one instruction. Try either a 16-bit or
2156 a 32-bit instruction. This is just a heuristic, so we do not worry
2157 too much about false positives. */
2158
2159 if (!found_stack_adjust)
2160 {
2161 if (pc - 4 < func_start)
2162 return 0;
2163 if (target_read_memory (pc - 4, buf, 4))
2164 return 0;
2165
2166 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2167 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
2168
2169 if (insn2 == 0x46bd) /* mov sp, r7 */
2170 found_stack_adjust = 1;
2171 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2172 found_stack_adjust = 1;
2173 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
2174 found_stack_adjust = 1;
2175 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2176 found_stack_adjust = 1;
2177 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2178 && (insn2 & 0x0fff) == 0x0b04)
2179 found_stack_adjust = 1;
2180 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2181 && (insn2 & 0x0e00) == 0x0a00)
2182 found_stack_adjust = 1;
2183 }
2184
2185 return found_stack_adjust;
2186 }
2187
2188 /* Return true if we are in the function's epilogue, i.e. after the
2189 instruction that destroyed the function's stack frame. */
2190
2191 static int
2192 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2193 {
2194 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2195 unsigned int insn;
2196 int found_return, found_stack_adjust;
2197 CORE_ADDR func_start, func_end;
2198
2199 if (arm_pc_is_thumb (gdbarch, pc))
2200 return thumb_in_function_epilogue_p (gdbarch, pc);
2201
2202 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2203 return 0;
2204
2205 /* We are in the epilogue if the previous instruction was a stack
2206 adjustment and the next instruction is a possible return (bx, mov
2207 pc, or pop). We could have to scan backwards to find the stack
2208 adjustment, or forwards to find the return, but this is a decent
2209 approximation. First scan forwards. */
2210
2211 found_return = 0;
2212 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
2213 if (bits (insn, 28, 31) != INST_NV)
2214 {
2215 if ((insn & 0x0ffffff0) == 0x012fff10)
2216 /* BX. */
2217 found_return = 1;
2218 else if ((insn & 0x0ffffff0) == 0x01a0f000)
2219 /* MOV PC. */
2220 found_return = 1;
2221 else if ((insn & 0x0fff0000) == 0x08bd0000
2222 && (insn & 0x0000c000) != 0)
2223 /* POP (LDMIA), including PC or LR. */
2224 found_return = 1;
2225 }
2226
2227 if (!found_return)
2228 return 0;
2229
2230 /* Scan backwards. This is just a heuristic, so do not worry about
2231 false positives from mode changes. */
2232
2233 if (pc < func_start + 4)
2234 return 0;
2235
2236 found_stack_adjust = 0;
2237 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
2238 if (bits (insn, 28, 31) != INST_NV)
2239 {
2240 if ((insn & 0x0df0f000) == 0x0080d000)
2241 /* ADD SP (register or immediate). */
2242 found_stack_adjust = 1;
2243 else if ((insn & 0x0df0f000) == 0x0040d000)
2244 /* SUB SP (register or immediate). */
2245 found_stack_adjust = 1;
2246 else if ((insn & 0x0ffffff0) == 0x01a0d000)
2247 /* MOV SP. */
2248 found_stack_adjust = 1;
2249 else if ((insn & 0x0fff0000) == 0x08bd0000)
2250 /* POP (LDMIA). */
2251 found_stack_adjust = 1;
2252 }
2253
2254 if (found_stack_adjust)
2255 return 1;
2256
2257 return 0;
2258 }
2259
2260
2261 /* When arguments must be pushed onto the stack, they go on in reverse
2262 order. The code below implements a FILO (stack) to do this. */
2263
2264 struct stack_item
2265 {
2266 int len;
2267 struct stack_item *prev;
2268 void *data;
2269 };
2270
2271 static struct stack_item *
2272 push_stack_item (struct stack_item *prev, const void *contents, int len)
2273 {
2274 struct stack_item *si;
2275 si = xmalloc (sizeof (struct stack_item));
2276 si->data = xmalloc (len);
2277 si->len = len;
2278 si->prev = prev;
2279 memcpy (si->data, contents, len);
2280 return si;
2281 }
2282
2283 static struct stack_item *
2284 pop_stack_item (struct stack_item *si)
2285 {
2286 struct stack_item *dead = si;
2287 si = si->prev;
2288 xfree (dead->data);
2289 xfree (dead);
2290 return si;
2291 }
2292
2293
2294 /* Return the alignment (in bytes) of the given type. */
2295
2296 static int
2297 arm_type_align (struct type *t)
2298 {
2299 int n;
2300 int align;
2301 int falign;
2302
2303 t = check_typedef (t);
2304 switch (TYPE_CODE (t))
2305 {
2306 default:
2307 /* Should never happen. */
2308 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
2309 return 4;
2310
2311 case TYPE_CODE_PTR:
2312 case TYPE_CODE_ENUM:
2313 case TYPE_CODE_INT:
2314 case TYPE_CODE_FLT:
2315 case TYPE_CODE_SET:
2316 case TYPE_CODE_RANGE:
2317 case TYPE_CODE_BITSTRING:
2318 case TYPE_CODE_REF:
2319 case TYPE_CODE_CHAR:
2320 case TYPE_CODE_BOOL:
2321 return TYPE_LENGTH (t);
2322
2323 case TYPE_CODE_ARRAY:
2324 case TYPE_CODE_COMPLEX:
2325 /* TODO: What about vector types? */
2326 return arm_type_align (TYPE_TARGET_TYPE (t));
2327
2328 case TYPE_CODE_STRUCT:
2329 case TYPE_CODE_UNION:
2330 align = 1;
2331 for (n = 0; n < TYPE_NFIELDS (t); n++)
2332 {
2333 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
2334 if (falign > align)
2335 align = falign;
2336 }
2337 return align;
2338 }
2339 }
2340
2341 /* Possible base types for a candidate for passing and returning in
2342 VFP registers. */
2343
2344 enum arm_vfp_cprc_base_type
2345 {
2346 VFP_CPRC_UNKNOWN,
2347 VFP_CPRC_SINGLE,
2348 VFP_CPRC_DOUBLE,
2349 VFP_CPRC_VEC64,
2350 VFP_CPRC_VEC128
2351 };
2352
2353 /* The length of one element of base type B. */
2354
2355 static unsigned
2356 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
2357 {
2358 switch (b)
2359 {
2360 case VFP_CPRC_SINGLE:
2361 return 4;
2362 case VFP_CPRC_DOUBLE:
2363 return 8;
2364 case VFP_CPRC_VEC64:
2365 return 8;
2366 case VFP_CPRC_VEC128:
2367 return 16;
2368 default:
2369 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2370 (int) b);
2371 }
2372 }
2373
2374 /* The character ('s', 'd' or 'q') for the type of VFP register used
2375 for passing base type B. */
2376
2377 static int
2378 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
2379 {
2380 switch (b)
2381 {
2382 case VFP_CPRC_SINGLE:
2383 return 's';
2384 case VFP_CPRC_DOUBLE:
2385 return 'd';
2386 case VFP_CPRC_VEC64:
2387 return 'd';
2388 case VFP_CPRC_VEC128:
2389 return 'q';
2390 default:
2391 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2392 (int) b);
2393 }
2394 }
2395
2396 /* Determine whether T may be part of a candidate for passing and
2397 returning in VFP registers, ignoring the limit on the total number
2398 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
2399 classification of the first valid component found; if it is not
2400 VFP_CPRC_UNKNOWN, all components must have the same classification
2401 as *BASE_TYPE. If it is found that T contains a type not permitted
2402 for passing and returning in VFP registers, a type differently
2403 classified from *BASE_TYPE, or two types differently classified
2404 from each other, return -1, otherwise return the total number of
2405 base-type elements found (possibly 0 in an empty structure or
2406 array). Vectors and complex types are not currently supported,
2407 matching the generic AAPCS support. */
2408
2409 static int
2410 arm_vfp_cprc_sub_candidate (struct type *t,
2411 enum arm_vfp_cprc_base_type *base_type)
2412 {
2413 t = check_typedef (t);
2414 switch (TYPE_CODE (t))
2415 {
2416 case TYPE_CODE_FLT:
2417 switch (TYPE_LENGTH (t))
2418 {
2419 case 4:
2420 if (*base_type == VFP_CPRC_UNKNOWN)
2421 *base_type = VFP_CPRC_SINGLE;
2422 else if (*base_type != VFP_CPRC_SINGLE)
2423 return -1;
2424 return 1;
2425
2426 case 8:
2427 if (*base_type == VFP_CPRC_UNKNOWN)
2428 *base_type = VFP_CPRC_DOUBLE;
2429 else if (*base_type != VFP_CPRC_DOUBLE)
2430 return -1;
2431 return 1;
2432
2433 default:
2434 return -1;
2435 }
2436 break;
2437
2438 case TYPE_CODE_ARRAY:
2439 {
2440 int count;
2441 unsigned unitlen;
2442 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
2443 if (count == -1)
2444 return -1;
2445 if (TYPE_LENGTH (t) == 0)
2446 {
2447 gdb_assert (count == 0);
2448 return 0;
2449 }
2450 else if (count == 0)
2451 return -1;
2452 unitlen = arm_vfp_cprc_unit_length (*base_type);
2453 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
2454 return TYPE_LENGTH (t) / unitlen;
2455 }
2456 break;
2457
2458 case TYPE_CODE_STRUCT:
2459 {
2460 int count = 0;
2461 unsigned unitlen;
2462 int i;
2463 for (i = 0; i < TYPE_NFIELDS (t); i++)
2464 {
2465 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2466 base_type);
2467 if (sub_count == -1)
2468 return -1;
2469 count += sub_count;
2470 }
2471 if (TYPE_LENGTH (t) == 0)
2472 {
2473 gdb_assert (count == 0);
2474 return 0;
2475 }
2476 else if (count == 0)
2477 return -1;
2478 unitlen = arm_vfp_cprc_unit_length (*base_type);
2479 if (TYPE_LENGTH (t) != unitlen * count)
2480 return -1;
2481 return count;
2482 }
2483
2484 case TYPE_CODE_UNION:
2485 {
2486 int count = 0;
2487 unsigned unitlen;
2488 int i;
2489 for (i = 0; i < TYPE_NFIELDS (t); i++)
2490 {
2491 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2492 base_type);
2493 if (sub_count == -1)
2494 return -1;
2495 count = (count > sub_count ? count : sub_count);
2496 }
2497 if (TYPE_LENGTH (t) == 0)
2498 {
2499 gdb_assert (count == 0);
2500 return 0;
2501 }
2502 else if (count == 0)
2503 return -1;
2504 unitlen = arm_vfp_cprc_unit_length (*base_type);
2505 if (TYPE_LENGTH (t) != unitlen * count)
2506 return -1;
2507 return count;
2508 }
2509
2510 default:
2511 break;
2512 }
2513
2514 return -1;
2515 }
2516
2517 /* Determine whether T is a VFP co-processor register candidate (CPRC)
2518 if passed to or returned from a non-variadic function with the VFP
2519 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
2520 *BASE_TYPE to the base type for T and *COUNT to the number of
2521 elements of that base type before returning. */
2522
2523 static int
2524 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
2525 int *count)
2526 {
2527 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
2528 int c = arm_vfp_cprc_sub_candidate (t, &b);
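/* Per the AAPCS VFP variant, a co-processor register candidate may
contain at most four elements of its base type; anything larger is
passed like an ordinary value in core registers or on the stack.  */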
2529 if (c <= 0 || c > 4)
2530 return 0;
2531 *base_type = b;
2532 *count = c;
2533 return 1;
2534 }
2535
2536 /* Return 1 if the VFP ABI should be used for passing arguments to and
2537 returning values from a function of type FUNC_TYPE, 0
2538 otherwise. */
2539
2540 static int
2541 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
2542 {
2543 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2544 /* Variadic functions always use the base ABI. Assume that functions
2545 without debug info are not variadic. */
2546 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
2547 return 0;
2548 /* The VFP ABI is only supported as a variant of AAPCS. */
2549 if (tdep->arm_abi != ARM_ABI_AAPCS)
2550 return 0;
2551 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
2552 }
2553
2554 /* We currently only support passing parameters in integer registers, which
2555 conforms with GCC's default model, and VFP argument passing following
2556 the VFP variant of AAPCS. Several other variants exist and
2557 we should probably support some of them based on the selected ABI. */
2558
2559 static CORE_ADDR
2560 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
2561 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
2562 struct value **args, CORE_ADDR sp, int struct_return,
2563 CORE_ADDR struct_addr)
2564 {
2565 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2566 int argnum;
2567 int argreg;
2568 int nstack;
2569 struct stack_item *si = NULL;
2570 int use_vfp_abi;
2571 struct type *ftype;
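/* Bitmask of VFP argument registers still available; bit N stands
for the single-precision register sN.  The AAPCS VFP variant passes
arguments in s0-s15 (equivalently d0-d7 or q0-q3).  */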
2572 unsigned vfp_regs_free = (1 << 16) - 1;
2573
2574 /* Determine the type of this function and whether the VFP ABI
2575 applies. */
2576 ftype = check_typedef (value_type (function));
2577 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
2578 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
2579 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
2580
2581 /* Set the return address. For the ARM, the return breakpoint is
2582 always at BP_ADDR. */
2583 if (arm_pc_is_thumb (gdbarch, bp_addr))
2584 bp_addr |= 1;
2585 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
2586
2587 /* Walk through the list of args and determine how large a temporary
2588 stack is required. Need to take care here as structs may be
2589 passed on the stack, and we have to push them. */
2590 nstack = 0;
2591
2592 argreg = ARM_A1_REGNUM;
2593 nstack = 0;
2594
2595 /* The struct_return pointer occupies the first parameter
2596 passing register. */
2597 if (struct_return)
2598 {
2599 if (arm_debug)
2600 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
2601 gdbarch_register_name (gdbarch, argreg),
2602 paddress (gdbarch, struct_addr));
2603 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
2604 argreg++;
2605 }
2606
2607 for (argnum = 0; argnum < nargs; argnum++)
2608 {
2609 int len;
2610 struct type *arg_type;
2611 struct type *target_type;
2612 enum type_code typecode;
2613 const bfd_byte *val;
2614 int align;
2615 enum arm_vfp_cprc_base_type vfp_base_type;
2616 int vfp_base_count;
2617 int may_use_core_reg = 1;
2618
2619 arg_type = check_typedef (value_type (args[argnum]));
2620 len = TYPE_LENGTH (arg_type);
2621 target_type = TYPE_TARGET_TYPE (arg_type);
2622 typecode = TYPE_CODE (arg_type);
2623 val = value_contents (args[argnum]);
2624
2625 align = arm_type_align (arg_type);
2626 /* Round alignment up to a whole number of words. */
2627 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
2628 /* Different ABIs have different maximum alignments. */
2629 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
2630 {
2631 /* The APCS ABI only requires word alignment. */
2632 align = INT_REGISTER_SIZE;
2633 }
2634 else
2635 {
2636 /* The AAPCS requires at most doubleword alignment. */
2637 if (align > INT_REGISTER_SIZE * 2)
2638 align = INT_REGISTER_SIZE * 2;
2639 }
2640
2641 if (use_vfp_abi
2642 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
2643 &vfp_base_count))
2644 {
2645 int regno;
2646 int unit_length;
2647 int shift;
2648 unsigned mask;
2649
2650 /* Because this is a CPRC it cannot go in a core register or
2651 cause a core register to be skipped for alignment.
2652 Either it goes in VFP registers and the rest of this loop
2653 iteration is skipped for this argument, or it goes on the
2654 stack (and the stack alignment code is correct for this
2655 case). */
2656 may_use_core_reg = 0;
2657
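/* Find a naturally aligned block of free VFP registers large enough
for this candidate.  Each bit of VFP_REGS_FREE represents one
single-precision register, so a base type of UNIT_LENGTH bytes
occupies UNIT_LENGTH / 4 consecutive bits, and the search below only
tries register numbers that are a multiple of that width.  */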
2658 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
2659 shift = unit_length / 4;
2660 mask = (1 << (shift * vfp_base_count)) - 1;
2661 for (regno = 0; regno < 16; regno += shift)
2662 if (((vfp_regs_free >> regno) & mask) == mask)
2663 break;
2664
2665 if (regno < 16)
2666 {
2667 int reg_char;
2668 int reg_scaled;
2669 int i;
2670
2671 vfp_regs_free &= ~(mask << regno);
2672 reg_scaled = regno / shift;
2673 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
2674 for (i = 0; i < vfp_base_count; i++)
2675 {
2676 char name_buf[4];
2677 int regnum;
2678 if (reg_char == 'q')
2679 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
2680 val + i * unit_length);
2681 else
2682 {
2683 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
2684 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
2685 strlen (name_buf));
2686 regcache_cooked_write (regcache, regnum,
2687 val + i * unit_length);
2688 }
2689 }
2690 continue;
2691 }
2692 else
2693 {
2694 /* This CPRC could not go in VFP registers, so all VFP
2695 registers are now marked as used. */
2696 vfp_regs_free = 0;
2697 }
2698 }
2699
2700 /* Push stack padding for doubleword alignment. */
2701 if (nstack & (align - 1))
2702 {
2703 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2704 nstack += INT_REGISTER_SIZE;
2705 }
2706
2707 /* Doubleword aligned quantities must go in even register pairs. */
2708 if (may_use_core_reg
2709 && argreg <= ARM_LAST_ARG_REGNUM
2710 && align > INT_REGISTER_SIZE
2711 && argreg & 1)
2712 argreg++;
2713
2714 /* If the argument is a pointer to a function, and it is a
2715 Thumb function, create a LOCAL copy of the value and set
2716 the THUMB bit in it. */
2717 if (TYPE_CODE_PTR == typecode
2718 && target_type != NULL
2719 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
2720 {
2721 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
2722 if (arm_pc_is_thumb (gdbarch, regval))
2723 {
2724 bfd_byte *copy = alloca (len);
2725 store_unsigned_integer (copy, len, byte_order,
2726 MAKE_THUMB_ADDR (regval));
2727 val = copy;
2728 }
2729 }
2730
2731 /* Copy the argument to general registers or the stack in
2732 register-sized pieces. Large arguments are split between
2733 registers and stack. */
2734 while (len > 0)
2735 {
2736 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
2737
2738 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
2739 {
2740 /* The argument is being passed in a general purpose
2741 register. */
2742 CORE_ADDR regval
2743 = extract_unsigned_integer (val, partial_len, byte_order);
2744 if (byte_order == BFD_ENDIAN_BIG)
2745 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
2746 if (arm_debug)
2747 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
2748 argnum,
2749 gdbarch_register_name
2750 (gdbarch, argreg),
2751 phex (regval, INT_REGISTER_SIZE));
2752 regcache_cooked_write_unsigned (regcache, argreg, regval);
2753 argreg++;
2754 }
2755 else
2756 {
2757 /* Push the arguments onto the stack. */
2758 if (arm_debug)
2759 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
2760 argnum, nstack);
2761 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2762 nstack += INT_REGISTER_SIZE;
2763 }
2764
2765 len -= partial_len;
2766 val += partial_len;
2767 }
2768 }
2769 /* If we have an odd number of words to push, then decrement the stack
2770 by one word now, so first stack argument will be dword aligned. */
2771 if (nstack & 4)
2772 sp -= 4;
2773
2774 while (si)
2775 {
2776 sp -= si->len;
2777 write_memory (sp, si->data, si->len);
2778 si = pop_stack_item (si);
2779 }
2780
2781 /* Finally, update the SP register. */
2782 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
2783
2784 return sp;
2785 }
2786
2787
2788 /* Always align the frame to an 8-byte boundary. This is required on
2789 some platforms and harmless on the rest. */
2790
2791 static CORE_ADDR
2792 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2793 {
2794 /* Align the stack to eight bytes. */
2795 return sp & ~ (CORE_ADDR) 7;
2796 }
2797
2798 static void
2799 print_fpu_flags (int flags)
2800 {
2801 if (flags & (1 << 0))
2802 fputs ("IVO ", stdout);
2803 if (flags & (1 << 1))
2804 fputs ("DVZ ", stdout);
2805 if (flags & (1 << 2))
2806 fputs ("OFL ", stdout);
2807 if (flags & (1 << 3))
2808 fputs ("UFL ", stdout);
2809 if (flags & (1 << 4))
2810 fputs ("INX ", stdout);
2811 putchar ('\n');
2812 }
2813
2814 /* Print interesting information about the floating point processor
2815 (if present) or emulator. */
2816 static void
2817 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
2818 struct frame_info *frame, const char *args)
2819 {
2820 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
2821 int type;
2822
2823 type = (status >> 24) & 127;
2824 if (status & (1 << 31))
2825 printf (_("Hardware FPU type %d\n"), type);
2826 else
2827 printf (_("Software FPU type %d\n"), type);
2828 /* i18n: [floating point unit] mask */
2829 fputs (_("mask: "), stdout);
2830 print_fpu_flags (status >> 16);
2831 /* i18n: [floating point unit] flags */
2832 fputs (_("flags: "), stdout);
2833 print_fpu_flags (status);
2834 }
2835
2836 /* Construct the ARM extended floating point type. */
2837 static struct type *
2838 arm_ext_type (struct gdbarch *gdbarch)
2839 {
2840 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2841
2842 if (!tdep->arm_ext_type)
2843 tdep->arm_ext_type
2844 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
2845 floatformats_arm_ext);
2846
2847 return tdep->arm_ext_type;
2848 }
2849
2850 static struct type *
2851 arm_neon_double_type (struct gdbarch *gdbarch)
2852 {
2853 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2854
2855 if (tdep->neon_double_type == NULL)
2856 {
2857 struct type *t, *elem;
2858
2859 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
2860 TYPE_CODE_UNION);
2861 elem = builtin_type (gdbarch)->builtin_uint8;
2862 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
2863 elem = builtin_type (gdbarch)->builtin_uint16;
2864 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
2865 elem = builtin_type (gdbarch)->builtin_uint32;
2866 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
2867 elem = builtin_type (gdbarch)->builtin_uint64;
2868 append_composite_type_field (t, "u64", elem);
2869 elem = builtin_type (gdbarch)->builtin_float;
2870 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
2871 elem = builtin_type (gdbarch)->builtin_double;
2872 append_composite_type_field (t, "f64", elem);
2873
2874 TYPE_VECTOR (t) = 1;
2875 TYPE_NAME (t) = "neon_d";
2876 tdep->neon_double_type = t;
2877 }
2878
2879 return tdep->neon_double_type;
2880 }
2881
2882 /* FIXME: The vector types are not correctly ordered on big-endian
2883 targets. Just as s0 is the low bits of d0, d0[0] is also the low
2884 bits of d0 - regardless of what unit size is being held in d0. So
2885 the offset of the first uint8 in d0 is 7, but the offset of the
2886 first float is 4. This code works as-is for little-endian
2887 targets. */
2888
2889 static struct type *
2890 arm_neon_quad_type (struct gdbarch *gdbarch)
2891 {
2892 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2893
2894 if (tdep->neon_quad_type == NULL)
2895 {
2896 struct type *t, *elem;
2897
2898 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
2899 TYPE_CODE_UNION);
2900 elem = builtin_type (gdbarch)->builtin_uint8;
2901 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
2902 elem = builtin_type (gdbarch)->builtin_uint16;
2903 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
2904 elem = builtin_type (gdbarch)->builtin_uint32;
2905 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
2906 elem = builtin_type (gdbarch)->builtin_uint64;
2907 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
2908 elem = builtin_type (gdbarch)->builtin_float;
2909 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
2910 elem = builtin_type (gdbarch)->builtin_double;
2911 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
2912
2913 TYPE_VECTOR (t) = 1;
2914 TYPE_NAME (t) = "neon_q";
2915 tdep->neon_quad_type = t;
2916 }
2917
2918 return tdep->neon_quad_type;
2919 }
2920
2921 /* Return the GDB type object for the "standard" data type of data in
2922 register N. */
2923
2924 static struct type *
2925 arm_register_type (struct gdbarch *gdbarch, int regnum)
2926 {
2927 int num_regs = gdbarch_num_regs (gdbarch);
2928
2929 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
2930 && regnum >= num_regs && regnum < num_regs + 32)
2931 return builtin_type (gdbarch)->builtin_float;
2932
2933 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
2934 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
2935 return arm_neon_quad_type (gdbarch);
2936
2937 /* If the target description has register information, we are only
2938 in this function so that we can override the types of
2939 double-precision registers for NEON. */
2940 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
2941 {
2942 struct type *t = tdesc_register_type (gdbarch, regnum);
2943
2944 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
2945 && TYPE_CODE (t) == TYPE_CODE_FLT
2946 && gdbarch_tdep (gdbarch)->have_neon)
2947 return arm_neon_double_type (gdbarch);
2948 else
2949 return t;
2950 }
2951
2952 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
2953 {
2954 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
2955 return builtin_type (gdbarch)->builtin_void;
2956
2957 return arm_ext_type (gdbarch);
2958 }
2959 else if (regnum == ARM_SP_REGNUM)
2960 return builtin_type (gdbarch)->builtin_data_ptr;
2961 else if (regnum == ARM_PC_REGNUM)
2962 return builtin_type (gdbarch)->builtin_func_ptr;
2963 else if (regnum >= ARRAY_SIZE (arm_register_names))
2964 /* These registers are only supported on targets which supply
2965 an XML description. */
2966 return builtin_type (gdbarch)->builtin_int0;
2967 else
2968 return builtin_type (gdbarch)->builtin_uint32;
2969 }
2970
2971 /* Map a DWARF register REGNUM onto the appropriate GDB register
2972 number. */
2973
2974 static int
2975 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2976 {
2977 /* Core integer regs. */
2978 if (reg >= 0 && reg <= 15)
2979 return reg;
2980
2981 /* Legacy FPA encoding. These were once used in a way which
2982 overlapped with VFP register numbering, so their use is
2983 discouraged, but GDB doesn't support the ARM toolchain
2984 which used them for VFP. */
2985 if (reg >= 16 && reg <= 23)
2986 return ARM_F0_REGNUM + reg - 16;
2987
2988 /* New assignments for the FPA registers. */
2989 if (reg >= 96 && reg <= 103)
2990 return ARM_F0_REGNUM + reg - 96;
2991
2992 /* WMMX register assignments. */
2993 if (reg >= 104 && reg <= 111)
2994 return ARM_WCGR0_REGNUM + reg - 104;
2995
2996 if (reg >= 112 && reg <= 127)
2997 return ARM_WR0_REGNUM + reg - 112;
2998
2999 if (reg >= 192 && reg <= 199)
3000 return ARM_WC0_REGNUM + reg - 192;
3001
3002 /* VFP v2 registers. A double precision value is actually
3003 in d1 rather than s2, but the ABI only defines numbering
3004 for the single precision registers. This will "just work"
3005 in GDB for little endian targets (we'll read eight bytes,
3006 starting in s0 and then progressing to s1), but will be
3007 reversed on big endian targets with VFP. This won't
3008 be a problem for the new Neon quad registers; you're supposed
3009 to use DW_OP_piece for those. */
3010 if (reg >= 64 && reg <= 95)
3011 {
3012 char name_buf[4];
3013
3014 sprintf (name_buf, "s%d", reg - 64);
3015 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3016 strlen (name_buf));
3017 }
3018
3019 /* VFP v3 / Neon registers. This range is also used for VFP v2
3020 registers, except that it now describes d0 instead of s0. */
3021 if (reg >= 256 && reg <= 287)
3022 {
3023 char name_buf[4];
3024
3025 sprintf (name_buf, "d%d", reg - 256);
3026 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3027 strlen (name_buf));
3028 }
3029
3030 return -1;
3031 }
3032
3033 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
3034 static int
3035 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
3036 {
3037 int reg = regnum;
3038 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
3039
3040 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
3041 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
3042
3043 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
3044 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
3045
3046 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
3047 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
3048
3049 if (reg < NUM_GREGS)
3050 return SIM_ARM_R0_REGNUM + reg;
3051 reg -= NUM_GREGS;
3052
3053 if (reg < NUM_FREGS)
3054 return SIM_ARM_FP0_REGNUM + reg;
3055 reg -= NUM_FREGS;
3056
3057 if (reg < NUM_SREGS)
3058 return SIM_ARM_FPS_REGNUM + reg;
3059 reg -= NUM_SREGS;
3060
3061 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
3062 }
3063
3064 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
3065 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
3066 It is thought that this is the floating-point register format on
3067 little-endian systems. */
3068
3069 static void
3070 convert_from_extended (const struct floatformat *fmt, const void *ptr,
3071 void *dbl, int endianess)
3072 {
3073 DOUBLEST d;
3074
3075 if (endianess == BFD_ENDIAN_BIG)
3076 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
3077 else
3078 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
3079 ptr, &d);
3080 floatformat_from_doublest (fmt, &d, dbl);
3081 }
3082
3083 static void
3084 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
3085 int endianess)
3086 {
3087 DOUBLEST d;
3088
3089 floatformat_to_doublest (fmt, ptr, &d);
3090 if (endianess == BFD_ENDIAN_BIG)
3091 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
3092 else
3093 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
3094 &d, dbl);
3095 }
3096
3097 static int
3098 condition_true (unsigned long cond, unsigned long status_reg)
3099 {
3100 if (cond == INST_AL || cond == INST_NV)
3101 return 1;
3102
3103 switch (cond)
3104 {
3105 case INST_EQ:
3106 return ((status_reg & FLAG_Z) != 0);
3107 case INST_NE:
3108 return ((status_reg & FLAG_Z) == 0);
3109 case INST_CS:
3110 return ((status_reg & FLAG_C) != 0);
3111 case INST_CC:
3112 return ((status_reg & FLAG_C) == 0);
3113 case INST_MI:
3114 return ((status_reg & FLAG_N) != 0);
3115 case INST_PL:
3116 return ((status_reg & FLAG_N) == 0);
3117 case INST_VS:
3118 return ((status_reg & FLAG_V) != 0);
3119 case INST_VC:
3120 return ((status_reg & FLAG_V) == 0);
3121 case INST_HI:
3122 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
3123 case INST_LS:
3124 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
3125 case INST_GE:
3126 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
3127 case INST_LT:
3128 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
3129 case INST_GT:
3130 return (((status_reg & FLAG_Z) == 0)
3131 && (((status_reg & FLAG_N) == 0)
3132 == ((status_reg & FLAG_V) == 0)));
3133 case INST_LE:
3134 return (((status_reg & FLAG_Z) != 0)
3135 || (((status_reg & FLAG_N) == 0)
3136 != ((status_reg & FLAG_V) == 0)));
3137 }
3138 return 1;
3139 }
3140
3141 static unsigned long
3142 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
3143 unsigned long pc_val, unsigned long status_reg)
3144 {
3145 unsigned long res, shift;
3146 int rm = bits (inst, 0, 3);
3147 unsigned long shifttype = bits (inst, 5, 6);
3148
3149 if (bit (inst, 4))
3150 {
3151 int rs = bits (inst, 8, 11);
3152 shift = (rs == 15 ? pc_val + 8
3153 : get_frame_register_unsigned (frame, rs)) & 0xFF;
3154 }
3155 else
3156 shift = bits (inst, 7, 11);
3157
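/* On classic ARM implementations a read of the PC as an operand
yields the instruction address plus 8, or plus 12 when a
register-specified shift is in use (bit 4 set); that is what the
expressions below assume.  */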
3158 res = (rm == 15
3159 ? (pc_val + (bit (inst, 4) ? 12 : 8))
3160 : get_frame_register_unsigned (frame, rm));
3161
3162 switch (shifttype)
3163 {
3164 case 0: /* LSL */
3165 res = shift >= 32 ? 0 : res << shift;
3166 break;
3167
3168 case 1: /* LSR */
3169 res = shift >= 32 ? 0 : res >> shift;
3170 break;
3171
3172 case 2: /* ASR */
3173 if (shift >= 32)
3174 shift = 31;
3175 res = ((res & 0x80000000L)
3176 ? ~((~res) >> shift) : res >> shift);
3177 break;
3178
3179 case 3: /* ROR/RRX */
3180 shift &= 31;
3181 if (shift == 0)
3182 res = (res >> 1) | (carry ? 0x80000000L : 0);
3183 else
3184 res = (res >> shift) | (res << (32 - shift));
3185 break;
3186 }
3187
3188 return res & 0xffffffff;
3189 }
3190
3191 /* Return number of 1-bits in VAL. */
3192
3193 static int
3194 bitcount (unsigned long val)
3195 {
3196 int nbits;
3197 for (nbits = 0; val != 0; nbits++)
3198 val &= val - 1; /* delete rightmost 1-bit in val */
3199 return nbits;
3200 }
3201
3202 /* Return the size in bytes of the complete Thumb instruction whose
3203 first halfword is INST1. */
3204
3205 static int
3206 thumb_insn_size (unsigned short inst1)
3207 {
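/* A first halfword whose top five bits are 0b11101, 0b11110 or
0b11111 (bits [15:13] all set and bits [12:11] nonzero) introduces
a 32-bit Thumb-2 instruction; 0b11100 is the 16-bit unconditional
branch.  */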
3208 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3209 return 4;
3210 else
3211 return 2;
3212 }
3213
3214 static int
3215 thumb_advance_itstate (unsigned int itstate)
3216 {
3217 /* Preserve IT[7:5], the first three bits of the condition. Shift
3218 the upcoming condition flags left by one bit. */
3219 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
3220
3221 /* If we have finished the IT block, clear the state. */
3222 if ((itstate & 0x0f) == 0)
3223 itstate = 0;
3224
3225 return itstate;
3226 }
3227
3228 /* Find the next PC after the current instruction executes. In some
3229 cases we can not statically determine the answer (see the IT state
3230 handling in this function); in that case, a breakpoint may be
3231 inserted in addition to the returned PC, which will be used to set
3232 another breakpoint by our caller. */
3233
3234 static CORE_ADDR
3235 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3236 {
3237 struct gdbarch *gdbarch = get_frame_arch (frame);
3238 struct address_space *aspace = get_frame_address_space (frame);
3239 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3240 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3241 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
3242 unsigned short inst1;
3243 CORE_ADDR nextpc = pc + 2; /* default is next instruction */
3244 unsigned long offset;
3245 ULONGEST status, itstate;
3246
3247 nextpc = MAKE_THUMB_ADDR (nextpc);
3248 pc_val = MAKE_THUMB_ADDR (pc_val);
3249
3250 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3251
3252 /* Thumb-2 conditional execution support. There are eight bits in
3253 the CPSR which describe conditional execution state. Once
3254 reconstructed (they're in a funny order), the low five bits
3255 describe the low bit of the condition for each instruction and
3256 how many instructions remain. The high three bits describe the
3257 base condition. One of the low four bits will be set if an IT
3258 block is active. These bits read as zero on earlier
3259 processors. */
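/* CPSR bits [15:10] hold IT[7:2] and bits [26:25] hold IT[1:0]; the
extraction below reassembles them into a single eight-bit ITSTATE
value.  */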
3260 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3261 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
3262
3263 /* If-Then handling. On GNU/Linux, where this routine is used, we
3264 use an undefined instruction as a breakpoint. Unlike BKPT, IT
3265 can disable execution of the undefined instruction. So we might
3266 miss the breakpoint if we set it on a skipped conditional
3267 instruction. Because conditional instructions can change the
3268 flags, affecting the execution of further instructions, we may
3269 need to set two breakpoints. */
3270
3271 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
3272 {
3273 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3274 {
3275 /* An IT instruction. Because this instruction does not
3276 modify the flags, we can accurately predict the next
3277 executed instruction. */
3278 itstate = inst1 & 0x00ff;
3279 pc += thumb_insn_size (inst1);
3280
3281 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3282 {
3283 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3284 pc += thumb_insn_size (inst1);
3285 itstate = thumb_advance_itstate (itstate);
3286 }
3287
3288 return MAKE_THUMB_ADDR (pc);
3289 }
3290 else if (itstate != 0)
3291 {
3292 /* We are in a conditional block. Check the condition. */
3293 if (! condition_true (itstate >> 4, status))
3294 {
3295 /* Advance to the next executed instruction. */
3296 pc += thumb_insn_size (inst1);
3297 itstate = thumb_advance_itstate (itstate);
3298
3299 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3300 {
3301 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3302 pc += thumb_insn_size (inst1);
3303 itstate = thumb_advance_itstate (itstate);
3304 }
3305
3306 return MAKE_THUMB_ADDR (pc);
3307 }
3308 else if ((itstate & 0x0f) == 0x08)
3309 {
3310 /* This is the last instruction of the conditional
3311 block, and it is executed. We can handle it normally
3312 because the following instruction is not conditional,
3313 and we must handle it normally because it is
3314 permitted to branch. Fall through. */
3315 }
3316 else
3317 {
3318 int cond_negated;
3319
3320 /* There are conditional instructions after this one.
3321 If this instruction modifies the flags, then we can
3322 not predict what the next executed instruction will
3323 be. Fortunately, this instruction is architecturally
3324 forbidden to branch; we know it will fall through.
3325 Start by skipping past it. */
3326 pc += thumb_insn_size (inst1);
3327 itstate = thumb_advance_itstate (itstate);
3328
3329 /* Set a breakpoint on the following instruction. */
3330 gdb_assert ((itstate & 0x0f) != 0);
3331 if (insert_bkpt)
3332 insert_single_step_breakpoint (gdbarch, aspace, pc);
3333 cond_negated = (itstate >> 4) & 1;
3334
3335 /* Skip all following instructions with the same
3336 condition. If there is a later instruction in the IT
3337 block with the opposite condition, set the other
3338 breakpoint there. If not, then set a breakpoint on
3339 the instruction after the IT block. */
3340 do
3341 {
3342 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3343 pc += thumb_insn_size (inst1);
3344 itstate = thumb_advance_itstate (itstate);
3345 }
3346 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
3347
3348 return MAKE_THUMB_ADDR (pc);
3349 }
3350 }
3351 }
3352 else if (itstate & 0x0f)
3353 {
3354 /* We are in a conditional block. Check the condition. */
3355 int cond = itstate >> 4;
3356
3357 if (! condition_true (cond, status))
3358 {
3359 /* Advance to the next instruction. All the 32-bit
3360 instructions share a common prefix. */
3361 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3362 return MAKE_THUMB_ADDR (pc + 4);
3363 else
3364 return MAKE_THUMB_ADDR (pc + 2);
3365 }
3366
3367 /* Otherwise, handle the instruction normally. */
3368 }
3369
3370 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
3371 {
3372 CORE_ADDR sp;
3373
3374 /* Fetch the saved PC from the stack. It's stored above
3375 all of the other registers. */
3376 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
3377 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
3378 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
3379 }
3380 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
3381 {
3382 unsigned long cond = bits (inst1, 8, 11);
3383 if (cond == 0x0f) /* 0x0f = SWI */
3384 {
3385 struct gdbarch_tdep *tdep;
3386 tdep = gdbarch_tdep (gdbarch);
3387
3388 if (tdep->syscall_next_pc != NULL)
3389 nextpc = tdep->syscall_next_pc (frame);
3390
3391 }
3392 else if (cond != 0x0f && condition_true (cond, status))
3393 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
3394 }
3395 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
3396 {
3397 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
3398 }
3399 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
3400 {
3401 unsigned short inst2;
3402 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
3403
3404 /* Default to the next instruction. */
3405 nextpc = pc + 4;
3406 nextpc = MAKE_THUMB_ADDR (nextpc);
3407
3408 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
3409 {
3410 /* Branches and miscellaneous control instructions. */
3411
3412 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
3413 {
3414 /* B, BL, BLX. */
3415 int j1, j2, imm1, imm2;
3416
3417 imm1 = sbits (inst1, 0, 10);
3418 imm2 = bits (inst2, 0, 10);
3419 j1 = bit (inst2, 13);
3420 j2 = bit (inst2, 11);
3421
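/* Reconstruct the branch offset S:I1:I2:imm10:imm11:'0', where
I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S).  IMM1 was sign-extended
above, so bits 22 and 23 already hold S; XORing them with !J2 and
!J1 yields I2 and I1.  */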
3422 offset = ((imm1 << 12) + (imm2 << 1));
3423 offset ^= ((!j2) << 22) | ((!j1) << 23);
3424
3425 nextpc = pc_val + offset;
3426 /* For BLX make sure to clear the low bits. */
3427 if (bit (inst2, 12) == 0)
3428 nextpc = nextpc & 0xfffffffc;
3429 }
3430 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
3431 {
3432 /* SUBS PC, LR, #imm8. */
3433 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
3434 nextpc -= inst2 & 0x00ff;
3435 }
3436 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
3437 {
3438 /* Conditional branch. */
3439 if (condition_true (bits (inst1, 6, 9), status))
3440 {
3441 int sign, j1, j2, imm1, imm2;
3442
3443 sign = sbits (inst1, 10, 10);
3444 imm1 = bits (inst1, 0, 5);
3445 imm2 = bits (inst2, 0, 10);
3446 j1 = bit (inst2, 13);
3447 j2 = bit (inst2, 11);
3448
3449 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
3450 offset += (imm1 << 12) + (imm2 << 1);
3451
3452 nextpc = pc_val + offset;
3453 }
3454 }
3455 }
3456 else if ((inst1 & 0xfe50) == 0xe810)
3457 {
3458 /* Load multiple or RFE. */
3459 int rn, offset, load_pc = 1;
3460
3461 rn = bits (inst1, 0, 3);
3462 if (bit (inst1, 7) && !bit (inst1, 8))
3463 {
3464 /* LDMIA or POP */
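/* Registers are loaded in ascending order from ascending addresses,
so the PC, being the highest-numbered register in the list, comes
from the highest address: (number of registers - 1) words above the
base.  */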
3465 if (!bit (inst2, 15))
3466 load_pc = 0;
3467 offset = bitcount (inst2) * 4 - 4;
3468 }
3469 else if (!bit (inst1, 7) && bit (inst1, 8))
3470 {
3471 /* LDMDB */
3472 if (!bit (inst2, 15))
3473 load_pc = 0;
3474 offset = -4;
3475 }
3476 else if (bit (inst1, 7) && bit (inst1, 8))
3477 {
3478 /* RFEIA */
3479 offset = 0;
3480 }
3481 else if (!bit (inst1, 7) && !bit (inst1, 8))
3482 {
3483 /* RFEDB */
3484 offset = -8;
3485 }
3486 else
3487 load_pc = 0;
3488
3489 if (load_pc)
3490 {
3491 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
3492 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
3493 }
3494 }
3495 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
3496 {
3497 /* MOV PC or MOVS PC. */
3498 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3499 nextpc = MAKE_THUMB_ADDR (nextpc);
3500 }
3501 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
3502 {
3503 /* LDR PC. */
3504 CORE_ADDR base;
3505 int rn, load_pc = 1;
3506
3507 rn = bits (inst1, 0, 3);
3508 base = get_frame_register_unsigned (frame, rn);
3509 if (rn == 15)
3510 {
3511 base = (base + 4) & ~(CORE_ADDR) 0x3;
3512 if (bit (inst1, 7))
3513 base += bits (inst2, 0, 11);
3514 else
3515 base -= bits (inst2, 0, 11);
3516 }
3517 else if (bit (inst1, 7))
3518 base += bits (inst2, 0, 11);
3519 else if (bit (inst2, 11))
3520 {
3521 if (bit (inst2, 10))
3522 {
3523 if (bit (inst2, 9))
3524 base += bits (inst2, 0, 7);
3525 else
3526 base -= bits (inst2, 0, 7);
3527 }
3528 }
3529 else if ((inst2 & 0x0fc0) == 0x0000)
3530 {
3531 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
3532 base += get_frame_register_unsigned (frame, rm) << shift;
3533 }
3534 else
3535 /* Reserved. */
3536 load_pc = 0;
3537
3538 if (load_pc)
3539 nextpc = get_frame_memory_unsigned (frame, base, 4);
3540 }
3541 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
3542 {
3543 /* TBB. */
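/* TBB branches forward from the PC by twice the byte-sized table
entry selected by the index register; TBH (below) does the same
with halfword entries.  */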
3544 CORE_ADDR tbl_reg, table, offset, length;
3545
3546 tbl_reg = bits (inst1, 0, 3);
3547 if (tbl_reg == 0x0f)
3548 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3549 else
3550 table = get_frame_register_unsigned (frame, tbl_reg);
3551
3552 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3553 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
3554 nextpc = pc_val + length;
3555 }
3556 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
3557 {
3558 /* TBH. */
3559 CORE_ADDR tbl_reg, table, offset, length;
3560
3561 tbl_reg = bits (inst1, 0, 3);
3562 if (tbl_reg == 0x0f)
3563 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3564 else
3565 table = get_frame_register_unsigned (frame, tbl_reg);
3566
3567 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3568 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
3569 nextpc = pc_val + length;
3570 }
3571 }
3572 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
3573 {
3574 if (bits (inst1, 3, 6) == 0x0f)
3575 nextpc = pc_val;
3576 else
3577 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
3578 }
3579 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
3580 {
3581 if (bits (inst1, 3, 6) == 0x0f)
3582 nextpc = pc_val;
3583 else
3584 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
3585
3586 nextpc = MAKE_THUMB_ADDR (nextpc);
3587 }
3588 else if ((inst1 & 0xf500) == 0xb100)
3589 {
3590 /* CBNZ or CBZ. */
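/* The branch offset is i:imm5:'0' (bit 9 and bits 7:3), giving a
forward range of 0-126 bytes; bit 11 selects CBNZ (branch if the
register is non-zero) rather than CBZ.  */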
3591 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
3592 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
3593
3594 if (bit (inst1, 11) && reg != 0)
3595 nextpc = pc_val + imm;
3596 else if (!bit (inst1, 11) && reg == 0)
3597 nextpc = pc_val + imm;
3598 }
3599 return nextpc;
3600 }
3601
3602 /* Get the raw next address. PC is the current program counter, in
3603 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
3604 the alternative next instruction when there are two options.
3605
3606 The value returned has the execution state of the next instruction
3607 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
3608 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
3609 address.
3610 */
3611 static CORE_ADDR
3612 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3613 {
3614 struct gdbarch *gdbarch = get_frame_arch (frame);
3615 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3616 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3617 unsigned long pc_val;
3618 unsigned long this_instr;
3619 unsigned long status;
3620 CORE_ADDR nextpc;
3621
3622 if (arm_frame_is_thumb (frame))
3623 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
3624
3625 pc_val = (unsigned long) pc;
3626 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3627
3628 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3629 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
3630
3631 if (bits (this_instr, 28, 31) == INST_NV)
3632 switch (bits (this_instr, 24, 27))
3633 {
3634 case 0xa:
3635 case 0xb:
3636 {
3637 /* Branch with Link and change to Thumb. */
3638 nextpc = BranchDest (pc, this_instr);
3639 nextpc |= bit (this_instr, 24) << 1;
3640 nextpc = MAKE_THUMB_ADDR (nextpc);
3641 break;
3642 }
3643 case 0xc:
3644 case 0xd:
3645 case 0xe:
3646 /* Coprocessor register transfer. */
3647 if (bits (this_instr, 12, 15) == 15)
3648 error (_("Invalid update to pc in instruction"));
3649 break;
3650 }
3651 else if (condition_true (bits (this_instr, 28, 31), status))
3652 {
3653 switch (bits (this_instr, 24, 27))
3654 {
3655 case 0x0:
3656 case 0x1: /* data processing */
3657 case 0x2:
3658 case 0x3:
3659 {
3660 unsigned long operand1, operand2, result = 0;
3661 unsigned long rn;
3662 int c;
3663
3664 if (bits (this_instr, 12, 15) != 15)
3665 break;
3666
3667 if (bits (this_instr, 22, 25) == 0
3668 && bits (this_instr, 4, 7) == 9) /* multiply */
3669 error (_("Invalid update to pc in instruction"));
3670
3671 /* BX <reg>, BLX <reg> */
3672 if (bits (this_instr, 4, 27) == 0x12fff1
3673 || bits (this_instr, 4, 27) == 0x12fff3)
3674 {
3675 rn = bits (this_instr, 0, 3);
3676 nextpc = (rn == 15) ? pc_val + 8
3677 : get_frame_register_unsigned (frame, rn);
3678 return nextpc;
3679 }
3680
3681 /* Multiply into PC */
3682 c = (status & FLAG_C) ? 1 : 0;
3683 rn = bits (this_instr, 16, 19);
3684 operand1 = (rn == 15) ? pc_val + 8
3685 : get_frame_register_unsigned (frame, rn);
3686
3687 if (bit (this_instr, 25))
3688 {
3689 unsigned long immval = bits (this_instr, 0, 7);
3690 unsigned long rotate = 2 * bits (this_instr, 8, 11);
3691 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
3692 & 0xffffffff;
3693 }
3694 else /* operand 2 is a shifted register */
3695 operand2 = shifted_reg_val (frame, this_instr, c, pc_val, status);
3696
3697 switch (bits (this_instr, 21, 24))
3698 {
3699 case 0x0: /*and */
3700 result = operand1 & operand2;
3701 break;
3702
3703 case 0x1: /*eor */
3704 result = operand1 ^ operand2;
3705 break;
3706
3707 case 0x2: /*sub */
3708 result = operand1 - operand2;
3709 break;
3710
3711 case 0x3: /*rsb */
3712 result = operand2 - operand1;
3713 break;
3714
3715 case 0x4: /*add */
3716 result = operand1 + operand2;
3717 break;
3718
3719 case 0x5: /*adc */
3720 result = operand1 + operand2 + c;
3721 break;
3722
3723 case 0x6: /*sbc */
3724 result = operand1 - operand2 + c;
3725 break;
3726
3727 case 0x7: /*rsc */
3728 result = operand2 - operand1 + c;
3729 break;
3730
3731 case 0x8:
3732 case 0x9:
3733 case 0xa:
3734 case 0xb: /* tst, teq, cmp, cmn */
3735 result = (unsigned long) nextpc;
3736 break;
3737
3738 case 0xc: /*orr */
3739 result = operand1 | operand2;
3740 break;
3741
3742 case 0xd: /*mov */
3743 /* Always step into a function. */
3744 result = operand2;
3745 break;
3746
3747 case 0xe: /*bic */
3748 result = operand1 & ~operand2;
3749 break;
3750
3751 case 0xf: /*mvn */
3752 result = ~operand2;
3753 break;
3754 }
3755
3756 /* In 26-bit APCS the bottom two bits of the result are
3757 ignored, and we always end up in ARM state. */
3758 if (!arm_apcs_32)
3759 nextpc = arm_addr_bits_remove (gdbarch, result);
3760 else
3761 nextpc = result;
3762
3763 break;
3764 }
3765
3766 case 0x4:
3767 case 0x5: /* data transfer */
3768 case 0x6:
3769 case 0x7:
3770 if (bit (this_instr, 20))
3771 {
3772 /* load */
3773 if (bits (this_instr, 12, 15) == 15)
3774 {
3775 /* rd == pc */
3776 unsigned long rn;
3777 unsigned long base;
3778
3779 if (bit (this_instr, 22))
3780 error (_("Invalid update to pc in instruction"));
3781
3782 /* byte write to PC */
3783 rn = bits (this_instr, 16, 19);
3784 base = (rn == 15) ? pc_val + 8
3785 : get_frame_register_unsigned (frame, rn);
3786 if (bit (this_instr, 24))
3787 {
3788 /* pre-indexed */
3789 int c = (status & FLAG_C) ? 1 : 0;
3790 unsigned long offset =
3791 (bit (this_instr, 25)
3792 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
3793 : bits (this_instr, 0, 11));
3794
3795 if (bit (this_instr, 23))
3796 base += offset;
3797 else
3798 base -= offset;
3799 }
3800 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
3801 4, byte_order);
3802 }
3803 }
3804 break;
3805
3806 case 0x8:
3807 case 0x9: /* block transfer */
3808 if (bit (this_instr, 20))
3809 {
3810 /* LDM */
3811 if (bit (this_instr, 15))
3812 {
3813 /* loading pc */
3814 int offset = 0;
3815
3816 if (bit (this_instr, 23))
3817 {
3818 /* up */
3819 unsigned long reglist = bits (this_instr, 0, 14);
3820 offset = bitcount (reglist) * 4;
3821 if (bit (this_instr, 24)) /* pre */
3822 offset += 4;
3823 }
3824 else if (bit (this_instr, 24))
3825 offset = -4;
3826
3827 {
3828 unsigned long rn_val =
3829 get_frame_register_unsigned (frame,
3830 bits (this_instr, 16, 19));
3831 nextpc =
3832 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
3833 + offset),
3834 4, byte_order);
3835 }
3836 }
3837 }
3838 break;
3839
3840 case 0xb: /* branch & link */
3841 case 0xa: /* branch */
3842 {
3843 nextpc = BranchDest (pc, this_instr);
3844 break;
3845 }
3846
3847 case 0xc:
3848 case 0xd:
3849 case 0xe: /* coproc ops */
3850 break;
3851 case 0xf: /* SWI */
3852 {
3853 struct gdbarch_tdep *tdep;
3854 tdep = gdbarch_tdep (gdbarch);
3855
3856 if (tdep->syscall_next_pc != NULL)
3857 nextpc = tdep->syscall_next_pc (frame);
3858
3859 }
3860 break;
3861
3862 default:
3863 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
3864 return (pc);
3865 }
3866 }
3867
3868 return nextpc;
3869 }
3870
3871 CORE_ADDR
3872 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
3873 {
3874 struct gdbarch *gdbarch = get_frame_arch (frame);
3875 CORE_ADDR nextpc =
3876 gdbarch_addr_bits_remove (gdbarch,
3877 arm_get_next_pc_raw (frame, pc, TRUE));
3878 if (nextpc == pc)
3879 error (_("Infinite loop detected"));
3880 return nextpc;
3881 }
3882
3883 /* single_step() is called just before we want to resume the inferior,
3884 if we want to single-step it but there is no hardware or kernel
3885 single-step support. We find the target of the coming instruction
3886 and breakpoint it. */
3887
3888 int
3889 arm_software_single_step (struct frame_info *frame)
3890 {
3891 struct gdbarch *gdbarch = get_frame_arch (frame);
3892 struct address_space *aspace = get_frame_address_space (frame);
3893
3894 /* NOTE: This may insert the wrong breakpoint instruction when
3895 single-stepping over a mode-changing instruction, if the
3896 CPSR heuristics are used. */
3897
3898 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
3899 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
3900
3901 return 1;
3902 }
3903
3904 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
3905 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
3906 NULL if an error occurs. BUF is freed. */
3907
3908 static gdb_byte *
3909 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
3910 int old_len, int new_len)
3911 {
3912 gdb_byte *new_buf, *middle;
3913 int bytes_to_read = new_len - old_len;
3914
3915 new_buf = xmalloc (new_len);
3916 memcpy (new_buf + bytes_to_read, buf, old_len);
3917 xfree (buf);
3918 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
3919 {
3920 xfree (new_buf);
3921 return NULL;
3922 }
3923 return new_buf;
3924 }
3925
3926 /* An IT block is at most the 2-byte IT instruction followed by
3927 four 4-byte instructions. The furthest back we must search to
3928 find an IT block that affects the current instruction is thus
3929 2 + 3 * 4 == 14 bytes. */
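/* For example, if the breakpoint falls on the fourth (last) instruction
   conditionalized by an IT block, and the three instructions between it
   and the IT instruction are all 32-bit, then the IT instruction starts
   exactly 2 + 3 * 4 == 14 bytes before the breakpoint address.  */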
3930 #define MAX_IT_BLOCK_PREFIX 14
3931
3932 /* Use a quick scan if there are more than this many bytes of
3933 code. */
3934 #define IT_SCAN_THRESHOLD 32
3935
3936 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
3937 A breakpoint in an IT block may not be hit, depending on the
3938 condition flags. */
3939 static CORE_ADDR
3940 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
3941 {
3942 gdb_byte *buf;
3943 char map_type;
3944 CORE_ADDR boundary, func_start;
3945 int buf_len, buf2_len;
3946 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
3947 int i, any, last_it, last_it_count;
3948
3949 /* If we are using BKPT breakpoints, none of this is necessary. */
3950 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
3951 return bpaddr;
3952
3953 /* ARM mode does not have this problem. */
3954 if (!arm_pc_is_thumb (gdbarch, bpaddr))
3955 return bpaddr;
3956
3957 /* We are setting a breakpoint in Thumb code that could potentially
3958 contain an IT block. The first step is to find how much Thumb
3959 code there is; we do not need to read outside of known Thumb
3960 sequences. */
3961 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
3962 if (map_type == 0)
3963 /* Thumb-2 code must have mapping symbols to have a chance. */
3964 return bpaddr;
3965
3966 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
3967
3968 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
3969 && func_start > boundary)
3970 boundary = func_start;
3971
3972 /* Search for a candidate IT instruction. We have to do some fancy
3973 footwork to distinguish a real IT instruction from the second
3974 half of a 32-bit instruction, but there is no need for that if
3975 there's no candidate. */
3976 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
3977 if (buf_len == 0)
3978 /* No room for an IT instruction. */
3979 return bpaddr;
3980
3981 buf = xmalloc (buf_len);
3982 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
3983 return bpaddr;
3984 any = 0;
3985 for (i = 0; i < buf_len; i += 2)
3986 {
3987 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3988 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3989 {
3990 any = 1;
3991 break;
3992 }
3993 }
3994 if (any == 0)
3995 {
3996 xfree (buf);
3997 return bpaddr;
3998 }
3999
4000 /* OK, the code bytes before this instruction contain at least one
4001 halfword which resembles an IT instruction. We know that it's
4002 Thumb code, but there are still two possibilities. Either the
4003 halfword really is an IT instruction, or it is the second half of
4004 a 32-bit Thumb instruction. The only way we can tell is to
4005 scan forwards from a known instruction boundary. */
4006 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
4007 {
4008 int definite;
4009
4010 /* There's a lot of code before this instruction. Start with an
4011 optimistic search; it's easy to recognize halfwords that can
4012 not be the start of a 32-bit instruction, and use that to
4013 lock on to the instruction boundaries. */
4014 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
4015 if (buf == NULL)
4016 return bpaddr;
4017 buf_len = IT_SCAN_THRESHOLD;
4018
4019 definite = 0;
4020 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
4021 {
4022 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4023 if (thumb_insn_size (inst1) == 2)
4024 {
4025 definite = 1;
4026 break;
4027 }
4028 }
4029
4030 /* At this point, if DEFINITE, BUF[I] is the first place we
4031 are sure that we know the instruction boundaries, and it is far
4032 enough from BPADDR that we could not miss an IT instruction
4033      affecting BPADDR.  If ! DEFINITE, give up on the quick scan and
4034      start from a known boundary instead.  */
4035 if (! definite)
4036 {
4037 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4038 if (buf == NULL)
4039 return bpaddr;
4040 buf_len = bpaddr - boundary;
4041 i = 0;
4042 }
4043 }
4044 else
4045 {
4046 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4047 if (buf == NULL)
4048 return bpaddr;
4049 buf_len = bpaddr - boundary;
4050 i = 0;
4051 }
4052
4053 /* Scan forwards. Find the last IT instruction before BPADDR. */
4054 last_it = -1;
4055 last_it_count = 0;
4056 while (i < buf_len)
4057 {
4058 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4059 last_it_count--;
4060 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4061 {
4062 last_it = i;
4063 if (inst1 & 0x0001)
4064 last_it_count = 4;
4065 else if (inst1 & 0x0002)
4066 last_it_count = 3;
4067 else if (inst1 & 0x0004)
4068 last_it_count = 2;
4069 else
4070 last_it_count = 1;
4071 }
4072 i += thumb_insn_size (inst1);
4073 }
4074
4075 xfree (buf);
4076
4077 if (last_it == -1)
4078 /* There wasn't really an IT instruction after all. */
4079 return bpaddr;
4080
4081 if (last_it_count < 1)
4082 /* It was too far away. */
4083 return bpaddr;
4084
4085 /* This really is a trouble spot. Move the breakpoint to the IT
4086 instruction. */
4087 return bpaddr - buf_len + last_it;
4088 }
4089
4090 /* ARM displaced stepping support.
4091
4092 Generally ARM displaced stepping works as follows:
4093
4094 1. When an instruction is to be single-stepped, it is first decoded by
4095 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
4096 Depending on the type of instruction, it is then copied to a scratch
4097 location, possibly in a modified form. The copy_* set of functions
4098 performs such modification, as necessary. A breakpoint is placed after
4099 the modified instruction in the scratch space to return control to GDB.
4100 Note in particular that instructions which modify the PC will no longer
4101 do so after modification.
4102
4103 2. The instruction is single-stepped, by setting the PC to the scratch
4104 location address, and resuming. Control returns to GDB when the
4105 breakpoint is hit.
4106
4107 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
4108 function used for the current instruction. This function's job is to
4109 put the CPU/memory state back to what it would have been if the
4110 instruction had been executed unmodified in its original location. */
4111
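/* As a concrete illustration (the register numbers are only an example),
   displaced-stepping "ldr pc, [r5]" proceeds roughly as follows:

   1. copy_ldr_str_ldrb_strb saves r0 and r2, copies r5's value into r2,
      and emits "ldr r0, [r2]" into the scratch space, followed by a
      breakpoint.

   2. The copy executes at the scratch location and hits the breakpoint.

   3. cleanup_load restores the saved registers and writes the value that
      was loaded into r0 to the real PC via LOAD_WRITE_PC, so execution
      continues where the original load would have sent it.  */
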
4112 /* NOP instruction (mov r0, r0). */
4113 #define ARM_NOP 0xe1a00000
4114
4115 /* Helper for register reads for displaced stepping. In particular, this
4116 returns the PC as it would be seen by the instruction at its original
4117 location. */
4118
4119 ULONGEST
4120 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
4121 {
4122 ULONGEST ret;
4123
4124 if (regno == 15)
4125 {
4126 if (debug_displaced)
4127 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
4128 (unsigned long) from + 8);
4129 return (ULONGEST) from + 8; /* Pipeline offset. */
4130 }
4131 else
4132 {
4133 regcache_cooked_read_unsigned (regs, regno, &ret);
4134 if (debug_displaced)
4135 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
4136 regno, (unsigned long) ret);
4137 return ret;
4138 }
4139 }
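
/* For example, when the original instruction was at 0x8000, calling
   displaced_read_reg (regs, 0x8000, 15) returns 0x8008: the value the
   unmodified instruction would have seen for the PC, given the ARM-state
   pipeline offset of 8.  All other registers are read directly from the
   register cache.  */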
4140
4141 static int
4142 displaced_in_arm_mode (struct regcache *regs)
4143 {
4144 ULONGEST ps;
4145 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4146
4147 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4148
4149 return (ps & t_bit) == 0;
4150 }
4151
4152 /* Write to the PC as from a branch instruction. */
4153
4154 static void
4155 branch_write_pc (struct regcache *regs, ULONGEST val)
4156 {
4157 if (displaced_in_arm_mode (regs))
4158 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4159 architecture versions < 6. */
4160 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x3);
4161 else
4162 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x1);
4163 }
4164
4165 /* Write to the PC as from a branch-exchange instruction. */
4166
4167 static void
4168 bx_write_pc (struct regcache *regs, ULONGEST val)
4169 {
4170 ULONGEST ps;
4171 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4172
4173 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4174
4175 if ((val & 1) == 1)
4176 {
4177 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
4178 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
4179 }
4180 else if ((val & 2) == 0)
4181 {
4182 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4183 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
4184 }
4185 else
4186 {
4187 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4188 mode, align dest to 4 bytes). */
4189 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4190 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4191 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
4192 }
4193 }
4194
4195 /* Write to the PC as if from a load instruction. */
4196
4197 static void
4198 load_write_pc (struct regcache *regs, ULONGEST val)
4199 {
4200 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
4201 bx_write_pc (regs, val);
4202 else
4203 branch_write_pc (regs, val);
4204 }
4205
4206 /* Write to the PC as if from an ALU instruction. */
4207
4208 static void
4209 alu_write_pc (struct regcache *regs, ULONGEST val)
4210 {
4211 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
4212 bx_write_pc (regs, val);
4213 else
4214 branch_write_pc (regs, val);
4215 }
4216
4217 /* Helper for writing to registers for displaced stepping. Writing to the PC
4218    has varying effects depending on the instruction which does the write:
4219 this is controlled by the WRITE_PC argument. */
4220
4221 void
4222 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
4223 int regno, ULONGEST val, enum pc_write_style write_pc)
4224 {
4225 if (regno == 15)
4226 {
4227 if (debug_displaced)
4228 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
4229 (unsigned long) val);
4230 switch (write_pc)
4231 {
4232 case BRANCH_WRITE_PC:
4233 branch_write_pc (regs, val);
4234 break;
4235
4236 case BX_WRITE_PC:
4237 bx_write_pc (regs, val);
4238 break;
4239
4240 case LOAD_WRITE_PC:
4241 load_write_pc (regs, val);
4242 break;
4243
4244 case ALU_WRITE_PC:
4245 alu_write_pc (regs, val);
4246 break;
4247
4248 case CANNOT_WRITE_PC:
4249 warning (_("Instruction wrote to PC in an unexpected way when "
4250 "single-stepping"));
4251 break;
4252
4253 default:
4254 internal_error (__FILE__, __LINE__,
4255 _("Invalid argument to displaced_write_reg"));
4256 }
4257
4258 dsc->wrote_to_pc = 1;
4259 }
4260 else
4261 {
4262 if (debug_displaced)
4263 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
4264 regno, (unsigned long) val);
4265 regcache_cooked_write_unsigned (regs, regno, val);
4266 }
4267 }
4268
4269 /* This function is used to concisely determine if an instruction INSN
4270 references PC. Register fields of interest in INSN should have the
4271    corresponding fields of BITMASK set to 0b1111.  The function returns 1
4272 if any of these fields in INSN reference the PC (also 0b1111, r15), else it
4273 returns 0. */
4274
4275 static int
4276 insn_references_pc (uint32_t insn, uint32_t bitmask)
4277 {
4278 uint32_t lowbit = 1;
4279
4280 while (bitmask != 0)
4281 {
4282 uint32_t mask;
4283
4284 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
4285 ;
4286
4287 if (!lowbit)
4288 break;
4289
4290 mask = lowbit * 0xf;
4291
4292 if ((insn & mask) == mask)
4293 return 1;
4294
4295 bitmask &= ~mask;
4296 }
4297
4298 return 0;
4299 }
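
/* For instance, copy_alu_imm below calls
   insn_references_pc (insn, 0x000ff000ul), which checks the Rn field
   (bits 16..19) and the Rd field (bits 12..15): it returns 1 only if at
   least one of those fields is 0xf, i.e. names the PC.  */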
4300
4301 /* The simplest copy function. Many instructions have the same effect no
4302 matter what address they are executed at: in those cases, use this. */
4303
4304 static int
4305 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
4306 const char *iname, struct displaced_step_closure *dsc)
4307 {
4308 if (debug_displaced)
4309 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
4310 "opcode/class '%s' unmodified\n", (unsigned long) insn,
4311 iname);
4312
4313 dsc->modinsn[0] = insn;
4314
4315 return 0;
4316 }
4317
4318 /* Preload instructions with immediate offset. */
4319
4320 static void
4321 cleanup_preload (struct gdbarch *gdbarch,
4322 struct regcache *regs, struct displaced_step_closure *dsc)
4323 {
4324 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4325 if (!dsc->u.preload.immed)
4326 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4327 }
4328
4329 static int
4330 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4331 struct displaced_step_closure *dsc)
4332 {
4333 unsigned int rn = bits (insn, 16, 19);
4334 ULONGEST rn_val;
4335 CORE_ADDR from = dsc->insn_addr;
4336
4337 if (!insn_references_pc (insn, 0x000f0000ul))
4338 return copy_unmodified (gdbarch, insn, "preload", dsc);
4339
4340 if (debug_displaced)
4341 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4342 (unsigned long) insn);
4343
4344 /* Preload instructions:
4345
4346 {pli/pld} [rn, #+/-imm]
4347 ->
4348 {pli/pld} [r0, #+/-imm]. */
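  /* (The AND with 0xfff0ffff below clears bits 16..19, the base
     register field, so the rewritten instruction uses r0 as its
     base.)  */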
4349
4350 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4351 rn_val = displaced_read_reg (regs, from, rn);
4352 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4353
4354 dsc->u.preload.immed = 1;
4355
4356 dsc->modinsn[0] = insn & 0xfff0ffff;
4357
4358 dsc->cleanup = &cleanup_preload;
4359
4360 return 0;
4361 }
4362
4363 /* Preload instructions with register offset. */
4364
4365 static int
4366 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4367 struct displaced_step_closure *dsc)
4368 {
4369 unsigned int rn = bits (insn, 16, 19);
4370 unsigned int rm = bits (insn, 0, 3);
4371 ULONGEST rn_val, rm_val;
4372 CORE_ADDR from = dsc->insn_addr;
4373
4374 if (!insn_references_pc (insn, 0x000f000ful))
4375 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
4376
4377 if (debug_displaced)
4378 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4379 (unsigned long) insn);
4380
4381 /* Preload register-offset instructions:
4382
4383 {pli/pld} [rn, rm {, shift}]
4384 ->
4385 {pli/pld} [r0, r1 {, shift}]. */
4386
4387 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4388 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4389 rn_val = displaced_read_reg (regs, from, rn);
4390 rm_val = displaced_read_reg (regs, from, rm);
4391 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4392 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
4393
4394 dsc->u.preload.immed = 0;
4395
4396 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
4397
4398 dsc->cleanup = &cleanup_preload;
4399
4400 return 0;
4401 }
4402
4403 /* Copy/cleanup coprocessor load and store instructions. */
4404
4405 static void
4406 cleanup_copro_load_store (struct gdbarch *gdbarch,
4407 struct regcache *regs,
4408 struct displaced_step_closure *dsc)
4409 {
4410 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4411
4412 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4413
4414 if (dsc->u.ldst.writeback)
4415 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
4416 }
4417
4418 static int
4419 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
4420 struct regcache *regs,
4421 struct displaced_step_closure *dsc)
4422 {
4423 unsigned int rn = bits (insn, 16, 19);
4424 ULONGEST rn_val;
4425 CORE_ADDR from = dsc->insn_addr;
4426
4427 if (!insn_references_pc (insn, 0x000f0000ul))
4428 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
4429
4430 if (debug_displaced)
4431 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
4432 "load/store insn %.8lx\n", (unsigned long) insn);
4433
4434 /* Coprocessor load/store instructions:
4435
4436 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4437 ->
4438 {stc/stc2} [r0, #+/-imm].
4439
4440 ldc/ldc2 are handled identically. */
4441
4442 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4443 rn_val = displaced_read_reg (regs, from, rn);
4444 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4445
4446 dsc->u.ldst.writeback = bit (insn, 25);
4447 dsc->u.ldst.rn = rn;
4448
4449 dsc->modinsn[0] = insn & 0xfff0ffff;
4450
4451 dsc->cleanup = &cleanup_copro_load_store;
4452
4453 return 0;
4454 }
4455
4456 /* Clean up branch instructions (actually perform the branch, by setting
4457 PC). */
4458
4459 static void
4460 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
4461 struct displaced_step_closure *dsc)
4462 {
4463 ULONGEST from = dsc->insn_addr;
4464 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4465 int branch_taken = condition_true (dsc->u.branch.cond, status);
4466 enum pc_write_style write_pc = dsc->u.branch.exchange
4467 ? BX_WRITE_PC : BRANCH_WRITE_PC;
4468
4469 if (!branch_taken)
4470 return;
4471
4472 if (dsc->u.branch.link)
4473 {
4474 ULONGEST pc = displaced_read_reg (regs, from, 15);
4475 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
4476 }
4477
4478 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
4479 }
4480
4481 /* Copy B/BL/BLX instructions with immediate destinations. */
4482
4483 static int
4484 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
4485 struct regcache *regs, struct displaced_step_closure *dsc)
4486 {
4487 unsigned int cond = bits (insn, 28, 31);
4488 int exchange = (cond == 0xf);
4489 int link = exchange || bit (insn, 24);
4490 CORE_ADDR from = dsc->insn_addr;
4491 long offset;
4492
4493 if (debug_displaced)
4494 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
4495 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
4496 (unsigned long) insn);
4497
4498 /* Implement "BL<cond> <label>" as:
4499
4500 Preparation: cond <- instruction condition
4501 Insn: mov r0, r0 (nop)
4502 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
4503
4504 B<cond> similar, but don't set r14 in cleanup. */
4505
4506 if (exchange)
4507 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
4508 then arrange the switch into Thumb mode. */
4509 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
4510 else
4511 offset = bits (insn, 0, 23) << 2;
4512
4513 if (bit (offset, 25))
4514 offset = offset | ~0x3ffffff;
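  /* For example, "b ." is 0xeafffffe: bits (insn, 0, 23) is 0xfffffe,
     so OFFSET becomes 0x3fffff8 and is sign-extended here to -8, and
     the destination computed below is from + 8 - 8, i.e. the branch
     instruction itself.  */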
4515
4516 dsc->u.branch.cond = cond;
4517 dsc->u.branch.link = link;
4518 dsc->u.branch.exchange = exchange;
4519 dsc->u.branch.dest = from + 8 + offset;
4520
4521 dsc->modinsn[0] = ARM_NOP;
4522
4523 dsc->cleanup = &cleanup_branch;
4524
4525 return 0;
4526 }
4527
4528 /* Copy BX/BLX with register-specified destinations. */
4529
4530 static int
4531 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
4532 struct regcache *regs, struct displaced_step_closure *dsc)
4533 {
4534 unsigned int cond = bits (insn, 28, 31);
4535 /* BX: x12xxx1x
4536 BLX: x12xxx3x. */
4537 int link = bit (insn, 5);
4538 unsigned int rm = bits (insn, 0, 3);
4539 CORE_ADDR from = dsc->insn_addr;
4540
4541 if (debug_displaced)
4542 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
4543 "%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
4544
4545 /* Implement {BX,BLX}<cond> <reg>" as:
4546
4547 Preparation: cond <- instruction condition
4548 Insn: mov r0, r0 (nop)
4549 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
4550
4551 Don't set r14 in cleanup for BX. */
4552
4553 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
4554
4555 dsc->u.branch.cond = cond;
4556 dsc->u.branch.link = link;
4557 dsc->u.branch.exchange = 1;
4558
4559 dsc->modinsn[0] = ARM_NOP;
4560
4561 dsc->cleanup = &cleanup_branch;
4562
4563 return 0;
4564 }
4565
4566 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
4567
4568 static void
4569 cleanup_alu_imm (struct gdbarch *gdbarch,
4570 struct regcache *regs, struct displaced_step_closure *dsc)
4571 {
4572 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4573 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4574 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4575 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4576 }
4577
4578 static int
4579 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4580 struct displaced_step_closure *dsc)
4581 {
4582 unsigned int rn = bits (insn, 16, 19);
4583 unsigned int rd = bits (insn, 12, 15);
4584 unsigned int op = bits (insn, 21, 24);
4585 int is_mov = (op == 0xd);
4586 ULONGEST rd_val, rn_val;
4587 CORE_ADDR from = dsc->insn_addr;
4588
4589 if (!insn_references_pc (insn, 0x000ff000ul))
4590 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
4591
4592 if (debug_displaced)
4593 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
4594 "%.8lx\n", is_mov ? "move" : "ALU",
4595 (unsigned long) insn);
4596
4597 /* Instruction is of form:
4598
4599 <op><cond> rd, [rn,] #imm
4600
4601 Rewrite as:
4602
4603 Preparation: tmp1, tmp2 <- r0, r1;
4604 r0, r1 <- rd, rn
4605 Insn: <op><cond> r0, r1, #imm
4606 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
4607 */
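  /* A purely illustrative example: "add pc, pc, #4" is 0xe28ff004;
     masked with 0xfff00fff and OR-ed with 0x10000 it becomes 0xe2810004,
     i.e. "add r0, r1, #4".  The preparation below copies rn (for the PC,
     the value from + 8) into r1, and the cleanup writes r0 back into the
     real PC via ALU_WRITE_PC.  */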
4608
4609 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4610 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4611 rn_val = displaced_read_reg (regs, from, rn);
4612 rd_val = displaced_read_reg (regs, from, rd);
4613 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4614 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4615 dsc->rd = rd;
4616
4617 if (is_mov)
4618 dsc->modinsn[0] = insn & 0xfff00fff;
4619 else
4620 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
4621
4622 dsc->cleanup = &cleanup_alu_imm;
4623
4624 return 0;
4625 }
4626
4627 /* Copy/cleanup arithmetic/logic insns with register RHS. */
4628
4629 static void
4630 cleanup_alu_reg (struct gdbarch *gdbarch,
4631 struct regcache *regs, struct displaced_step_closure *dsc)
4632 {
4633 ULONGEST rd_val;
4634 int i;
4635
4636 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4637
4638 for (i = 0; i < 3; i++)
4639 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4640
4641 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4642 }
4643
4644 static int
4645 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4646 struct displaced_step_closure *dsc)
4647 {
4648 unsigned int rn = bits (insn, 16, 19);
4649 unsigned int rm = bits (insn, 0, 3);
4650 unsigned int rd = bits (insn, 12, 15);
4651 unsigned int op = bits (insn, 21, 24);
4652 int is_mov = (op == 0xd);
4653 ULONGEST rd_val, rn_val, rm_val;
4654 CORE_ADDR from = dsc->insn_addr;
4655
4656 if (!insn_references_pc (insn, 0x000ff00ful))
4657 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
4658
4659 if (debug_displaced)
4660 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
4661 is_mov ? "move" : "ALU", (unsigned long) insn);
4662
4663 /* Instruction is of form:
4664
4665 <op><cond> rd, [rn,] rm [, <shift>]
4666
4667 Rewrite as:
4668
4669 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
4670 r0, r1, r2 <- rd, rn, rm
4671 Insn: <op><cond> r0, r1, r2 [, <shift>]
4672 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
4673 */
4674
4675 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4676 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4677 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4678 rd_val = displaced_read_reg (regs, from, rd);
4679 rn_val = displaced_read_reg (regs, from, rn);
4680 rm_val = displaced_read_reg (regs, from, rm);
4681 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4682 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4683 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4684 dsc->rd = rd;
4685
4686 if (is_mov)
4687 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
4688 else
4689 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
4690
4691 dsc->cleanup = &cleanup_alu_reg;
4692
4693 return 0;
4694 }
4695
4696 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
4697
4698 static void
4699 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
4700 struct regcache *regs,
4701 struct displaced_step_closure *dsc)
4702 {
4703 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4704 int i;
4705
4706 for (i = 0; i < 4; i++)
4707 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4708
4709 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4710 }
4711
4712 static int
4713 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
4714 struct regcache *regs, struct displaced_step_closure *dsc)
4715 {
4716 unsigned int rn = bits (insn, 16, 19);
4717 unsigned int rm = bits (insn, 0, 3);
4718 unsigned int rd = bits (insn, 12, 15);
4719 unsigned int rs = bits (insn, 8, 11);
4720 unsigned int op = bits (insn, 21, 24);
4721 int is_mov = (op == 0xd), i;
4722 ULONGEST rd_val, rn_val, rm_val, rs_val;
4723 CORE_ADDR from = dsc->insn_addr;
4724
4725 if (!insn_references_pc (insn, 0x000fff0ful))
4726 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
4727
4728 if (debug_displaced)
4729 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
4730 "%.8lx\n", is_mov ? "move" : "ALU",
4731 (unsigned long) insn);
4732
4733 /* Instruction is of form:
4734
4735 <op><cond> rd, [rn,] rm, <shift> rs
4736
4737 Rewrite as:
4738
4739 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
4740 r0, r1, r2, r3 <- rd, rn, rm, rs
4741 Insn: <op><cond> r0, r1, r2, <shift> r3
4742 Cleanup: tmp5 <- r0
4743 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
4744 rd <- tmp5
4745 */
4746
4747 for (i = 0; i < 4; i++)
4748 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4749
4750 rd_val = displaced_read_reg (regs, from, rd);
4751 rn_val = displaced_read_reg (regs, from, rn);
4752 rm_val = displaced_read_reg (regs, from, rm);
4753 rs_val = displaced_read_reg (regs, from, rs);
4754 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4755 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4756 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4757 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
4758 dsc->rd = rd;
4759
4760 if (is_mov)
4761 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
4762 else
4763 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
4764
4765 dsc->cleanup = &cleanup_alu_shifted_reg;
4766
4767 return 0;
4768 }
4769
4770 /* Clean up load instructions. */
4771
4772 static void
4773 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
4774 struct displaced_step_closure *dsc)
4775 {
4776 ULONGEST rt_val, rt_val2 = 0, rn_val;
4777 CORE_ADDR from = dsc->insn_addr;
4778
4779 rt_val = displaced_read_reg (regs, from, 0);
4780 if (dsc->u.ldst.xfersize == 8)
4781 rt_val2 = displaced_read_reg (regs, from, 1);
4782 rn_val = displaced_read_reg (regs, from, 2);
4783
4784 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4785 if (dsc->u.ldst.xfersize > 4)
4786 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4787 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4788 if (!dsc->u.ldst.immed)
4789 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4790
4791 /* Handle register writeback. */
4792 if (dsc->u.ldst.writeback)
4793 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4794 /* Put result in right place. */
4795 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
4796 if (dsc->u.ldst.xfersize == 8)
4797 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
4798 }
4799
4800 /* Clean up store instructions. */
4801
4802 static void
4803 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
4804 struct displaced_step_closure *dsc)
4805 {
4806 CORE_ADDR from = dsc->insn_addr;
4807 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
4808
4809 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4810 if (dsc->u.ldst.xfersize > 4)
4811 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4812 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4813 if (!dsc->u.ldst.immed)
4814 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4815 if (!dsc->u.ldst.restore_r4)
4816 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
4817
4818 /* Writeback. */
4819 if (dsc->u.ldst.writeback)
4820 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4821 }
4822
4823 /* Copy "extra" load/store instructions. These are halfword/doubleword
4824 transfers, which have a different encoding to byte/word transfers. */
4825
4826 static int
4827 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
4828 struct regcache *regs, struct displaced_step_closure *dsc)
4829 {
4830 unsigned int op1 = bits (insn, 20, 24);
4831 unsigned int op2 = bits (insn, 5, 6);
4832 unsigned int rt = bits (insn, 12, 15);
4833 unsigned int rn = bits (insn, 16, 19);
4834 unsigned int rm = bits (insn, 0, 3);
4835 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
4836 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
4837 int immed = (op1 & 0x4) != 0;
4838 int opcode;
4839 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
4840 CORE_ADDR from = dsc->insn_addr;
4841
4842 if (!insn_references_pc (insn, 0x000ff00ful))
4843 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
4844
4845 if (debug_displaced)
4846 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
4847 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
4848 (unsigned long) insn);
4849
4850 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
4851
4852 if (opcode < 0)
4853 internal_error (__FILE__, __LINE__,
4854 _("copy_extra_ld_st: instruction decode error"));
4855
4856 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4857 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4858 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4859 if (!immed)
4860 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4861
4862 rt_val = displaced_read_reg (regs, from, rt);
4863 if (bytesize[opcode] == 8)
4864 rt_val2 = displaced_read_reg (regs, from, rt + 1);
4865 rn_val = displaced_read_reg (regs, from, rn);
4866 if (!immed)
4867 rm_val = displaced_read_reg (regs, from, rm);
4868
4869 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4870 if (bytesize[opcode] == 8)
4871 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
4872 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4873 if (!immed)
4874 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4875
4876 dsc->rd = rt;
4877 dsc->u.ldst.xfersize = bytesize[opcode];
4878 dsc->u.ldst.rn = rn;
4879 dsc->u.ldst.immed = immed;
4880 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4881 dsc->u.ldst.restore_r4 = 0;
4882
4883 if (immed)
4884 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
4885 ->
4886 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
4887 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4888 else
4889 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
4890 ->
4891 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
4892 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4893
4894 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
4895
4896 return 0;
4897 }
4898
4899 /* Copy byte/word loads and stores. */
4900
4901 static int
4902 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
4903 struct regcache *regs,
4904 struct displaced_step_closure *dsc, int load, int byte,
4905 int usermode)
4906 {
4907 int immed = !bit (insn, 25);
4908 unsigned int rt = bits (insn, 12, 15);
4909 unsigned int rn = bits (insn, 16, 19);
4910 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
4911 ULONGEST rt_val, rn_val, rm_val = 0;
4912 CORE_ADDR from = dsc->insn_addr;
4913
4914 if (!insn_references_pc (insn, 0x000ff00ful))
4915 return copy_unmodified (gdbarch, insn, "load/store", dsc);
4916
4917 if (debug_displaced)
4918 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
4919 load ? (byte ? "ldrb" : "ldr")
4920 : (byte ? "strb" : "str"), usermode ? "t" : "",
4921 (unsigned long) insn);
4922
4923 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4924 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4925 if (!immed)
4926 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4927 if (!load)
4928 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
4929
4930 rt_val = displaced_read_reg (regs, from, rt);
4931 rn_val = displaced_read_reg (regs, from, rn);
4932 if (!immed)
4933 rm_val = displaced_read_reg (regs, from, rm);
4934
4935 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4936 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4937 if (!immed)
4938 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4939
4940 dsc->rd = rt;
4941 dsc->u.ldst.xfersize = byte ? 1 : 4;
4942 dsc->u.ldst.rn = rn;
4943 dsc->u.ldst.immed = immed;
4944 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4945
4946 /* To write PC we can do:
4947
4948 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
4949 scratch+4: ldr r4, temp
4950 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
4951 scratch+12: add r4, r4, #8 (r4 = offset)
4952 scratch+16: add r0, r0, r4
4953 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
4954 scratch+24: <temp>
4955
4956 Otherwise we don't know what value to write for PC, since the offset is
4957 architecture-dependent (sometimes PC+8, sometimes PC+12). */
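  /* The six instructions of that sequence are placed in modinsn[0..5]
     below; modinsn[6] and modinsn[7] are zeroed, providing the
     breakpoint location and the <temp> scratch word respectively.  */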
4958
4959 if (load || rt != 15)
4960 {
4961 dsc->u.ldst.restore_r4 = 0;
4962
4963 if (immed)
4964 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
4965 ->
4966 {ldr,str}[b]<cond> r0, [r2, #imm]. */
4967 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4968 else
4969 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
4970 ->
4971 {ldr,str}[b]<cond> r0, [r2, r3]. */
4972 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4973 }
4974 else
4975 {
4976 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
4977 dsc->u.ldst.restore_r4 = 1;
4978
4979 dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
4980 dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
4981 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
4982 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
4983 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
4984
4985 /* As above. */
4986 if (immed)
4987 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
4988 else
4989 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
4990
4991 dsc->modinsn[6] = 0x0; /* breakpoint location. */
4992 dsc->modinsn[7] = 0x0; /* scratch space. */
4993
4994 dsc->numinsns = 6;
4995 }
4996
4997 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
4998
4999 return 0;
5000 }
5001
5002 /* Cleanup LDM instructions with fully-populated register list. This is an
5003 unfortunate corner case: it's impossible to implement correctly by modifying
5004 the instruction. The issue is as follows: we have an instruction,
5005
5006 ldm rN, {r0-r15}
5007
5008 which we must rewrite to avoid loading PC. A possible solution would be to
5009 do the load in two halves, something like (with suitable cleanup
5010 afterwards):
5011
5012 mov r8, rN
5013 ldm[id][ab] r8!, {r0-r7}
5014 str r7, <temp>
5015 ldm[id][ab] r8, {r7-r14}
5016 <bkpt>
5017
5018 but at present there's no suitable place for <temp>, since the scratch space
5019 is overwritten before the cleanup routine is called. For now, we simply
5020 emulate the instruction. */
5021
5022 static void
5023 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
5024 struct displaced_step_closure *dsc)
5025 {
5026 ULONGEST from = dsc->insn_addr;
5027 int inc = dsc->u.block.increment;
5028 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
5029 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
5030 uint32_t regmask = dsc->u.block.regmask;
5031 int regno = inc ? 0 : 15;
5032 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
5033 int exception_return = dsc->u.block.load && dsc->u.block.user
5034 && (regmask & 0x8000) != 0;
5035 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5036 int do_transfer = condition_true (dsc->u.block.cond, status);
5037 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5038
5039 if (!do_transfer)
5040 return;
5041
5042 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
5043 sensible we can do here. Complain loudly. */
5044 if (exception_return)
5045 error (_("Cannot single-step exception return"));
5046
5047 /* We don't handle any stores here for now. */
5048 gdb_assert (dsc->u.block.load != 0);
5049
5050 if (debug_displaced)
5051 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
5052 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
5053 dsc->u.block.increment ? "inc" : "dec",
5054 dsc->u.block.before ? "before" : "after");
5055
5056 while (regmask)
5057 {
5058 uint32_t memword;
5059
5060 if (inc)
5061 while (regno <= 15 && (regmask & (1 << regno)) == 0)
5062 regno++;
5063 else
5064 while (regno >= 0 && (regmask & (1 << regno)) == 0)
5065 regno--;
5066
5067 xfer_addr += bump_before;
5068
5069 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
5070 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
5071
5072 xfer_addr += bump_after;
5073
5074 regmask &= ~(1 << regno);
5075 }
5076
5077 if (dsc->u.block.writeback)
5078 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
5079 CANNOT_WRITE_PC);
5080 }
5081
5082 /* Clean up an STM which included the PC in the register list. */
5083
5084 static void
5085 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
5086 struct displaced_step_closure *dsc)
5087 {
5088 ULONGEST from = dsc->insn_addr;
5089 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5090 int store_executed = condition_true (dsc->u.block.cond, status);
5091 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
5092 CORE_ADDR stm_insn_addr;
5093 uint32_t pc_val;
5094 long offset;
5095 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5096
5097 /* If condition code fails, there's nothing else to do. */
5098 if (!store_executed)
5099 return;
5100
5101 if (dsc->u.block.increment)
5102 {
5103 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
5104
5105 if (dsc->u.block.before)
5106 pc_stored_at += 4;
5107 }
5108 else
5109 {
5110 pc_stored_at = dsc->u.block.xfer_addr;
5111
5112 if (dsc->u.block.before)
5113 pc_stored_at -= 4;
5114 }
5115
5116 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
5117 stm_insn_addr = dsc->scratch_base;
5118 offset = pc_val - stm_insn_addr;
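  /* For example, on a core that stores PC + 8, the word at PC_STORED_AT
     is scratch_base + 8, so OFFSET is 8 and the value written back below
     is insn_addr + 8, exactly what the non-displaced STM would have
     stored.  */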
5119
5120 if (debug_displaced)
5121 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
5122 "STM instruction\n", offset);
5123
5124 /* Rewrite the stored PC to the proper value for the non-displaced original
5125 instruction. */
5126 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
5127 dsc->insn_addr + offset);
5128 }
5129
5130 /* Clean up an LDM which includes the PC in the register list. We clumped all
5131 the registers in the transferred list into a contiguous range r0...rX (to
5132 avoid loading PC directly and losing control of the debugged program), so we
5133 must undo that here. */
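/* As an illustration (the register choice is only an example),
   "ldm r6, {r4, r9, pc}" is rewritten by copy_block_xfer into
   "ldm r6, {r0, r1, r2}".  The cleanup below copies r2 into the PC,
   r1 into r9 and r0 into r4, restores any of r0-r2 that were only used
   as temporaries from DSC->tmp, and finally performs any writeback of
   r6 by hand.  */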
5134
5135 static void
5136 cleanup_block_load_pc (struct gdbarch *gdbarch,
5137 struct regcache *regs,
5138 struct displaced_step_closure *dsc)
5139 {
5140 ULONGEST from = dsc->insn_addr;
5141 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5142 int load_executed = condition_true (dsc->u.block.cond, status), i;
5143 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
5144 unsigned int regs_loaded = bitcount (mask);
5145 unsigned int num_to_shuffle = regs_loaded, clobbered;
5146
5147 /* The method employed here will fail if the register list is fully populated
5148 (we need to avoid loading PC directly). */
5149 gdb_assert (num_to_shuffle < 16);
5150
5151 if (!load_executed)
5152 return;
5153
5154 clobbered = (1 << num_to_shuffle) - 1;
5155
5156 while (num_to_shuffle > 0)
5157 {
5158 if ((mask & (1 << write_reg)) != 0)
5159 {
5160 unsigned int read_reg = num_to_shuffle - 1;
5161
5162 if (read_reg != write_reg)
5163 {
5164 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
5165 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
5166 if (debug_displaced)
5167 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
5168 "loaded register r%d to r%d\n"), read_reg,
5169 write_reg);
5170 }
5171 else if (debug_displaced)
5172 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
5173 "r%d already in the right place\n"),
5174 write_reg);
5175
5176 clobbered &= ~(1 << write_reg);
5177
5178 num_to_shuffle--;
5179 }
5180
5181 write_reg--;
5182 }
5183
5184 /* Restore any registers we scribbled over. */
5185 for (write_reg = 0; clobbered != 0; write_reg++)
5186 {
5187 if ((clobbered & (1 << write_reg)) != 0)
5188 {
5189 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
5190 CANNOT_WRITE_PC);
5191 if (debug_displaced)
5192 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
5193 "clobbered register r%d\n"), write_reg);
5194 clobbered &= ~(1 << write_reg);
5195 }
5196 }
5197
5198 /* Perform register writeback manually. */
5199 if (dsc->u.block.writeback)
5200 {
5201 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
5202
5203 if (dsc->u.block.increment)
5204 new_rn_val += regs_loaded * 4;
5205 else
5206 new_rn_val -= regs_loaded * 4;
5207
5208 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
5209 CANNOT_WRITE_PC);
5210 }
5211 }
5212
5213 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
5214 in user-level code (in particular exception return, ldm rn, {...pc}^). */
5215
5216 static int
5217 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5218 struct displaced_step_closure *dsc)
5219 {
5220 int load = bit (insn, 20);
5221 int user = bit (insn, 22);
5222 int increment = bit (insn, 23);
5223 int before = bit (insn, 24);
5224 int writeback = bit (insn, 21);
5225 int rn = bits (insn, 16, 19);
5226 CORE_ADDR from = dsc->insn_addr;
5227
5228 /* Block transfers which don't mention PC can be run directly out-of-line. */
5229 if (rn != 15 && (insn & 0x8000) == 0)
5230 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
5231
5232 if (rn == 15)
5233 {
5234 warning (_("displaced: Unpredictable LDM or STM with base register r15"));
5235 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
5236 }
5237
5238 if (debug_displaced)
5239 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
5240 "%.8lx\n", (unsigned long) insn);
5241
5242 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
5243 dsc->u.block.rn = rn;
5244
5245 dsc->u.block.load = load;
5246 dsc->u.block.user = user;
5247 dsc->u.block.increment = increment;
5248 dsc->u.block.before = before;
5249 dsc->u.block.writeback = writeback;
5250 dsc->u.block.cond = bits (insn, 28, 31);
5251
5252 dsc->u.block.regmask = insn & 0xffff;
5253
5254 if (load)
5255 {
5256 if ((insn & 0xffff) == 0xffff)
5257 {
5258 /* LDM with a fully-populated register list. This case is
5259 particularly tricky. Implement for now by fully emulating the
5260 instruction (which might not behave perfectly in all cases, but
5261 these instructions should be rare enough for that not to matter
5262 too much). */
5263 dsc->modinsn[0] = ARM_NOP;
5264
5265 dsc->cleanup = &cleanup_block_load_all;
5266 }
5267 else
5268 {
5269 /* LDM of a list of registers which includes PC. Implement by
5270 rewriting the list of registers to be transferred into a
5271 contiguous chunk r0...rX before doing the transfer, then shuffling
5272 registers into the correct places in the cleanup routine. */
5273 unsigned int regmask = insn & 0xffff;
5274 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
5275 unsigned int to = 0, from = 0, i, new_rn;
5276
5277 for (i = 0; i < num_in_list; i++)
5278 dsc->tmp[i] = displaced_read_reg (regs, from, i);
5279
5280 /* Writeback makes things complicated. We need to avoid clobbering
5281 the base register with one of the registers in our modified
5282 register list, but just using a different register can't work in
5283 all cases, e.g.:
5284
5285 ldm r14!, {r0-r13,pc}
5286
5287 which would need to be rewritten as:
5288
5289 ldm rN!, {r0-r14}
5290
5291 but that can't work, because there's no free register for N.
5292
5293 Solve this by turning off the writeback bit, and emulating
5294 writeback manually in the cleanup routine. */
5295
5296 if (writeback)
5297 insn &= ~(1 << 21);
5298
5299 new_regmask = (1 << num_in_list) - 1;
5300
5301 if (debug_displaced)
5302 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
5303 "{..., pc}: original reg list %.4x, modified "
5304 "list %.4x\n"), rn, writeback ? "!" : "",
5305 (int) insn & 0xffff, new_regmask);
5306
5307 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
5308
5309 dsc->cleanup = &cleanup_block_load_pc;
5310 }
5311 }
5312 else
5313 {
5314 /* STM of a list of registers which includes PC. Run the instruction
5315 as-is, but out of line: this will store the wrong value for the PC,
5316 so we must manually fix up the memory in the cleanup routine.
5317 Doing things this way has the advantage that we can auto-detect
5318 the offset of the PC write (which is architecture-dependent) in
5319 the cleanup routine. */
5320 dsc->modinsn[0] = insn;
5321
5322 dsc->cleanup = &cleanup_block_store_pc;
5323 }
5324
5325 return 0;
5326 }
5327
5328 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
5329 for Linux, where some SVC instructions must be treated specially. */
5330
5331 static void
5332 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
5333 struct displaced_step_closure *dsc)
5334 {
5335 CORE_ADDR from = dsc->insn_addr;
5336 CORE_ADDR resume_addr = from + 4;
5337
5338 if (debug_displaced)
5339 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
5340 "%.8lx\n", (unsigned long) resume_addr);
5341
5342 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
5343 }
5344
5345 static int
5346 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5347 struct regcache *regs, struct displaced_step_closure *dsc)
5348 {
5349 CORE_ADDR from = dsc->insn_addr;
5350
5351 /* Allow OS-specific code to override SVC handling. */
5352 if (dsc->u.svc.copy_svc_os)
5353 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
5354
5355 if (debug_displaced)
5356 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
5357 (unsigned long) insn);
5358
5359 /* Preparation: none.
5360 Insn: unmodified svc.
5361 Cleanup: pc <- insn_addr + 4. */
5362
5363 dsc->modinsn[0] = insn;
5364
5365 dsc->cleanup = &cleanup_svc;
5366 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
5367 instruction. */
5368 dsc->wrote_to_pc = 1;
5369
5370 return 0;
5371 }
5372
5373 /* Copy undefined instructions. */
5374
5375 static int
5376 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
5377 struct displaced_step_closure *dsc)
5378 {
5379 if (debug_displaced)
5380 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
5381 (unsigned long) insn);
5382
5383 dsc->modinsn[0] = insn;
5384
5385 return 0;
5386 }
5387
5388 /* Copy unpredictable instructions. */
5389
5390 static int
5391 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
5392 struct displaced_step_closure *dsc)
5393 {
5394 if (debug_displaced)
5395 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
5396 "%.8lx\n", (unsigned long) insn);
5397
5398 dsc->modinsn[0] = insn;
5399
5400 return 0;
5401 }
5402
5403 /* The decode_* functions are instruction decoding helpers. They mostly follow
5404 the presentation in the ARM ARM. */
5405
5406 static int
5407 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
5408 struct regcache *regs,
5409 struct displaced_step_closure *dsc)
5410 {
5411 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
5412 unsigned int rn = bits (insn, 16, 19);
5413
5414 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
5415 return copy_unmodified (gdbarch, insn, "cps", dsc);
5416 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
5417 return copy_unmodified (gdbarch, insn, "setend", dsc);
5418 else if ((op1 & 0x60) == 0x20)
5419 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
5420 else if ((op1 & 0x71) == 0x40)
5421 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
5422 else if ((op1 & 0x77) == 0x41)
5423 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5424 else if ((op1 & 0x77) == 0x45)
5425 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
5426 else if ((op1 & 0x77) == 0x51)
5427 {
5428 if (rn != 0xf)
5429 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5430 else
5431 return copy_unpred (gdbarch, insn, dsc);
5432 }
5433 else if ((op1 & 0x77) == 0x55)
5434 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5435 else if (op1 == 0x57)
5436 switch (op2)
5437 {
5438 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
5439 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
5440 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
5441 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
5442 default: return copy_unpred (gdbarch, insn, dsc);
5443 }
5444 else if ((op1 & 0x63) == 0x43)
5445 return copy_unpred (gdbarch, insn, dsc);
5446 else if ((op2 & 0x1) == 0x0)
5447 switch (op1 & ~0x80)
5448 {
5449 case 0x61:
5450 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5451 case 0x65:
5452 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
5453 case 0x71: case 0x75:
5454 /* pld/pldw reg. */
5455 return copy_preload_reg (gdbarch, insn, regs, dsc);
5456 case 0x63: case 0x67: case 0x73: case 0x77:
5457 return copy_unpred (gdbarch, insn, dsc);
5458 default:
5459 return copy_undef (gdbarch, insn, dsc);
5460 }
5461 else
5462 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
5463 }
5464
5465 static int
5466 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
5467 struct regcache *regs, struct displaced_step_closure *dsc)
5468 {
5469 if (bit (insn, 27) == 0)
5470 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
5471 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
5472 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
5473 {
5474 case 0x0: case 0x2:
5475 return copy_unmodified (gdbarch, insn, "srs", dsc);
5476
5477 case 0x1: case 0x3:
5478 return copy_unmodified (gdbarch, insn, "rfe", dsc);
5479
5480 case 0x4: case 0x5: case 0x6: case 0x7:
5481 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5482
5483 case 0x8:
5484 switch ((insn & 0xe00000) >> 21)
5485 {
5486 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
5487 /* stc/stc2. */
5488 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5489
5490 case 0x2:
5491 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5492
5493 default:
5494 return copy_undef (gdbarch, insn, dsc);
5495 }
5496
5497 case 0x9:
5498 {
5499 int rn_f = (bits (insn, 16, 19) == 0xf);
5500 switch ((insn & 0xe00000) >> 21)
5501 {
5502 case 0x1: case 0x3:
5503 /* ldc/ldc2 imm (undefined for rn == pc). */
5504 return rn_f ? copy_undef (gdbarch, insn, dsc)
5505 : copy_copro_load_store (gdbarch, insn, regs, dsc);
5506
5507 case 0x2:
5508 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5509
5510 case 0x4: case 0x5: case 0x6: case 0x7:
5511 /* ldc/ldc2 lit (undefined for rn != pc). */
5512 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
5513 : copy_undef (gdbarch, insn, dsc);
5514
5515 default:
5516 return copy_undef (gdbarch, insn, dsc);
5517 }
5518 }
5519
5520 case 0xa:
5521 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
5522
5523 case 0xb:
5524 if (bits (insn, 16, 19) == 0xf)
5525 /* ldc/ldc2 lit. */
5526 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5527 else
5528 return copy_undef (gdbarch, insn, dsc);
5529
5530 case 0xc:
5531 if (bit (insn, 4))
5532 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5533 else
5534 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5535
5536 case 0xd:
5537 if (bit (insn, 4))
5538 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5539 else
5540 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5541
5542 default:
5543 return copy_undef (gdbarch, insn, dsc);
5544 }
5545 }
5546
5547 /* Decode miscellaneous instructions in dp/misc encoding space. */
5548
5549 static int
5550 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
5551 struct regcache *regs, struct displaced_step_closure *dsc)
5552 {
5553 unsigned int op2 = bits (insn, 4, 6);
5554 unsigned int op = bits (insn, 21, 22);
5555 unsigned int op1 = bits (insn, 16, 19);
5556
5557 switch (op2)
5558 {
5559 case 0x0:
5560 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
5561
5562 case 0x1:
5563 if (op == 0x1) /* bx. */
5564 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
5565 else if (op == 0x3)
5566 return copy_unmodified (gdbarch, insn, "clz", dsc);
5567 else
5568 return copy_undef (gdbarch, insn, dsc);
5569
5570 case 0x2:
5571 if (op == 0x1)
5572 /* Not really supported. */
5573 return copy_unmodified (gdbarch, insn, "bxj", dsc);
5574 else
5575 return copy_undef (gdbarch, insn, dsc);
5576
5577 case 0x3:
5578 if (op == 0x1)
5579 return copy_bx_blx_reg (gdbarch, insn, regs, dsc); /* blx register. */
5580 else
5581 return copy_undef (gdbarch, insn, dsc);
5582
5583 case 0x5:
5584 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
5585
5586 case 0x7:
5587 if (op == 0x1)
5588 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
5589 else if (op == 0x3)
5590 /* Not really supported. */
5591 return copy_unmodified (gdbarch, insn, "smc", dsc);
5592
5593 default:
5594 return copy_undef (gdbarch, insn, dsc);
5595 }
5596 }
5597
5598 static int
5599 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5600 struct displaced_step_closure *dsc)
5601 {
5602 if (bit (insn, 25))
5603 switch (bits (insn, 20, 24))
5604 {
5605 case 0x10:
5606 return copy_unmodified (gdbarch, insn, "movw", dsc);
5607
5608 case 0x14:
5609 return copy_unmodified (gdbarch, insn, "movt", dsc);
5610
5611 case 0x12: case 0x16:
5612 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
5613
5614 default:
5615 return copy_alu_imm (gdbarch, insn, regs, dsc);
5616 }
5617 else
5618 {
5619 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
5620
5621 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
5622 return copy_alu_reg (gdbarch, insn, regs, dsc);
5623 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
5624 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
5625 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
5626 return decode_miscellaneous (gdbarch, insn, regs, dsc);
5627 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
5628 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
5629 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
5630 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
5631 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
5632 return copy_unmodified (gdbarch, insn, "synch", dsc);
5633 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
5634 /* 2nd arg means "unprivileged". */
5635 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
5636 dsc);
5637 }
5638
5639 /* Should be unreachable. */
5640 return 1;
5641 }
5642
5643 static int
5644 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
5645 struct regcache *regs,
5646 struct displaced_step_closure *dsc)
5647 {
5648 int a = bit (insn, 25), b = bit (insn, 4);
5649 uint32_t op1 = bits (insn, 20, 24);
5650 int rn_f = bits (insn, 16, 19) == 0xf;
5651
5652 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
5653 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
5654 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
5655 else if ((!a && (op1 & 0x17) == 0x02)
5656 || (a && (op1 & 0x17) == 0x02 && !b))
5657 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
5658 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
5659 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
5660 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
5661 else if ((!a && (op1 & 0x17) == 0x03)
5662 || (a && (op1 & 0x17) == 0x03 && !b))
5663 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
5664 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
5665 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
5666 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
5667 else if ((!a && (op1 & 0x17) == 0x06)
5668 || (a && (op1 & 0x17) == 0x06 && !b))
5669 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
5670 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
5671 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
5672 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
5673 else if ((!a && (op1 & 0x17) == 0x07)
5674 || (a && (op1 & 0x17) == 0x07 && !b))
5675 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
5676
5677 /* Should be unreachable. */
5678 return 1;
5679 }
5680
5681 static int
5682 decode_media (struct gdbarch *gdbarch, uint32_t insn,
5683 struct displaced_step_closure *dsc)
5684 {
5685 switch (bits (insn, 20, 24))
5686 {
5687 case 0x00: case 0x01: case 0x02: case 0x03:
5688 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
5689
5690 case 0x04: case 0x05: case 0x06: case 0x07:
5691 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
5692
5693 case 0x08: case 0x09: case 0x0a: case 0x0b:
5694 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
5695 return copy_unmodified (gdbarch, insn,
5696 "decode/pack/unpack/saturate/reverse", dsc);
5697
5698 case 0x18:
5699 if (bits (insn, 5, 7) == 0) /* op2. */
5700 {
5701 if (bits (insn, 12, 15) == 0xf)
5702 return copy_unmodified (gdbarch, insn, "usad8", dsc);
5703 else
5704 return copy_unmodified (gdbarch, insn, "usada8", dsc);
5705 }
5706 else
5707 return copy_undef (gdbarch, insn, dsc);
5708
5709 case 0x1a: case 0x1b:
5710 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5711 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
5712 else
5713 return copy_undef (gdbarch, insn, dsc);
5714
5715 case 0x1c: case 0x1d:
5716 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
5717 {
5718 if (bits (insn, 0, 3) == 0xf)
5719 return copy_unmodified (gdbarch, insn, "bfc", dsc);
5720 else
5721 return copy_unmodified (gdbarch, insn, "bfi", dsc);
5722 }
5723 else
5724 return copy_undef (gdbarch, insn, dsc);
5725
5726 case 0x1e: case 0x1f:
5727 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5728 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
5729 else
5730 return copy_undef (gdbarch, insn, dsc);
5731 }
5732
5733 /* Should be unreachable. */
5734 return 1;
5735 }
5736
5737 static int
5738 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
5739 struct regcache *regs, struct displaced_step_closure *dsc)
5740 {
5741 if (bit (insn, 25))
5742 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5743 else
5744 return copy_block_xfer (gdbarch, insn, regs, dsc);
5745 }
5746
5747 static int
5748 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
5749 struct regcache *regs, struct displaced_step_closure *dsc)
5750 {
5751 unsigned int opcode = bits (insn, 20, 24);
5752
5753 switch (opcode)
5754 {
5755 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
5756 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
5757
5758 case 0x08: case 0x0a: case 0x0c: case 0x0e:
5759 case 0x12: case 0x16:
5760 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
5761
5762 case 0x09: case 0x0b: case 0x0d: case 0x0f:
5763 case 0x13: case 0x17:
5764 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
5765
5766 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
5767 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
5768 /* Note: no writeback for these instructions. Bit 25 will always be
5769 zero though (via caller), so the following works OK. */
5770 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5771 }
5772
5773 /* Should be unreachable. */
5774 return 1;
5775 }
5776
5777 static int
5778 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5779 struct regcache *regs, struct displaced_step_closure *dsc)
5780 {
5781 unsigned int op1 = bits (insn, 20, 25);
5782 int op = bit (insn, 4);
5783 unsigned int coproc = bits (insn, 8, 11);
5784 unsigned int rn = bits (insn, 16, 19);
5785
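 /* Coprocessor numbers 10 and 11 (i.e. (coproc & 0xe) == 0xa) denote the
    VFP/Neon register bank; other coprocessor numbers take the generic
    paths below.  */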
5786 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
5787 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
5788 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
5789 && (coproc & 0xe) != 0xa)
5790 /* stc/stc2. */
5791 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5792 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
5793 && (coproc & 0xe) != 0xa)
5794 /* ldc/ldc2 imm/lit. */
5795 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5796 else if ((op1 & 0x3e) == 0x00)
5797 return copy_undef (gdbarch, insn, dsc);
5798 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
5799 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
5800 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
5801 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5802 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
5803 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5804 else if ((op1 & 0x30) == 0x20 && !op)
5805 {
5806 if ((coproc & 0xe) == 0xa)
5807 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
5808 else
5809 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5810 }
5811 else if ((op1 & 0x30) == 0x20 && op)
5812 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
5813 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
5814 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5815 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
5816 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5817 else if ((op1 & 0x30) == 0x30)
5818 return copy_svc (gdbarch, insn, to, regs, dsc);
5819 else
5820 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
5821 }
5822
5823 void
5824 arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
5825 CORE_ADDR from, CORE_ADDR to, struct regcache *regs,
5826 struct displaced_step_closure *dsc)
5827 {
5828 int err = 0;
5829
5830 if (!displaced_in_arm_mode (regs))
5831 error (_("Displaced stepping is only supported in ARM mode"));
5832
5833 /* Most displaced instructions use a 1-instruction scratch space, so set this
5834 here and override below if/when necessary. */
5835 dsc->numinsns = 1;
5836 dsc->insn_addr = from;
5837 dsc->scratch_base = to;
5838 dsc->cleanup = NULL;
5839 dsc->wrote_to_pc = 0;
5840
5841 if ((insn & 0xf0000000) == 0xf0000000)
5842 err = decode_unconditional (gdbarch, insn, regs, dsc);
5843 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
5844 {
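 /* The selector above packs instruction bits 27:25 into bit positions 3:1
    and instruction bit 4 into bit position 0, mirroring the top-level ARM
    encoding table.  */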
5845 case 0x0: case 0x1: case 0x2: case 0x3:
5846 err = decode_dp_misc (gdbarch, insn, regs, dsc);
5847 break;
5848
5849 case 0x4: case 0x5: case 0x6:
5850 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
5851 break;
5852
5853 case 0x7:
5854 err = decode_media (gdbarch, insn, dsc);
5855 break;
5856
5857 case 0x8: case 0x9: case 0xa: case 0xb:
5858 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
5859 break;
5860
5861 case 0xc: case 0xd: case 0xe: case 0xf:
5862 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
5863 break;
5864 }
5865
5866 if (err)
5867 internal_error (__FILE__, __LINE__,
5868 _("arm_process_displaced_insn: Instruction decode error"));
5869 }
5870
5871 /* Actually set up the scratch space for a displaced instruction. */
5872
5873 void
5874 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
5875 CORE_ADDR to, struct displaced_step_closure *dsc)
5876 {
5877 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5878 unsigned int i;
5879 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5880
5881 /* Poke modified instruction(s). */
5882 for (i = 0; i < dsc->numinsns; i++)
5883 {
5884 if (debug_displaced)
5885 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
5886 "%.8lx\n", (unsigned long) dsc->modinsn[i],
5887 (unsigned long) to + i * 4);
5888 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
5889 dsc->modinsn[i]);
5890 }
5891
5892 /* Put breakpoint afterwards. */
5893 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
5894 tdep->arm_breakpoint_size);
5895
5896 if (debug_displaced)
5897 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
5898 paddress (gdbarch, from), paddress (gdbarch, to));
5899 }
5900
5901 /* Entry point for copying an instruction into scratch space for displaced
5902 stepping. */
5903
5904 struct displaced_step_closure *
5905 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
5906 CORE_ADDR from, CORE_ADDR to,
5907 struct regcache *regs)
5908 {
5909 struct displaced_step_closure *dsc
5910 = xmalloc (sizeof (struct displaced_step_closure));
5911 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5912 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
5913
5914 if (debug_displaced)
5915 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
5916 "at %.8lx\n", (unsigned long) insn,
5917 (unsigned long) from);
5918
5919 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
5920 arm_displaced_init_closure (gdbarch, from, to, dsc);
5921
5922 return dsc;
5923 }
5924
5925 /* Entry point for cleaning things up after a displaced instruction has been
5926 single-stepped. */
5927
5928 void
5929 arm_displaced_step_fixup (struct gdbarch *gdbarch,
5930 struct displaced_step_closure *dsc,
5931 CORE_ADDR from, CORE_ADDR to,
5932 struct regcache *regs)
5933 {
5934 if (dsc->cleanup)
5935 dsc->cleanup (gdbarch, regs, dsc);
5936
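 /* If neither the copied instruction nor its cleanup routine wrote to the
    PC, resume at the instruction following the original.  */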
5937 if (!dsc->wrote_to_pc)
5938 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
5939 }
5940
5941 #include "bfd-in2.h"
5942 #include "libcoff.h"
5943
5944 static int
5945 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
5946 {
5947 struct gdbarch *gdbarch = info->application_data;
5948
5949 if (arm_pc_is_thumb (gdbarch, memaddr))
5950 {
5951 static asymbol *asym;
5952 static combined_entry_type ce;
5953 static struct coff_symbol_struct csym;
5954 static struct bfd fake_bfd;
5955 static bfd_target fake_target;
5956
5957 if (csym.native == NULL)
5958 {
5959 /* Create a fake symbol vector containing a Thumb symbol.
5960 This is solely so that the code in print_insn_little_arm()
5961 and print_insn_big_arm() in opcodes/arm-dis.c will detect
5962 the presence of a Thumb symbol and switch to decoding
5963 Thumb instructions. */
5964
5965 fake_target.flavour = bfd_target_coff_flavour;
5966 fake_bfd.xvec = &fake_target;
5967 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
5968 csym.native = &ce;
5969 csym.symbol.the_bfd = &fake_bfd;
5970 csym.symbol.name = "fake";
5971 asym = (asymbol *) & csym;
5972 }
5973
5974 memaddr = UNMAKE_THUMB_ADDR (memaddr);
5975 info->symbols = &asym;
5976 }
5977 else
5978 info->symbols = NULL;
5979
5980 if (info->endian == BFD_ENDIAN_BIG)
5981 return print_insn_big_arm (memaddr, info);
5982 else
5983 return print_insn_little_arm (memaddr, info);
5984 }
5985
5986 /* The following define instruction sequences that will cause ARM
5987 CPUs to take an undefined instruction trap. These are used to
5988 signal a breakpoint to GDB.
5989
5990 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
5991 modes. A different instruction is required for each mode. The ARM
5992 CPUs can also be big or little endian. Thus four different
5993 instructions are needed to support all cases.
5994
5995 Note: ARMv4 defines several new instructions that will take the
5996 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
5997 not in fact add the new instructions. The new undefined
5998 instructions in ARMv4 are all instructions that had no defined
5999 behaviour in earlier chips. There is no guarantee that they will
6000 raise an exception; they may instead be treated as NOPs. In practice, it
6001 may only be safe to rely on instructions matching:
6002
6003 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
6004 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
6005 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
6006
6007 Even this may only be true if the condition predicate is true. The
6008 following use a condition predicate of ALWAYS so it is always TRUE.
6009
6010 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
6011 and NetBSD all use a software interrupt rather than an undefined
6012 instruction to force a trap. This can be handled by the
6013 ABI-specific code during establishment of the gdbarch vector. */
6014
6015 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
6016 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
6017 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
6018 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
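/* Both ARM byte orders encode the word 0xE7FFDEFE, which matches the
   always-undefined pattern given above; the Thumb sequences encode the
   halfword 0xBEBE, a BKPT instruction with immediate 0xbe.  */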
6019
6020 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
6021 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
6022 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
6023 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
6024
6025 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
6026 the program counter value to determine whether a 16-bit or 32-bit
6027 breakpoint should be used. It returns a pointer to a string of
6028 bytes that encode a breakpoint instruction, stores the length of
6029 the string to *lenptr, and adjusts the program counter (if
6030 necessary) to point to the actual memory location where the
6031 breakpoint should be inserted. */
6032
6033 static const unsigned char *
6034 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
6035 {
6036 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6037 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6038
6039 if (arm_pc_is_thumb (gdbarch, *pcptr))
6040 {
6041 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
6042
6043 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
6044 check whether we are replacing a 32-bit instruction. */
6045 if (tdep->thumb2_breakpoint != NULL)
6046 {
6047 gdb_byte buf[2];
6048 if (target_read_memory (*pcptr, buf, 2) == 0)
6049 {
6050 unsigned short inst1;
6051 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
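 /* The first halfword of a 32-bit Thumb-2 instruction has its top five
    bits set to 0b11101, 0b11110 or 0b11111, which is what the test
    below checks for.  */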
6052 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
6053 {
6054 *lenptr = tdep->thumb2_breakpoint_size;
6055 return tdep->thumb2_breakpoint;
6056 }
6057 }
6058 }
6059
6060 *lenptr = tdep->thumb_breakpoint_size;
6061 return tdep->thumb_breakpoint;
6062 }
6063 else
6064 {
6065 *lenptr = tdep->arm_breakpoint_size;
6066 return tdep->arm_breakpoint;
6067 }
6068 }
6069
6070 static void
6071 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
6072 int *kindptr)
6073 {
6074 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6075
6076 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
6077
6078 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
6079 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
6080 that this is not confused with a 32-bit ARM breakpoint. */
6081 *kindptr = 3;
6082 }
6083
6084 /* Extract from an array REGBUF containing the (raw) register state a
6085 function return value of type TYPE, and copy that, in virtual
6086 format, into VALBUF. */
6087
6088 static void
6089 arm_extract_return_value (struct type *type, struct regcache *regs,
6090 gdb_byte *valbuf)
6091 {
6092 struct gdbarch *gdbarch = get_regcache_arch (regs);
6093 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6094
6095 if (TYPE_CODE_FLT == TYPE_CODE (type))
6096 {
6097 switch (gdbarch_tdep (gdbarch)->fp_model)
6098 {
6099 case ARM_FLOAT_FPA:
6100 {
6101 /* The value is in register F0 in internal format. We need to
6102 extract the raw value and then convert it to the desired
6103 internal type. */
6104 bfd_byte tmpbuf[FP_REGISTER_SIZE];
6105
6106 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
6107 convert_from_extended (floatformat_from_type (type), tmpbuf,
6108 valbuf, gdbarch_byte_order (gdbarch));
6109 }
6110 break;
6111
6112 case ARM_FLOAT_SOFT_FPA:
6113 case ARM_FLOAT_SOFT_VFP:
6114 /* ARM_FLOAT_VFP can arise if this is a variadic function so
6115 not using the VFP ABI code. */
6116 case ARM_FLOAT_VFP:
6117 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
6118 if (TYPE_LENGTH (type) > 4)
6119 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
6120 valbuf + INT_REGISTER_SIZE);
6121 break;
6122
6123 default:
6124 internal_error
6125 (__FILE__, __LINE__,
6126 _("arm_extract_return_value: Floating point model not supported"));
6127 break;
6128 }
6129 }
6130 else if (TYPE_CODE (type) == TYPE_CODE_INT
6131 || TYPE_CODE (type) == TYPE_CODE_CHAR
6132 || TYPE_CODE (type) == TYPE_CODE_BOOL
6133 || TYPE_CODE (type) == TYPE_CODE_PTR
6134 || TYPE_CODE (type) == TYPE_CODE_REF
6135 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6136 {
6137 /* If the type is a plain integer, then the access is
6138 straightforward. Otherwise we have to play around a bit more. */
6139 int len = TYPE_LENGTH (type);
6140 int regno = ARM_A1_REGNUM;
6141 ULONGEST tmp;
6142
6143 while (len > 0)
6144 {
6145 /* By using store_unsigned_integer we avoid having to do
6146 anything special for small big-endian values. */
6147 regcache_cooked_read_unsigned (regs, regno++, &tmp);
6148 store_unsigned_integer (valbuf,
6149 (len > INT_REGISTER_SIZE
6150 ? INT_REGISTER_SIZE : len),
6151 byte_order, tmp);
6152 len -= INT_REGISTER_SIZE;
6153 valbuf += INT_REGISTER_SIZE;
6154 }
6155 }
6156 else
6157 {
6158 /* For a structure or union the behaviour is as if the value had
6159 been stored to word-aligned memory and then loaded into
6160 registers with 32-bit load instruction(s). */
6161 int len = TYPE_LENGTH (type);
6162 int regno = ARM_A1_REGNUM;
6163 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6164
6165 while (len > 0)
6166 {
6167 regcache_cooked_read (regs, regno++, tmpbuf);
6168 memcpy (valbuf, tmpbuf,
6169 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6170 len -= INT_REGISTER_SIZE;
6171 valbuf += INT_REGISTER_SIZE;
6172 }
6173 }
6174 }
6175
6176
6177 /* Will a function return an aggregate type in memory or in a
6178 register? Return 0 if an aggregate type can be returned in a
6179 register, 1 if it must be returned in memory. */
6180
6181 static int
6182 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
6183 {
6184 int nRc;
6185 enum type_code code;
6186
6187 CHECK_TYPEDEF (type);
6188
6189 /* In the ARM ABI, "integer" like aggregate types are returned in
6190 registers. For an aggregate type to be integer like, its size
6191 must be less than or equal to INT_REGISTER_SIZE and the
6192 offset of each addressable subfield must be zero. Note that bit
6193 fields are not addressable, and all addressable subfields of
6194 unions always start at offset zero.
6195
6196 This function is based on the behaviour of GCC 2.95.1.
6197 See: gcc/arm.c: arm_return_in_memory() for details.
6198
6199 Note: All versions of GCC before GCC 2.95.2 do not set up the
6200 parameters correctly for a function returning the following
6201 structure: struct { float f;}; This should be returned in memory,
6202 not a register. Richard Earnshaw sent me a patch, but I do not
6203 know of any way to detect if a function like the above has been
6204 compiled with the correct calling convention. */
6205
6206 /* All aggregate types that won't fit in a register must be returned
6207 in memory. */
6208 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
6209 {
6210 return 1;
6211 }
6212
6213 /* The AAPCS says all aggregates not larger than a word are returned
6214 in a register. */
6215 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
6216 return 0;
6217
6218 /* The only aggregate types that can be returned in a register are
6219 structs and unions. Arrays must be returned in memory. */
6220 code = TYPE_CODE (type);
6221 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
6222 {
6223 return 1;
6224 }
6225
6226 /* Assume all other aggregate types can be returned in a register.
6227 Run a check for structures, unions and arrays. */
6228 nRc = 0;
6229
6230 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
6231 {
6232 int i;
6233 /* Need to check if this struct/union is "integer" like. For
6234 this to be true, its size must be less than or equal to
6235 INT_REGISTER_SIZE and the offset of each addressable
6236 subfield must be zero. Note that bit fields are not
6237 addressable, and unions always start at offset zero. If any
6238 of the subfields is a floating point type, the struct/union
6239 cannot be an integer type. */
6240
6241 /* For each field in the object, check:
6242 1) Is it FP? --> yes, nRc = 1;
6243 2) Is it addressable (bitpos != 0) and
6244 not packed (bitsize == 0)?
6245 --> yes, nRc = 1
6246 */
6247
6248 for (i = 0; i < TYPE_NFIELDS (type); i++)
6249 {
6250 enum type_code field_type_code;
6251 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type, i)));
6252
6253 /* Is it a floating point type field? */
6254 if (field_type_code == TYPE_CODE_FLT)
6255 {
6256 nRc = 1;
6257 break;
6258 }
6259
6260 /* If bitpos != 0, then we have to care about it. */
6261 if (TYPE_FIELD_BITPOS (type, i) != 0)
6262 {
6263 /* Bitfields are not addressable. If the field bitsize is
6264 zero, then the field is not packed. Hence it cannot be
6265 a bitfield or any other packed type. */
6266 if (TYPE_FIELD_BITSIZE (type, i) == 0)
6267 {
6268 nRc = 1;
6269 break;
6270 }
6271 }
6272 }
6273 }
6274
6275 return nRc;
6276 }
6277
6278 /* Write into appropriate registers a function return value of type
6279 TYPE, given in virtual format. */
6280
6281 static void
6282 arm_store_return_value (struct type *type, struct regcache *regs,
6283 const gdb_byte *valbuf)
6284 {
6285 struct gdbarch *gdbarch = get_regcache_arch (regs);
6286 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6287
6288 if (TYPE_CODE (type) == TYPE_CODE_FLT)
6289 {
6290 char buf[MAX_REGISTER_SIZE];
6291
6292 switch (gdbarch_tdep (gdbarch)->fp_model)
6293 {
6294 case ARM_FLOAT_FPA:
6295
6296 convert_to_extended (floatformat_from_type (type), buf, valbuf,
6297 gdbarch_byte_order (gdbarch));
6298 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
6299 break;
6300
6301 case ARM_FLOAT_SOFT_FPA:
6302 case ARM_FLOAT_SOFT_VFP:
6303 /* ARM_FLOAT_VFP can arise if this is a variadic function so
6304 not using the VFP ABI code. */
6305 case ARM_FLOAT_VFP:
6306 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
6307 if (TYPE_LENGTH (type) > 4)
6308 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
6309 valbuf + INT_REGISTER_SIZE);
6310 break;
6311
6312 default:
6313 internal_error
6314 (__FILE__, __LINE__,
6315 _("arm_store_return_value: Floating point model not supported"));
6316 break;
6317 }
6318 }
6319 else if (TYPE_CODE (type) == TYPE_CODE_INT
6320 || TYPE_CODE (type) == TYPE_CODE_CHAR
6321 || TYPE_CODE (type) == TYPE_CODE_BOOL
6322 || TYPE_CODE (type) == TYPE_CODE_PTR
6323 || TYPE_CODE (type) == TYPE_CODE_REF
6324 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6325 {
6326 if (TYPE_LENGTH (type) <= 4)
6327 {
6328 /* Values of one word or less are zero/sign-extended and
6329 returned in r0. */
6330 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6331 LONGEST val = unpack_long (type, valbuf);
6332
6333 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
6334 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
6335 }
6336 else
6337 {
6338 /* Integral values greater than one word are stored in consecutive
6339 registers starting with r0. This will always be a multiple of
6340 the register size. */
6341 int len = TYPE_LENGTH (type);
6342 int regno = ARM_A1_REGNUM;
6343
6344 while (len > 0)
6345 {
6346 regcache_cooked_write (regs, regno++, valbuf);
6347 len -= INT_REGISTER_SIZE;
6348 valbuf += INT_REGISTER_SIZE;
6349 }
6350 }
6351 }
6352 else
6353 {
6354 /* For a structure or union the behaviour is as if the value had
6355 been stored to word-aligned memory and then loaded into
6356 registers with 32-bit load instruction(s). */
6357 int len = TYPE_LENGTH (type);
6358 int regno = ARM_A1_REGNUM;
6359 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6360
6361 while (len > 0)
6362 {
6363 memcpy (tmpbuf, valbuf,
6364 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6365 regcache_cooked_write (regs, regno++, tmpbuf);
6366 len -= INT_REGISTER_SIZE;
6367 valbuf += INT_REGISTER_SIZE;
6368 }
6369 }
6370 }
6371
6372
6373 /* Handle function return values. */
6374
6375 static enum return_value_convention
6376 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
6377 struct type *valtype, struct regcache *regcache,
6378 gdb_byte *readbuf, const gdb_byte *writebuf)
6379 {
6380 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6381 enum arm_vfp_cprc_base_type vfp_base_type;
6382 int vfp_base_count;
6383
6384 if (arm_vfp_abi_for_function (gdbarch, func_type)
6385 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
6386 {
6387 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
6388 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
6389 int i;
6390 for (i = 0; i < vfp_base_count; i++)
6391 {
6392 if (reg_char == 'q')
6393 {
6394 if (writebuf)
6395 arm_neon_quad_write (gdbarch, regcache, i,
6396 writebuf + i * unit_length);
6397
6398 if (readbuf)
6399 arm_neon_quad_read (gdbarch, regcache, i,
6400 readbuf + i * unit_length);
6401 }
6402 else
6403 {
6404 char name_buf[4];
6405 int regnum;
6406
6407 sprintf (name_buf, "%c%d", reg_char, i);
6408 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6409 strlen (name_buf));
6410 if (writebuf)
6411 regcache_cooked_write (regcache, regnum,
6412 writebuf + i * unit_length);
6413 if (readbuf)
6414 regcache_cooked_read (regcache, regnum,
6415 readbuf + i * unit_length);
6416 }
6417 }
6418 return RETURN_VALUE_REGISTER_CONVENTION;
6419 }
6420
6421 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
6422 || TYPE_CODE (valtype) == TYPE_CODE_UNION
6423 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
6424 {
6425 if (tdep->struct_return == pcc_struct_return
6426 || arm_return_in_memory (gdbarch, valtype))
6427 return RETURN_VALUE_STRUCT_CONVENTION;
6428 }
6429
6430 if (writebuf)
6431 arm_store_return_value (valtype, regcache, writebuf);
6432
6433 if (readbuf)
6434 arm_extract_return_value (valtype, regcache, readbuf);
6435
6436 return RETURN_VALUE_REGISTER_CONVENTION;
6437 }
6438
6439
6440 static int
6441 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
6442 {
6443 struct gdbarch *gdbarch = get_frame_arch (frame);
6444 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6445 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6446 CORE_ADDR jb_addr;
6447 char buf[INT_REGISTER_SIZE];
6448
6449 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
6450
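 /* The jmp_buf address arrives in r0; the saved PC lies jb_pc elements of
    jb_elt_size bytes each into that buffer, as set up by the OS-specific
    tdep initialization.  */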
6451 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
6452 INT_REGISTER_SIZE))
6453 return 0;
6454
6455 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
6456 return 1;
6457 }
6458
6459 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
6460 return the target PC. Otherwise return 0. */
6461
6462 CORE_ADDR
6463 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
6464 {
6465 char *name;
6466 int namelen;
6467 CORE_ADDR start_addr;
6468
6469 /* Find the starting address and name of the function containing the PC. */
6470 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
6471 return 0;
6472
6473 /* If PC is in a Thumb call or return stub, return the address of the
6474 target PC, which is in a register. The thunk functions are called
6475 _call_via_xx, where xx is the register name. The possible names
6476 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
6477 functions, named __ARM_call_via_r[0-7]. */
6478 if (strncmp (name, "_call_via_", 10) == 0
6479 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
6480 {
6481 /* Use the name suffix to determine which register contains the
6482 target PC. */
6483 static char *table[15] =
6484 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
6485 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
6486 };
6487 int regno;
6488 int offset = strlen (name) - 2;
6489
6490 for (regno = 0; regno <= 14; regno++)
6491 if (strcmp (&name[offset], table[regno]) == 0)
6492 return get_frame_register_unsigned (frame, regno);
6493 }
6494
6495 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
6496 non-interworking calls to foo. We could decode the stubs
6497 to find the target but it's easier to use the symbol table. */
6498 namelen = strlen (name);
6499 if (name[0] == '_' && name[1] == '_'
6500 && ((namelen > 2 + strlen ("_from_thumb")
6501 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
6502 strlen ("_from_thumb")) == 0)
6503 || (namelen > 2 + strlen ("_from_arm")
6504 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
6505 strlen ("_from_arm")) == 0)))
6506 {
6507 char *target_name;
6508 int target_len = namelen - 2;
6509 struct minimal_symbol *minsym;
6510 struct objfile *objfile;
6511 struct obj_section *sec;
6512
6513 if (name[namelen - 1] == 'b')
6514 target_len -= strlen ("_from_thumb");
6515 else
6516 target_len -= strlen ("_from_arm");
6517
6518 target_name = alloca (target_len + 1);
6519 memcpy (target_name, name + 2, target_len);
6520 target_name[target_len] = '\0';
6521
6522 sec = find_pc_section (pc);
6523 objfile = (sec == NULL) ? NULL : sec->objfile;
6524 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
6525 if (minsym != NULL)
6526 return SYMBOL_VALUE_ADDRESS (minsym);
6527 else
6528 return 0;
6529 }
6530
6531 return 0; /* not a stub */
6532 }
6533
6534 static void
6535 set_arm_command (char *args, int from_tty)
6536 {
6537 printf_unfiltered (_("\
6538 \"set arm\" must be followed by an apporpriate subcommand.\n"));
6539 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
6540 }
6541
6542 static void
6543 show_arm_command (char *args, int from_tty)
6544 {
6545 cmd_show_list (showarmcmdlist, from_tty, "");
6546 }
6547
6548 static void
6549 arm_update_current_architecture (void)
6550 {
6551 struct gdbarch_info info;
6552
6553 /* If the current architecture is not ARM, we have nothing to do. */
6554 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
6555 return;
6556
6557 /* Update the architecture. */
6558 gdbarch_info_init (&info);
6559
6560 if (!gdbarch_update_p (info))
6561 internal_error (__FILE__, __LINE__, "could not update architecture");
6562 }
6563
6564 static void
6565 set_fp_model_sfunc (char *args, int from_tty,
6566 struct cmd_list_element *c)
6567 {
6568 enum arm_float_model fp_model;
6569
6570 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
6571 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
6572 {
6573 arm_fp_model = fp_model;
6574 break;
6575 }
6576
6577 if (fp_model == ARM_FLOAT_LAST)
6578 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
6579 current_fp_model);
6580
6581 arm_update_current_architecture ();
6582 }
6583
6584 static void
6585 show_fp_model (struct ui_file *file, int from_tty,
6586 struct cmd_list_element *c, const char *value)
6587 {
6588 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6589
6590 if (arm_fp_model == ARM_FLOAT_AUTO
6591 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6592 fprintf_filtered (file, _("\
6593 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
6594 fp_model_strings[tdep->fp_model]);
6595 else
6596 fprintf_filtered (file, _("\
6597 The current ARM floating point model is \"%s\".\n"),
6598 fp_model_strings[arm_fp_model]);
6599 }
6600
6601 static void
6602 arm_set_abi (char *args, int from_tty,
6603 struct cmd_list_element *c)
6604 {
6605 enum arm_abi_kind arm_abi;
6606
6607 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
6608 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
6609 {
6610 arm_abi_global = arm_abi;
6611 break;
6612 }
6613
6614 if (arm_abi == ARM_ABI_LAST)
6615 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
6616 arm_abi_string);
6617
6618 arm_update_current_architecture ();
6619 }
6620
6621 static void
6622 arm_show_abi (struct ui_file *file, int from_tty,
6623 struct cmd_list_element *c, const char *value)
6624 {
6625 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6626
6627 if (arm_abi_global == ARM_ABI_AUTO
6628 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6629 fprintf_filtered (file, _("\
6630 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
6631 arm_abi_strings[tdep->arm_abi]);
6632 else
6633 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
6634 arm_abi_string);
6635 }
6636
6637 static void
6638 arm_show_fallback_mode (struct ui_file *file, int from_tty,
6639 struct cmd_list_element *c, const char *value)
6640 {
6641 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6642
6643 fprintf_filtered (file, _("\
6644 The current execution mode assumed (when symbols are unavailable) is \"%s\".\n"),
6645 arm_fallback_mode_string);
6646 }
6647
6648 static void
6649 arm_show_force_mode (struct ui_file *file, int from_tty,
6650 struct cmd_list_element *c, const char *value)
6651 {
6652 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6653
6654 fprintf_filtered (file, _("\
6655 The current execution mode assumed (even when symbols are available) is \"%s\".\n"),
6656 arm_force_mode_string);
6657 }
6658
6659 /* If the user changes the register disassembly style used for info
6660 register and other commands, we have to also switch the style used
6661 in opcodes for disassembly output. This function is run in the "set
6662 arm disassembly" command, and does that. */
6663
6664 static void
6665 set_disassembly_style_sfunc (char *args, int from_tty,
6666 struct cmd_list_element *c)
6667 {
6668 set_disassembly_style ();
6669 }
6670 \f
6671 /* Return the ARM register name corresponding to register I. */
6672 static const char *
6673 arm_register_name (struct gdbarch *gdbarch, int i)
6674 {
6675 const int num_regs = gdbarch_num_regs (gdbarch);
6676
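 /* Pseudo registers follow the raw registers: the 32 single-precision VFP
    views (s0-s31) come first, then the 16 Neon quad views (q0-q15).  */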
6677 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
6678 && i >= num_regs && i < num_regs + 32)
6679 {
6680 static const char *const vfp_pseudo_names[] = {
6681 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
6682 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
6683 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
6684 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
6685 };
6686
6687 return vfp_pseudo_names[i - num_regs];
6688 }
6689
6690 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
6691 && i >= num_regs + 32 && i < num_regs + 32 + 16)
6692 {
6693 static const char *const neon_pseudo_names[] = {
6694 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
6695 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
6696 };
6697
6698 return neon_pseudo_names[i - num_regs - 32];
6699 }
6700
6701 if (i >= ARRAY_SIZE (arm_register_names))
6702 /* These registers are only supported on targets which supply
6703 an XML description. */
6704 return "";
6705
6706 return arm_register_names[i];
6707 }
6708
6709 static void
6710 set_disassembly_style (void)
6711 {
6712 int current;
6713
6714 /* Find the style that the user wants. */
6715 for (current = 0; current < num_disassembly_options; current++)
6716 if (disassembly_style == valid_disassembly_styles[current])
6717 break;
6718 gdb_assert (current < num_disassembly_options);
6719
6720 /* Synchronize the disassembler. */
6721 set_arm_regname_option (current);
6722 }
6723
6724 /* Test whether the coff symbol specific value corresponds to a Thumb
6725 function. */
6726
6727 static int
6728 coff_sym_is_thumb (int val)
6729 {
6730 return (val == C_THUMBEXT
6731 || val == C_THUMBSTAT
6732 || val == C_THUMBEXTFUNC
6733 || val == C_THUMBSTATFUNC
6734 || val == C_THUMBLABEL);
6735 }
6736
6737 /* arm_coff_make_msymbol_special()
6738 arm_elf_make_msymbol_special()
6739
6740 These functions test whether the COFF or ELF symbol corresponds to
6741 an address in thumb code, and set a "special" bit in a minimal
6742 symbol to indicate that it does. */
6743
6744 static void
6745 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
6746 {
6747 /* Thumb symbols are of type STT_LOPROC, (synonymous with
6748 STT_ARM_TFUNC). */
6749 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
6750 == STT_LOPROC)
6751 MSYMBOL_SET_SPECIAL (msym);
6752 }
6753
6754 static void
6755 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
6756 {
6757 if (coff_sym_is_thumb (val))
6758 MSYMBOL_SET_SPECIAL (msym);
6759 }
6760
6761 static void
6762 arm_objfile_data_free (struct objfile *objfile, void *arg)
6763 {
6764 struct arm_per_objfile *data = arg;
6765 unsigned int i;
6766
6767 for (i = 0; i < objfile->obfd->section_count; i++)
6768 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
6769 }
6770
6771 static void
6772 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
6773 asymbol *sym)
6774 {
6775 const char *name = bfd_asymbol_name (sym);
6776 struct arm_per_objfile *data;
6777 VEC(arm_mapping_symbol_s) **map_p;
6778 struct arm_mapping_symbol new_map_sym;
6779
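 /* ELF for the ARM Architecture defines the mapping symbols $a, $t and $d,
    marking the start of ARM code, Thumb code and data respectively; only
    those three are recorded here.  */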
6780 gdb_assert (name[0] == '$');
6781 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
6782 return;
6783
6784 data = objfile_data (objfile, arm_objfile_data_key);
6785 if (data == NULL)
6786 {
6787 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
6788 struct arm_per_objfile);
6789 set_objfile_data (objfile, arm_objfile_data_key, data);
6790 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
6791 objfile->obfd->section_count,
6792 VEC(arm_mapping_symbol_s) *);
6793 }
6794 map_p = &data->section_maps[bfd_get_section (sym)->index];
6795
6796 new_map_sym.value = sym->value;
6797 new_map_sym.type = name[1];
6798
6799 /* Assume that most mapping symbols appear in order of increasing
6800 value. If they were randomly distributed, it would be faster to
6801 always push here and then sort at first use. */
6802 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
6803 {
6804 struct arm_mapping_symbol *prev_map_sym;
6805
6806 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
6807 if (prev_map_sym->value >= sym->value)
6808 {
6809 unsigned int idx;
6810 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
6811 arm_compare_mapping_symbols);
6812 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
6813 return;
6814 }
6815 }
6816
6817 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
6818 }
6819
6820 static void
6821 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
6822 {
6823 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6824 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
6825
6826 /* If necessary, set the T bit. */
6827 if (arm_apcs_32)
6828 {
6829 ULONGEST val, t_bit;
6830 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
6831 t_bit = arm_psr_thumb_bit (gdbarch);
6832 if (arm_pc_is_thumb (gdbarch, pc))
6833 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6834 val | t_bit);
6835 else
6836 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6837 val & ~t_bit);
6838 }
6839 }
6840
6841 /* Read the contents of a NEON quad register, by reading from two
6842 double registers. This is used to implement the quad pseudo
6843 registers, and for argument passing in case the quad registers are
6844 missing; vectors are passed in quad registers when using the VFP
6845 ABI, even if a NEON unit is not present. REGNUM is the index of
6846 the quad register, in [0, 15]. */
6847
6848 static void
6849 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
6850 int regnum, gdb_byte *buf)
6851 {
6852 char name_buf[4];
6853 gdb_byte reg_buf[8];
6854 int offset, double_regnum;
6855
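 /* Quad register qN overlays double registers d(2N) and d(2N+1); look the
    first one up by name and read both halves below.  */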
6856 sprintf (name_buf, "d%d", regnum << 1);
6857 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6858 strlen (name_buf));
6859
6860 /* d0 is always the least significant half of q0. */
6861 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6862 offset = 8;
6863 else
6864 offset = 0;
6865
6866 regcache_raw_read (regcache, double_regnum, reg_buf);
6867 memcpy (buf + offset, reg_buf, 8);
6868
6869 offset = 8 - offset;
6870 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
6871 memcpy (buf + offset, reg_buf, 8);
6872 }
6873
6874 static void
6875 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
6876 int regnum, gdb_byte *buf)
6877 {
6878 const int num_regs = gdbarch_num_regs (gdbarch);
6879 char name_buf[4];
6880 gdb_byte reg_buf[8];
6881 int offset, double_regnum;
6882
6883 gdb_assert (regnum >= num_regs);
6884 regnum -= num_regs;
6885
6886 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6887 /* Quad-precision register. */
6888 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
6889 else
6890 {
6891 /* Single-precision register. */
6892 gdb_assert (regnum < 32);
6893
6894 /* s0 is always the least significant half of d0. */
6895 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6896 offset = (regnum & 1) ? 0 : 4;
6897 else
6898 offset = (regnum & 1) ? 4 : 0;
6899
6900 sprintf (name_buf, "d%d", regnum >> 1);
6901 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6902 strlen (name_buf));
6903
6904 regcache_raw_read (regcache, double_regnum, reg_buf);
6905 memcpy (buf, reg_buf + offset, 4);
6906 }
6907 }
6908
6909 /* Store the contents of BUF to a NEON quad register, by writing to
6910 two double registers. This is used to implement the quad pseudo
6911 registers, and for argument passing in case the quad registers are
6912 missing; vectors are passed in quad registers when using the VFP
6913 ABI, even if a NEON unit is not present. REGNUM is the index
6914 of the quad register, in [0, 15]. */
6915
6916 static void
6917 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
6918 int regnum, const gdb_byte *buf)
6919 {
6920 char name_buf[4];
6921 gdb_byte reg_buf[8];
6922 int offset, double_regnum;
6923
6924 sprintf (name_buf, "d%d", regnum << 1);
6925 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6926 strlen (name_buf));
6927
6928 /* d0 is always the least significant half of q0. */
6929 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6930 offset = 8;
6931 else
6932 offset = 0;
6933
6934 regcache_raw_write (regcache, double_regnum, buf + offset);
6935 offset = 8 - offset;
6936 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
6937 }
6938
6939 static void
6940 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
6941 int regnum, const gdb_byte *buf)
6942 {
6943 const int num_regs = gdbarch_num_regs (gdbarch);
6944 char name_buf[4];
6945 gdb_byte reg_buf[8];
6946 int offset, double_regnum;
6947
6948 gdb_assert (regnum >= num_regs);
6949 regnum -= num_regs;
6950
6951 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6952 /* Quad-precision register. */
6953 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
6954 else
6955 {
6956 /* Single-precision register. */
6957 gdb_assert (regnum < 32);
6958
6959 /* s0 is always the least significant half of d0. */
6960 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6961 offset = (regnum & 1) ? 0 : 4;
6962 else
6963 offset = (regnum & 1) ? 4 : 0;
6964
6965 sprintf (name_buf, "d%d", regnum >> 1);
6966 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6967 strlen (name_buf));
6968
6969 regcache_raw_read (regcache, double_regnum, reg_buf);
6970 memcpy (reg_buf + offset, buf, 4);
6971 regcache_raw_write (regcache, double_regnum, reg_buf);
6972 }
6973 }
6974
6975 static struct value *
6976 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
6977 {
6978 const int *reg_p = baton;
6979 return value_of_register (*reg_p, frame);
6980 }
6981 \f
6982 static enum gdb_osabi
6983 arm_elf_osabi_sniffer (bfd *abfd)
6984 {
6985 unsigned int elfosabi;
6986 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
6987
6988 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
6989
6990 if (elfosabi == ELFOSABI_ARM)
6991 /* GNU tools use this value. Check note sections in this case,
6992 as well. */
6993 bfd_map_over_sections (abfd,
6994 generic_elf_osabi_sniff_abi_tag_sections,
6995 &osabi);
6996
6997 /* Anything else will be handled by the generic ELF sniffer. */
6998 return osabi;
6999 }
7000
7001 \f
7002 /* Initialize the current architecture based on INFO. If possible,
7003 re-use an architecture from ARCHES, which is a list of
7004 architectures already created during this debugging session.
7005
7006 Called e.g. at program startup, when reading a core file, and when
7007 reading a binary file. */
7008
7009 static struct gdbarch *
7010 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
7011 {
7012 struct gdbarch_tdep *tdep;
7013 struct gdbarch *gdbarch;
7014 struct gdbarch_list *best_arch;
7015 enum arm_abi_kind arm_abi = arm_abi_global;
7016 enum arm_float_model fp_model = arm_fp_model;
7017 struct tdesc_arch_data *tdesc_data = NULL;
7018 int i, is_m = 0;
7019 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
7020 int have_neon = 0;
7021 int have_fpa_registers = 1;
7022 const struct target_desc *tdesc = info.target_desc;
7023
7024 /* If we have an object to base this architecture on, try to determine
7025 its ABI. */
7026
7027 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
7028 {
7029 int ei_osabi, e_flags;
7030
7031 switch (bfd_get_flavour (info.abfd))
7032 {
7033 case bfd_target_aout_flavour:
7034 /* Assume it's an old APCS-style ABI. */
7035 arm_abi = ARM_ABI_APCS;
7036 break;
7037
7038 case bfd_target_coff_flavour:
7039 /* Assume it's an old APCS-style ABI. */
7040 /* XXX WinCE? */
7041 arm_abi = ARM_ABI_APCS;
7042 break;
7043
7044 case bfd_target_elf_flavour:
7045 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
7046 e_flags = elf_elfheader (info.abfd)->e_flags;
7047
7048 if (ei_osabi == ELFOSABI_ARM)
7049 {
7050 /* GNU tools used to use this value, but do not for EABI
7051 objects. There's nowhere to tag an EABI version
7052 anyway, so assume APCS. */
7053 arm_abi = ARM_ABI_APCS;
7054 }
7055 else if (ei_osabi == ELFOSABI_NONE)
7056 {
7057 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
7058 int attr_arch, attr_profile;
7059
7060 switch (eabi_ver)
7061 {
7062 case EF_ARM_EABI_UNKNOWN:
7063 /* Assume GNU tools. */
7064 arm_abi = ARM_ABI_APCS;
7065 break;
7066
7067 case EF_ARM_EABI_VER4:
7068 case EF_ARM_EABI_VER5:
7069 arm_abi = ARM_ABI_AAPCS;
7070 /* EABI binaries default to VFP float ordering.
7071 They may also contain build attributes that can
7072 be used to identify if the VFP argument-passing
7073 ABI is in use. */
7074 if (fp_model == ARM_FLOAT_AUTO)
7075 {
7076 #ifdef HAVE_ELF
7077 switch (bfd_elf_get_obj_attr_int (info.abfd,
7078 OBJ_ATTR_PROC,
7079 Tag_ABI_VFP_args))
7080 {
7081 case 0:
7082 /* "The user intended FP parameter/result
7083 passing to conform to AAPCS, base
7084 variant". */
7085 fp_model = ARM_FLOAT_SOFT_VFP;
7086 break;
7087 case 1:
7088 /* "The user intended FP parameter/result
7089 passing to conform to AAPCS, VFP
7090 variant". */
7091 fp_model = ARM_FLOAT_VFP;
7092 break;
7093 case 2:
7094 /* "The user intended FP parameter/result
7095 passing to conform to tool chain-specific
7096 conventions" - we don't know any such
7097 conventions, so leave it as "auto". */
7098 break;
7099 default:
7100 /* Attribute value not mentioned in the
7101 October 2008 ABI, so leave it as
7102 "auto". */
7103 break;
7104 }
7105 #else
7106 fp_model = ARM_FLOAT_SOFT_VFP;
7107 #endif
7108 }
7109 break;
7110
7111 default:
7112 /* Leave it as "auto". */
7113 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
7114 break;
7115 }
7116
7117 #ifdef HAVE_ELF
7118 /* Detect M-profile programs. This only works if the
7119 executable file includes build attributes; GCC does
7120 copy them to the executable, but e.g. RealView does
7121 not. */
7122 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
7123 Tag_CPU_arch);
7124 attr_profile = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
7125 Tag_CPU_arch_profile);
7126 /* GCC specifies the profile for v6-M; RealView only
7127 specifies the profile for architectures starting with
7128 V7 (as opposed to architectures with a tag
7129 numerically greater than TAG_CPU_ARCH_V7). */
7130 if (!tdesc_has_registers (tdesc)
7131 && (attr_arch == TAG_CPU_ARCH_V6_M
7132 || attr_arch == TAG_CPU_ARCH_V6S_M
7133 || attr_profile == 'M'))
7134 tdesc = tdesc_arm_with_m;
7135 #endif
7136 }
7137
7138 if (fp_model == ARM_FLOAT_AUTO)
7139 {
7140 int e_flags = elf_elfheader (info.abfd)->e_flags;
7141
7142 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
7143 {
7144 case 0:
7145 /* Leave it as "auto". Strictly speaking this case
7146 means FPA, but almost nobody uses that now, and
7147 many toolchains fail to set the appropriate bits
7148 for the floating-point model they use. */
7149 break;
7150 case EF_ARM_SOFT_FLOAT:
7151 fp_model = ARM_FLOAT_SOFT_FPA;
7152 break;
7153 case EF_ARM_VFP_FLOAT:
7154 fp_model = ARM_FLOAT_VFP;
7155 break;
7156 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
7157 fp_model = ARM_FLOAT_SOFT_VFP;
7158 break;
7159 }
7160 }
7161
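      /* BE8 images keep instructions little-endian even though data is
         big-endian, so code must be read with the opposite byte order.  */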
7162 if (e_flags & EF_ARM_BE8)
7163 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
7164
7165 break;
7166
7167 default:
7168 /* Leave it as "auto". */
7169 break;
7170 }
7171 }
7172
7173 /* Check any target description for validity. */
7174 if (tdesc_has_registers (tdesc))
7175 {
7176 /* For most registers we require GDB's default names; but also allow
7177 the numeric names for sp / lr / pc, as a convenience. */
7178 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
7179 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
7180 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
7181
7182 const struct tdesc_feature *feature;
7183 int valid_p;
7184
7185 feature = tdesc_find_feature (tdesc,
7186 "org.gnu.gdb.arm.core");
7187 if (feature == NULL)
7188 {
7189 feature = tdesc_find_feature (tdesc,
7190 "org.gnu.gdb.arm.m-profile");
7191 if (feature == NULL)
7192 return NULL;
7193 else
7194 is_m = 1;
7195 }
7196
7197 tdesc_data = tdesc_data_alloc ();
7198
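      /* ARM_SP_REGNUM is 13, so the loop below checks r0-r12 under
         their default names; sp, lr and pc are then accepted under
         either their ABI names or their plain register numbers.  */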
7199 valid_p = 1;
7200 for (i = 0; i < ARM_SP_REGNUM; i++)
7201 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7202 arm_register_names[i]);
7203 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7204 ARM_SP_REGNUM,
7205 arm_sp_names);
7206 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7207 ARM_LR_REGNUM,
7208 arm_lr_names);
7209 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7210 ARM_PC_REGNUM,
7211 arm_pc_names);
7212 if (is_m)
7213 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7214 ARM_PS_REGNUM, "xpsr");
7215 else
7216 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7217 ARM_PS_REGNUM, "cpsr");
7218
7219 if (!valid_p)
7220 {
7221 tdesc_data_cleanup (tdesc_data);
7222 return NULL;
7223 }
7224
7225 feature = tdesc_find_feature (tdesc,
7226 "org.gnu.gdb.arm.fpa");
7227 if (feature != NULL)
7228 {
7229 valid_p = 1;
7230 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
7231 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7232 arm_register_names[i]);
7233 if (!valid_p)
7234 {
7235 tdesc_data_cleanup (tdesc_data);
7236 return NULL;
7237 }
7238 }
7239 else
7240 have_fpa_registers = 0;
7241
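      /* The iWMMXt feature is optional.  When present, the wR data
         registers and the wCGR control registers must be numbered;
         the remaining control registers may be missing.  */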
7242 feature = tdesc_find_feature (tdesc,
7243 "org.gnu.gdb.xscale.iwmmxt");
7244 if (feature != NULL)
7245 {
7246 static const char *const iwmmxt_names[] = {
7247 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
7248 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
7249 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
7250 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
7251 };
7252
7253 valid_p = 1;
7254 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
7255 valid_p
7256 &= tdesc_numbered_register (feature, tdesc_data, i,
7257 iwmmxt_names[i - ARM_WR0_REGNUM]);
7258
7259 /* Check for the control registers, but do not fail if they
7260 are missing. */
7261 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
7262 tdesc_numbered_register (feature, tdesc_data, i,
7263 iwmmxt_names[i - ARM_WR0_REGNUM]);
7264
7265 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
7266 valid_p
7267 &= tdesc_numbered_register (feature, tdesc_data, i,
7268 iwmmxt_names[i - ARM_WR0_REGNUM]);
7269
7270 if (!valid_p)
7271 {
7272 tdesc_data_cleanup (tdesc_data);
7273 return NULL;
7274 }
7275 }
7276
7277 /* If we have a VFP unit, check whether the single precision registers
7278 are present. If not, then we will synthesize them as pseudo
7279 registers. */
7280 feature = tdesc_find_feature (tdesc,
7281 "org.gnu.gdb.arm.vfp");
7282 if (feature != NULL)
7283 {
7284 static const char *const vfp_double_names[] = {
7285 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
7286 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
7287 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
7288 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
7289 };
7290
7291 /* Require the double precision registers. There must be either
7292 16 or 32. */
7293 valid_p = 1;
7294 for (i = 0; i < 32; i++)
7295 {
7296 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7297 ARM_D0_REGNUM + i,
7298 vfp_double_names[i]);
7299 if (!valid_p)
7300 break;
7301 }
7302
7303 if (!valid_p && i != 16)
7304 {
7305 tdesc_data_cleanup (tdesc_data);
7306 return NULL;
7307 }
7308
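          /* If the description does not name s0 itself, the target does
             not supply single-precision registers; s0-s31 will be
             synthesized as pseudo registers overlapping d0-d15.  */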
7309 if (tdesc_unnumbered_register (feature, "s0") == 0)
7310 have_vfp_pseudos = 1;
7311
7312 have_vfp_registers = 1;
7313
7314 /* If we have VFP, also check for NEON. The architecture allows
7315 NEON without VFP (integer vector operations only), but GDB
7316 does not support that. */
7317 feature = tdesc_find_feature (tdesc,
7318 "org.gnu.gdb.arm.neon");
7319 if (feature != NULL)
7320 {
7321 /* NEON requires 32 double-precision registers. */
7322 if (i != 32)
7323 {
7324 tdesc_data_cleanup (tdesc_data);
7325 return NULL;
7326 }
7327
7328 /* If there are quad registers defined by the stub, use
7329 their type; otherwise (normally) provide them with
7330 the default type. */
7331 if (tdesc_unnumbered_register (feature, "q0") == 0)
7332 have_neon_pseudos = 1;
7333
7334 have_neon = 1;
7335 }
7336 }
7337 }
7338
7339 /* If there is already a candidate, use it. */
7340 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
7341 best_arch != NULL;
7342 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
7343 {
7344 if (arm_abi != ARM_ABI_AUTO
7345 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
7346 continue;
7347
7348 if (fp_model != ARM_FLOAT_AUTO
7349 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
7350 continue;
7351
7352 /* There are various other properties in tdep that we do not
7353 need to check here: those derived from a target description,
7354 since gdbarches with a different target description are
7355 automatically disqualified. */
7356
7357 /* Do check is_m, though, since it might come from the binary. */
7358 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
7359 continue;
7360
7361 /* Found a match. */
7362 break;
7363 }
7364
7365 if (best_arch != NULL)
7366 {
7367 if (tdesc_data != NULL)
7368 tdesc_data_cleanup (tdesc_data);
7369 return best_arch->gdbarch;
7370 }
7371
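  /* No existing gdbarch matched the ABI, float model and M-profile
     setting, so create a new one.  The tdep records these properties
     both for target-dependent code and for later candidate lookups.  */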
7372 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
7373 gdbarch = gdbarch_alloc (&info, tdep);
7374
7375 /* Record additional information about the architecture we are defining.
7376 These are gdbarch discriminators, like the OSABI. */
7377 tdep->arm_abi = arm_abi;
7378 tdep->fp_model = fp_model;
7379 tdep->is_m = is_m;
7380 tdep->have_fpa_registers = have_fpa_registers;
7381 tdep->have_vfp_registers = have_vfp_registers;
7382 tdep->have_vfp_pseudos = have_vfp_pseudos;
7383 tdep->have_neon_pseudos = have_neon_pseudos;
7384 tdep->have_neon = have_neon;
7385
7386 /* Breakpoints. */
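  /* The instruction patterns depend on the byte order used for code,
     which the BE8 handling above may have forced to little-endian
     even on an otherwise big-endian target.  */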
7387 switch (info.byte_order_for_code)
7388 {
7389 case BFD_ENDIAN_BIG:
7390 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
7391 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
7392 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
7393 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
7394
7395 break;
7396
7397 case BFD_ENDIAN_LITTLE:
7398 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
7399 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
7400 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
7401 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
7402
7403 break;
7404
7405 default:
7406 internal_error (__FILE__, __LINE__,
7407 			    _("arm_gdbarch_init: bad byte order for breakpoint"));
7408 }
7409
7410 /* On ARM targets char defaults to unsigned. */
7411 set_gdbarch_char_signed (gdbarch, 0);
7412
7413 /* Note: for displaced stepping, this includes the breakpoint, and one word
7414      of additional scratch space.  This setting isn't used for anything besides
7415 displaced stepping at present. */
7416 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
7417
7418 /* This should be low enough for everything. */
7419 tdep->lowest_pc = 0x20;
7420 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
7421
7422 /* The default, for both APCS and AAPCS, is to return small
7423 structures in registers. */
7424 tdep->struct_return = reg_struct_return;
7425
7426 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
7427 set_gdbarch_frame_align (gdbarch, arm_frame_align);
7428
7429 set_gdbarch_write_pc (gdbarch, arm_write_pc);
7430
7431 /* Frame handling. */
7432 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
7433 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
7434 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
7435
7436 frame_base_set_default (gdbarch, &arm_normal_base);
7437
7438 /* Address manipulation. */
7439 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
7440 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
7441
7442 /* Advance PC across function entry code. */
7443 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
7444
7445 /* Detect whether PC is in function epilogue. */
7446 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
7447
7448 /* Skip trampolines. */
7449 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
7450
7451 /* The stack grows downward. */
7452 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
7453
7454 /* Breakpoint manipulation. */
7455 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
7456 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
7457 arm_remote_breakpoint_from_pc);
7458
7459 /* Information about registers, etc. */
7460 set_gdbarch_deprecated_fp_regnum (gdbarch, ARM_FP_REGNUM); /* ??? */
7461 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
7462 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
7463 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
7464 set_gdbarch_register_type (gdbarch, arm_register_type);
7465
7466 /* This "info float" is FPA-specific. Use the generic version if we
7467 do not have FPA. */
7468 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
7469 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
7470
7471 /* Internal <-> external register number maps. */
7472 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
7473 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
7474
7475 set_gdbarch_register_name (gdbarch, arm_register_name);
7476
7477 /* Returning results. */
7478 set_gdbarch_return_value (gdbarch, arm_return_value);
7479
7480 /* Disassembly. */
7481 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
7482
7483 /* Minsymbol frobbing. */
7484 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
7485 set_gdbarch_coff_make_msymbol_special (gdbarch,
7486 arm_coff_make_msymbol_special);
7487 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
7488
7489 /* Thumb-2 IT block support. */
7490 set_gdbarch_adjust_breakpoint_address (gdbarch,
7491 arm_adjust_breakpoint_address);
7492
7493 /* Virtual tables. */
7494 set_gdbarch_vbit_in_delta (gdbarch, 1);
7495
7496 /* Hook in the ABI-specific overrides, if they have been registered. */
7497 gdbarch_init_osabi (info, gdbarch);
7498
7499 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
7500
7501 /* Add some default predicates. */
7502 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
7503 dwarf2_append_unwinders (gdbarch);
7504 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
7505
7506 /* Now we have tuned the configuration, set a few final things,
7507 based on what the OS ABI has told us. */
7508
7509 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
7510 binaries are always marked. */
7511 if (tdep->arm_abi == ARM_ABI_AUTO)
7512 tdep->arm_abi = ARM_ABI_APCS;
7513
7514 /* We used to default to FPA for generic ARM, but almost nobody
7515 uses that now, and we now provide a way for the user to force
7516 the model. So default to the most useful variant. */
7517 if (tdep->fp_model == ARM_FLOAT_AUTO)
7518 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
7519
7520 if (tdep->jb_pc >= 0)
7521 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
7522
7523 /* Floating point sizes and format. */
7524 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
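  /* FPA (and its software emulation) stores doubles with the two
     32-bit words swapped relative to pure little-endian, hence the
     littlebyte_bigword format; other models use plain IEEE doubles.  */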
7525 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
7526 {
7527 set_gdbarch_double_format
7528 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7529 set_gdbarch_long_double_format
7530 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7531 }
7532 else
7533 {
7534 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
7535 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
7536 }
7537
7538 if (have_vfp_pseudos)
7539 {
7540 /* NOTE: These are the only pseudo registers used by
7541 the ARM target at the moment. If more are added, a
7542 little more care in numbering will be needed. */
7543
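      /* The 32 pseudo registers are s0-s31, mapped onto halves of
         d0-d15; the extra 16 are the NEON quad registers q0-q15, each
         overlaying a pair of d registers.  */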
7544 int num_pseudos = 32;
7545 if (have_neon_pseudos)
7546 num_pseudos += 16;
7547 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
7548 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
7549 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
7550 }
7551
7552 if (tdesc_data)
7553 {
7554 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
7555
7556 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
7557
7558 /* Override tdesc_register_type to adjust the types of VFP
7559 registers for NEON. */
7560 set_gdbarch_register_type (gdbarch, arm_register_type);
7561 }
7562
7563 /* Add standard register aliases. We add aliases even for those
7564      names which are used by the current architecture - it's simpler,
7565 and does no harm, since nothing ever lists user registers. */
7566 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
7567 user_reg_add (gdbarch, arm_register_aliases[i].name,
7568 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
7569
7570 return gdbarch;
7571 }
7572
7573 static void
7574 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
7575 {
7576 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7577
7578 if (tdep == NULL)
7579 return;
7580
7581   fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx\n"),
7582 (unsigned long) tdep->lowest_pc);
7583 }
7584
7585 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
7586
7587 void
7588 _initialize_arm_tdep (void)
7589 {
7590 struct ui_file *stb;
7591 long length;
7592 struct cmd_list_element *new_set, *new_show;
7593 const char *setname;
7594 const char *setdesc;
7595 const char *const *regnames;
7596 int numregs, i, j;
7597 static char *helptext;
7598 char regdesc[1024], *rdptr = regdesc;
7599 size_t rest = sizeof (regdesc);
7600
7601 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
7602
7603 arm_objfile_data_key
7604 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
7605
7606 /* Register an ELF OS ABI sniffer for ARM binaries. */
7607 gdbarch_register_osabi_sniffer (bfd_arch_arm,
7608 bfd_target_elf_flavour,
7609 arm_elf_osabi_sniffer);
7610
7611 /* Initialize the standard target descriptions. */
7612 initialize_tdesc_arm_with_m ();
7613
7614 /* Get the number of possible sets of register names defined in opcodes. */
7615 num_disassembly_options = get_arm_regname_num_options ();
7616
7617 /* Add root prefix command for all "set arm"/"show arm" commands. */
7618 add_prefix_cmd ("arm", no_class, set_arm_command,
7619 _("Various ARM-specific commands."),
7620 &setarmcmdlist, "set arm ", 0, &setlist);
7621
7622 add_prefix_cmd ("arm", no_class, show_arm_command,
7623 _("Various ARM-specific commands."),
7624 &showarmcmdlist, "show arm ", 0, &showlist);
7625
7626 /* Sync the opcode insn printer with our register viewer. */
7627 parse_arm_disassembler_option ("reg-names-std");
7628
7629 /* Initialize the array that will be passed to
7630 add_setshow_enum_cmd(). */
7631 valid_disassembly_styles
7632 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
7633 for (i = 0; i < num_disassembly_options; i++)
7634 {
7635 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
7636 valid_disassembly_styles[i] = setname;
7637 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
7638 rdptr += length;
7639 rest -= length;
7640 /* When we find the default names, tell the disassembler to use
7641 them. */
7642 if (!strcmp (setname, "std"))
7643 {
7644 disassembly_style = setname;
7645 set_arm_regname_option (i);
7646 }
7647 }
7648 /* Mark the end of valid options. */
7649 valid_disassembly_styles[num_disassembly_options] = NULL;
7650
7651 /* Create the help text. */
7652 stb = mem_fileopen ();
7653 fprintf_unfiltered (stb, "%s%s%s",
7654 _("The valid values are:\n"),
7655 regdesc,
7656 _("The default is \"std\"."));
7657 helptext = ui_file_xstrdup (stb, NULL);
7658 ui_file_delete (stb);
7659
7660   add_setshow_enum_cmd ("disassembler", no_class,
7661 valid_disassembly_styles, &disassembly_style,
7662 _("Set the disassembly style."),
7663 _("Show the disassembly style."),
7664 helptext,
7665 set_disassembly_style_sfunc,
7666 NULL, /* FIXME: i18n: The disassembly style is \"%s\". */
7667 &setarmcmdlist, &showarmcmdlist);
7668
7669 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
7670 _("Set usage of ARM 32-bit mode."),
7671 _("Show usage of ARM 32-bit mode."),
7672 _("When off, a 26-bit PC will be used."),
7673 NULL,
7674 NULL, /* FIXME: i18n: Usage of ARM 32-bit mode is %s. */
7675 &setarmcmdlist, &showarmcmdlist);
7676
7677 /* Add a command to allow the user to force the FPU model. */
7678 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
7679 _("Set the floating point type."),
7680 _("Show the floating point type."),
7681 			_("auto - Determine the FP type from the OS-ABI.\n\
7682 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
7683 fpa - FPA co-processor (GCC compiled).\n\
7684 softvfp - Software FP with pure-endian doubles.\n\
7685 vfp - VFP co-processor."),
7686 set_fp_model_sfunc, show_fp_model,
7687 &setarmcmdlist, &showarmcmdlist);
7688
7689 /* Add a command to allow the user to force the ABI. */
7690 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
7691 _("Set the ABI."),
7692 _("Show the ABI."),
7693 NULL, arm_set_abi, arm_show_abi,
7694 &setarmcmdlist, &showarmcmdlist);
7695
7696 /* Add two commands to allow the user to force the assumed
7697 execution mode. */
7698 add_setshow_enum_cmd ("fallback-mode", class_support,
7699 arm_mode_strings, &arm_fallback_mode_string,
7700 _("Set the mode assumed when symbols are unavailable."),
7701 _("Show the mode assumed when symbols are unavailable."),
7702 NULL, NULL, arm_show_fallback_mode,
7703 &setarmcmdlist, &showarmcmdlist);
7704 add_setshow_enum_cmd ("force-mode", class_support,
7705 arm_mode_strings, &arm_force_mode_string,
7706 _("Set the mode assumed even when symbols are available."),
7707 _("Show the mode assumed even when symbols are available."),
7708 NULL, NULL, arm_show_force_mode,
7709 &setarmcmdlist, &showarmcmdlist);
7710
7711 /* Debugging flag. */
7712 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
7713 _("Set ARM debugging."),
7714 _("Show ARM debugging."),
7715 _("When on, arm-specific debugging is enabled."),
7716 NULL,
7717 			   NULL, /* FIXME: i18n: ARM debugging is %s.  */
7718 &setdebuglist, &showdebuglist);
7719 }