/* Common target dependent code for GDB on ARM systems.

   Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
22 #include <ctype.h> /* XXX for isupper () */
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
34 #include "arch-utils.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
40 #include "dwarf2-frame.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
47 #include "gdb/sim-arm.h"
50 #include "coff/internal.h"
53 #include "gdb_assert.h"
56 #include "features/arm-with-m.c"
/* Macros for setting and testing a bit in a minimal symbol that marks
   it as Thumb function.  The MSB of the minimal symbol's "info" field
   is used for this purpose.

   MSYMBOL_SET_SPECIAL	Actually sets the "special" bit.
   MSYMBOL_IS_SPECIAL	Tests the "special" bit in a minimal symbol.  */

#define MSYMBOL_SET_SPECIAL(msym)				\
	MSYMBOL_TARGET_FLAG_1 (msym) = 1

#define MSYMBOL_IS_SPECIAL(msym)				\
	MSYMBOL_TARGET_FLAG_1 (msym)
73 /* Per-objfile data used for mapping symbols. */
74 static const struct objfile_data
*arm_objfile_data_key
;
76 struct arm_mapping_symbol
81 typedef struct arm_mapping_symbol arm_mapping_symbol_s
;
82 DEF_VEC_O(arm_mapping_symbol_s
);
84 struct arm_per_objfile
86 VEC(arm_mapping_symbol_s
) **section_maps
;
89 /* The list of available "set arm ..." and "show arm ..." commands. */
90 static struct cmd_list_element
*setarmcmdlist
= NULL
;
91 static struct cmd_list_element
*showarmcmdlist
= NULL
;
93 /* The type of floating-point to use. Keep this in sync with enum
94 arm_float_model, and the help string in _initialize_arm_tdep. */
95 static const char *fp_model_strings
[] =
105 /* A variable that can be configured by the user. */
106 static enum arm_float_model arm_fp_model
= ARM_FLOAT_AUTO
;
107 static const char *current_fp_model
= "auto";
109 /* The ABI to use. Keep this in sync with arm_abi_kind. */
110 static const char *arm_abi_strings
[] =
118 /* A variable that can be configured by the user. */
119 static enum arm_abi_kind arm_abi_global
= ARM_ABI_AUTO
;
120 static const char *arm_abi_string
= "auto";
122 /* The execution mode to assume. */
123 static const char *arm_mode_strings
[] =
131 static const char *arm_fallback_mode_string
= "auto";
132 static const char *arm_force_mode_string
= "auto";
134 /* Number of different reg name sets (options). */
135 static int num_disassembly_options
;
137 /* The standard register names, and all the valid aliases for them. */
142 } arm_register_aliases
[] = {
143 /* Basic register numbers. */
160 /* Synonyms (argument and variable registers). */
173 /* Other platform-specific names for r9. */
181 /* Names used by GCC (not listed in the ARM EABI). */
184 /* A special name from the older ATPCS. */
188 static const char *const arm_register_names
[] =
189 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
190 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
191 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
192 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
193 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
194 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
195 "fps", "cpsr" }; /* 24 25 */
197 /* Valid register name styles. */
198 static const char **valid_disassembly_styles
;
200 /* Disassembly style to use. Default to "std" register names. */
201 static const char *disassembly_style
;
203 /* This is used to keep the bfd arch_info in sync with the disassembly
205 static void set_disassembly_style_sfunc(char *, int,
206 struct cmd_list_element
*);
207 static void set_disassembly_style (void);
209 static void convert_from_extended (const struct floatformat
*, const void *,
211 static void convert_to_extended (const struct floatformat
*, void *,
214 static void arm_neon_quad_read (struct gdbarch
*gdbarch
,
215 struct regcache
*regcache
,
216 int regnum
, gdb_byte
*buf
);
217 static void arm_neon_quad_write (struct gdbarch
*gdbarch
,
218 struct regcache
*regcache
,
219 int regnum
, const gdb_byte
*buf
);
221 struct arm_prologue_cache
223 /* The stack pointer at the time this frame was created; i.e. the
224 caller's stack pointer when this function was called. It is used
225 to identify this frame. */
228 /* The frame base for this frame is just prev_sp - frame size.
229 FRAMESIZE is the distance from the frame pointer to the
230 initial stack pointer. */
234 /* The register used to hold the frame pointer for this frame. */
237 /* Saved register offsets. */
238 struct trad_frame_saved_reg
*saved_regs
;
241 static CORE_ADDR
arm_analyze_prologue (struct gdbarch
*gdbarch
,
242 CORE_ADDR prologue_start
,
243 CORE_ADDR prologue_end
,
244 struct arm_prologue_cache
*cache
);
/* Architecture version for displaced stepping.  This effects the behaviour of
   certain instructions, and really should not be hard-wired.  */

#define DISPLACED_STEPPING_ARCH_VERSION		5

/* Addresses for calling Thumb functions have the bit 0 set.
   Here are some macros to test, set, or clear bit 0 of addresses.  */
#define IS_THUMB_ADDR(addr)	((addr) & 1)
#define MAKE_THUMB_ADDR(addr)	((addr) | 1)
#define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
257 /* Set to true if the 32-bit mode is in use. */
261 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
264 arm_psr_thumb_bit (struct gdbarch
*gdbarch
)
266 if (gdbarch_tdep (gdbarch
)->is_m
)
272 /* Determine if FRAME is executing in Thumb mode. */
275 arm_frame_is_thumb (struct frame_info
*frame
)
278 ULONGEST t_bit
= arm_psr_thumb_bit (get_frame_arch (frame
));
280 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
281 directly (from a signal frame or dummy frame) or by interpreting
282 the saved LR (from a prologue or DWARF frame). So consult it and
283 trust the unwinders. */
284 cpsr
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
286 return (cpsr
& t_bit
) != 0;
289 /* Callback for VEC_lower_bound. */
292 arm_compare_mapping_symbols (const struct arm_mapping_symbol
*lhs
,
293 const struct arm_mapping_symbol
*rhs
)
295 return lhs
->value
< rhs
->value
;
298 /* Search for the mapping symbol covering MEMADDR. If one is found,
299 return its type. Otherwise, return 0. If START is non-NULL,
300 set *START to the location of the mapping symbol. */
303 arm_find_mapping_symbol (CORE_ADDR memaddr
, CORE_ADDR
*start
)
305 struct obj_section
*sec
;
307 /* If there are mapping symbols, consult them. */
308 sec
= find_pc_section (memaddr
);
311 struct arm_per_objfile
*data
;
312 VEC(arm_mapping_symbol_s
) *map
;
313 struct arm_mapping_symbol map_key
= { memaddr
- obj_section_addr (sec
),
317 data
= objfile_data (sec
->objfile
, arm_objfile_data_key
);
320 map
= data
->section_maps
[sec
->the_bfd_section
->index
];
321 if (!VEC_empty (arm_mapping_symbol_s
, map
))
323 struct arm_mapping_symbol
*map_sym
;
325 idx
= VEC_lower_bound (arm_mapping_symbol_s
, map
, &map_key
,
326 arm_compare_mapping_symbols
);
328 /* VEC_lower_bound finds the earliest ordered insertion
329 point. If the following symbol starts at this exact
330 address, we use that; otherwise, the preceding
331 mapping symbol covers this address. */
332 if (idx
< VEC_length (arm_mapping_symbol_s
, map
))
334 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
);
335 if (map_sym
->value
== map_key
.value
)
338 *start
= map_sym
->value
+ obj_section_addr (sec
);
339 return map_sym
->type
;
345 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
- 1);
347 *start
= map_sym
->value
+ obj_section_addr (sec
);
348 return map_sym
->type
;
357 static CORE_ADDR
arm_get_next_pc_raw (struct frame_info
*frame
,
358 CORE_ADDR pc
, int insert_bkpt
);
360 /* Determine if the program counter specified in MEMADDR is in a Thumb
361 function. This function should be called for addresses unrelated to
362 any executing frame; otherwise, prefer arm_frame_is_thumb. */
365 arm_pc_is_thumb (struct gdbarch
*gdbarch
, CORE_ADDR memaddr
)
367 struct obj_section
*sec
;
368 struct minimal_symbol
*sym
;
371 /* If bit 0 of the address is set, assume this is a Thumb address. */
372 if (IS_THUMB_ADDR (memaddr
))
375 /* If the user wants to override the symbol table, let him. */
376 if (strcmp (arm_force_mode_string
, "arm") == 0)
378 if (strcmp (arm_force_mode_string
, "thumb") == 0)
381 /* ARM v6-M and v7-M are always in Thumb mode. */
382 if (gdbarch_tdep (gdbarch
)->is_m
)
385 /* If there are mapping symbols, consult them. */
386 type
= arm_find_mapping_symbol (memaddr
, NULL
);
390 /* Thumb functions have a "special" bit set in minimal symbols. */
391 sym
= lookup_minimal_symbol_by_pc (memaddr
);
393 return (MSYMBOL_IS_SPECIAL (sym
));
395 /* If the user wants to override the fallback mode, let them. */
396 if (strcmp (arm_fallback_mode_string
, "arm") == 0)
398 if (strcmp (arm_fallback_mode_string
, "thumb") == 0)
401 /* If we couldn't find any symbol, but we're talking to a running
402 target, then trust the current value of $cpsr. This lets
403 "display/i $pc" always show the correct mode (though if there is
404 a symbol table we will not reach here, so it still may not be
405 displayed in the mode it will be executed).
407 As a further heuristic if we detect that we are doing a single-step we
408 see what state executing the current instruction ends up with us being
410 if (target_has_registers
)
412 struct frame_info
*current_frame
= get_current_frame ();
413 CORE_ADDR current_pc
= get_frame_pc (current_frame
);
414 int is_thumb
= arm_frame_is_thumb (current_frame
);
416 if (memaddr
== current_pc
)
420 struct gdbarch
*gdbarch
= get_frame_arch (current_frame
);
421 next_pc
= arm_get_next_pc_raw (current_frame
, current_pc
, FALSE
);
422 if (memaddr
== gdbarch_addr_bits_remove (gdbarch
, next_pc
))
423 return IS_THUMB_ADDR (next_pc
);
429 /* Otherwise we're out of luck; we assume ARM. */
433 /* Remove useless bits from addresses in a running program. */
435 arm_addr_bits_remove (struct gdbarch
*gdbarch
, CORE_ADDR val
)
438 return UNMAKE_THUMB_ADDR (val
);
440 return (val
& 0x03fffffc);
443 /* When reading symbols, we need to zap the low bit of the address,
444 which may be set to 1 for Thumb functions. */
446 arm_smash_text_address (struct gdbarch
*gdbarch
, CORE_ADDR val
)
451 /* Return 1 if PC is the start of a compiler helper function which
452 can be safely ignored during prologue skipping. */
454 skip_prologue_function (CORE_ADDR pc
)
456 struct minimal_symbol
*msym
;
459 msym
= lookup_minimal_symbol_by_pc (pc
);
460 if (msym
== NULL
|| SYMBOL_VALUE_ADDRESS (msym
) != pc
)
463 name
= SYMBOL_LINKAGE_NAME (msym
);
467 /* The GNU linker's Thumb call stub to foo is named
469 if (strstr (name
, "_from_thumb") != NULL
)
472 /* On soft-float targets, __truncdfsf2 is called to convert promoted
473 arguments to their argument types in non-prototyped
475 if (strncmp (name
, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
477 if (strncmp (name
, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
480 /* Internal functions related to thread-local storage. */
481 if (strncmp (name
, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
483 if (strncmp (name
, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
/* Support routines for instruction parsing.  */

/* Mask of the low (X + 1) bits.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST through FN (inclusive) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Bits ST through FN of OBJ, sign-extended from bit FN.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* Destination of an ARM-mode branch: PC + 8 plus the sign-extended,
   left-shifted 24-bit offset in INSTR.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.
   IMM is the 12-bit (i:imm3:imm8) modified immediate field; the return
   value is the expanded 32-bit constant.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  /* Top five bits (i:imm3:top bit of imm8) select the encoding.  */
  unsigned int count = imm >> 7;

  if (count < 8)
    switch (count / 2)
      {
      case 0:
	/* 00000000 00000000 00000000 abcdefgh.  */
	return imm & 0xff;
      case 1:
	/* 00000000 abcdefgh 00000000 abcdefgh.  */
	return (imm & 0xff) | ((imm & 0xff) << 16);
      case 2:
	/* abcdefgh 00000000 abcdefgh 00000000.  */
	return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
      case 3:
	/* abcdefgh abcdefgh abcdefgh abcdefgh.  */
	return (imm & 0xff) | ((imm & 0xff) << 8)
	  | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
      }

  /* Rotated constant: 1bcdefgh rotated right by COUNT.  Use an
     unsigned constant so the shift cannot overflow a signed int.  */
  return (0x80u | (imm & 0x7f)) << (32 - count);
}
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  if ((inst & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    return 1;

  if ((inst & 0xf000) == 0xd000)	/* conditional branch */
    return 1;

  if ((inst & 0xf800) == 0xe000)	/* unconditional branch */
    return 1;

  if ((inst & 0xff00) == 0x4700)	/* bx REG, blx REG */
    return 1;

  if ((inst & 0xff87) == 0x4687)	/* mov pc, REG */
    return 1;

  if ((inst & 0xf500) == 0xb100)	/* CBNZ or CBZ.  */
    return 1;

  return 0;
}
/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
   might change control flow, 0 otherwise.  This predicate is
   deliberately conservative: returning 1 merely stops prologue
   scanning early.  */

static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP: changes the PC only if PC is in the
	     register list.  */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB: likewise.  */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA.  */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB.  */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR with Rt == PC (bits 12-15 of INST2): any load whose
	 destination is the PC changes control flow, including the
	 PC-relative (Rn == PC, bits 0-3 of INST1) and register-offset
	 ((inst2 & 0x0fc0) == 0x0000) forms.  */
      if (bits (inst1, 0, 3) == 15)
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)
	return 1;

      /* Be conservative about the remaining immediate forms: the
	 destination is still the PC.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  return 0;
}
644 /* Analyze a Thumb prologue, looking for a recognizable stack frame
645 and frame pointer. Scan until we encounter a store that could
646 clobber the stack frame unexpectedly, or an unknown instruction.
647 Return the last address which is definitely safe to skip for an
648 initial breakpoint. */
651 thumb_analyze_prologue (struct gdbarch
*gdbarch
,
652 CORE_ADDR start
, CORE_ADDR limit
,
653 struct arm_prologue_cache
*cache
)
655 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
656 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
659 struct pv_area
*stack
;
660 struct cleanup
*back_to
;
662 CORE_ADDR unrecognized_pc
= 0;
664 for (i
= 0; i
< 16; i
++)
665 regs
[i
] = pv_register (i
, 0);
666 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
667 back_to
= make_cleanup_free_pv_area (stack
);
669 while (start
< limit
)
673 insn
= read_memory_unsigned_integer (start
, 2, byte_order_for_code
);
675 if ((insn
& 0xfe00) == 0xb400) /* push { rlist } */
680 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
683 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
684 whether to save LR (R14). */
685 mask
= (insn
& 0xff) | ((insn
& 0x100) << 6);
687 /* Calculate offsets of saved R0-R7 and LR. */
688 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
689 if (mask
& (1 << regno
))
691 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
693 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
696 else if ((insn
& 0xff00) == 0xb000) /* add sp, #simm OR
699 offset
= (insn
& 0x7f) << 2; /* get scaled offset */
700 if (insn
& 0x80) /* Check for SUB. */
701 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
704 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
707 else if ((insn
& 0xf800) == 0xa800) /* add Rd, sp, #imm */
708 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[ARM_SP_REGNUM
],
710 else if ((insn
& 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
711 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
712 regs
[bits (insn
, 0, 2)] = pv_add_constant (regs
[bits (insn
, 3, 5)],
714 else if ((insn
& 0xf800) == 0x3000 /* add Rd, #imm */
715 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
716 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[bits (insn
, 8, 10)],
718 else if ((insn
& 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
719 && pv_is_register (regs
[bits (insn
, 6, 8)], ARM_SP_REGNUM
)
720 && pv_is_constant (regs
[bits (insn
, 3, 5)]))
721 regs
[bits (insn
, 0, 2)] = pv_add (regs
[bits (insn
, 3, 5)],
722 regs
[bits (insn
, 6, 8)]);
723 else if ((insn
& 0xff00) == 0x4400 /* add Rd, Rm */
724 && pv_is_constant (regs
[bits (insn
, 3, 6)]))
726 int rd
= (bit (insn
, 7) << 3) + bits (insn
, 0, 2);
727 int rm
= bits (insn
, 3, 6);
728 regs
[rd
] = pv_add (regs
[rd
], regs
[rm
]);
730 else if ((insn
& 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
732 int dst_reg
= (insn
& 0x7) + ((insn
& 0x80) >> 4);
733 int src_reg
= (insn
& 0x78) >> 3;
734 regs
[dst_reg
] = regs
[src_reg
];
736 else if ((insn
& 0xf800) == 0x9000) /* str rd, [sp, #off] */
738 /* Handle stores to the stack. Normally pushes are used,
739 but with GCC -mtpcs-frame, there may be other stores
740 in the prologue to create the frame. */
741 int regno
= (insn
>> 8) & 0x7;
744 offset
= (insn
& 0xff) << 2;
745 addr
= pv_add_constant (regs
[ARM_SP_REGNUM
], offset
);
747 if (pv_area_store_would_trash (stack
, addr
))
750 pv_area_store (stack
, addr
, 4, regs
[regno
]);
752 else if ((insn
& 0xf800) == 0x6000) /* str rd, [rn, #off] */
754 int rd
= bits (insn
, 0, 2);
755 int rn
= bits (insn
, 3, 5);
758 offset
= bits (insn
, 6, 10) << 2;
759 addr
= pv_add_constant (regs
[rn
], offset
);
761 if (pv_area_store_would_trash (stack
, addr
))
764 pv_area_store (stack
, addr
, 4, regs
[rd
]);
766 else if (((insn
& 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
767 || (insn
& 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
768 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
769 /* Ignore stores of argument registers to the stack. */
771 else if ((insn
& 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
772 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
773 /* Ignore block loads from the stack, potentially copying
774 parameters from memory. */
776 else if ((insn
& 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
777 || ((insn
& 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
778 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
)))
779 /* Similarly ignore single loads from the stack. */
781 else if ((insn
& 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
782 || (insn
& 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
783 /* Skip register copies, i.e. saves to another register
784 instead of the stack. */
786 else if ((insn
& 0xf800) == 0x2000) /* movs Rd, #imm */
787 /* Recognize constant loads; even with small stacks these are necessary
789 regs
[bits (insn
, 8, 10)] = pv_constant (bits (insn
, 0, 7));
790 else if ((insn
& 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
792 /* Constant pool loads, for the same reason. */
793 unsigned int constant
;
796 loc
= start
+ 4 + bits (insn
, 0, 7) * 4;
797 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
798 regs
[bits (insn
, 8, 10)] = pv_constant (constant
);
800 else if ((insn
& 0xe000) == 0xe000)
802 unsigned short inst2
;
804 inst2
= read_memory_unsigned_integer (start
+ 2, 2,
805 byte_order_for_code
);
807 if ((insn
& 0xf800) == 0xf000 && (inst2
& 0xe800) == 0xe800)
809 /* BL, BLX. Allow some special function calls when
810 skipping the prologue; GCC generates these before
811 storing arguments to the stack. */
813 int j1
, j2
, imm1
, imm2
;
815 imm1
= sbits (insn
, 0, 10);
816 imm2
= bits (inst2
, 0, 10);
817 j1
= bit (inst2
, 13);
818 j2
= bit (inst2
, 11);
820 offset
= ((imm1
<< 12) + (imm2
<< 1));
821 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
823 nextpc
= start
+ 4 + offset
;
824 /* For BLX make sure to clear the low bits. */
825 if (bit (inst2
, 12) == 0)
826 nextpc
= nextpc
& 0xfffffffc;
828 if (!skip_prologue_function (nextpc
))
832 else if ((insn
& 0xffd0) == 0xe900 /* stmdb Rn{!}, { registers } */
833 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
835 pv_t addr
= regs
[bits (insn
, 0, 3)];
838 if (pv_area_store_would_trash (stack
, addr
))
841 /* Calculate offsets of saved registers. */
842 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
843 if (inst2
& (1 << regno
))
845 addr
= pv_add_constant (addr
, -4);
846 pv_area_store (stack
, addr
, 4, regs
[regno
]);
850 regs
[bits (insn
, 0, 3)] = addr
;
853 else if ((insn
& 0xff50) == 0xe940 /* strd Rt, Rt2, [Rn, #+/-imm]{!} */
854 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
856 int regno1
= bits (inst2
, 12, 15);
857 int regno2
= bits (inst2
, 8, 11);
858 pv_t addr
= regs
[bits (insn
, 0, 3)];
860 offset
= inst2
& 0xff;
862 addr
= pv_add_constant (addr
, offset
);
864 addr
= pv_add_constant (addr
, -offset
);
866 if (pv_area_store_would_trash (stack
, addr
))
869 pv_area_store (stack
, addr
, 4, regs
[regno1
]);
870 pv_area_store (stack
, pv_add_constant (addr
, 4),
874 regs
[bits (insn
, 0, 3)] = addr
;
877 else if ((insn
& 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
878 && (inst2
& 0x0c00) == 0x0c00
879 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
881 int regno
= bits (inst2
, 12, 15);
882 pv_t addr
= regs
[bits (insn
, 0, 3)];
884 offset
= inst2
& 0xff;
886 addr
= pv_add_constant (addr
, offset
);
888 addr
= pv_add_constant (addr
, -offset
);
890 if (pv_area_store_would_trash (stack
, addr
))
893 pv_area_store (stack
, addr
, 4, regs
[regno
]);
896 regs
[bits (insn
, 0, 3)] = addr
;
899 else if ((insn
& 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
900 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
902 int regno
= bits (inst2
, 12, 15);
905 offset
= inst2
& 0xfff;
906 addr
= pv_add_constant (regs
[bits (insn
, 0, 3)], offset
);
908 if (pv_area_store_would_trash (stack
, addr
))
911 pv_area_store (stack
, addr
, 4, regs
[regno
]);
914 else if ((insn
& 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
915 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
916 /* Ignore stores of argument registers to the stack. */
919 else if ((insn
& 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
920 && (inst2
& 0x0d00) == 0x0c00
921 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
922 /* Ignore stores of argument registers to the stack. */
925 else if ((insn
& 0xffd0) == 0xe890 /* ldmia Rn[!], { registers } */
926 && (inst2
& 0x8000) == 0x0000
927 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
928 /* Ignore block loads from the stack, potentially copying
929 parameters from memory. */
932 else if ((insn
& 0xffb0) == 0xe950 /* ldrd Rt, Rt2, [Rn, #+/-imm] */
933 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
934 /* Similarly ignore dual loads from the stack. */
937 else if ((insn
& 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
938 && (inst2
& 0x0d00) == 0x0c00
939 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
940 /* Similarly ignore single loads from the stack. */
943 else if ((insn
& 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
944 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
945 /* Similarly ignore single loads from the stack. */
948 else if ((insn
& 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
949 && (inst2
& 0x8000) == 0x0000)
951 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
952 | (bits (inst2
, 12, 14) << 8)
953 | bits (inst2
, 0, 7));
955 regs
[bits (inst2
, 8, 11)]
956 = pv_add_constant (regs
[bits (insn
, 0, 3)],
957 thumb_expand_immediate (imm
));
960 else if ((insn
& 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
961 && (inst2
& 0x8000) == 0x0000)
963 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
964 | (bits (inst2
, 12, 14) << 8)
965 | bits (inst2
, 0, 7));
967 regs
[bits (inst2
, 8, 11)]
968 = pv_add_constant (regs
[bits (insn
, 0, 3)], imm
);
971 else if ((insn
& 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
972 && (inst2
& 0x8000) == 0x0000)
974 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
975 | (bits (inst2
, 12, 14) << 8)
976 | bits (inst2
, 0, 7));
978 regs
[bits (inst2
, 8, 11)]
979 = pv_add_constant (regs
[bits (insn
, 0, 3)],
980 - (CORE_ADDR
) thumb_expand_immediate (imm
));
983 else if ((insn
& 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
984 && (inst2
& 0x8000) == 0x0000)
986 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
987 | (bits (inst2
, 12, 14) << 8)
988 | bits (inst2
, 0, 7));
990 regs
[bits (inst2
, 8, 11)]
991 = pv_add_constant (regs
[bits (insn
, 0, 3)], - (CORE_ADDR
) imm
);
994 else if ((insn
& 0xfbff) == 0xf04f) /* mov.w Rd, #const */
996 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
997 | (bits (inst2
, 12, 14) << 8)
998 | bits (inst2
, 0, 7));
1000 regs
[bits (inst2
, 8, 11)]
1001 = pv_constant (thumb_expand_immediate (imm
));
1004 else if ((insn
& 0xfbf0) == 0xf240) /* movw Rd, #const */
1006 unsigned int imm
= ((bits (insn
, 0, 3) << 12)
1007 | (bits (insn
, 10, 10) << 11)
1008 | (bits (inst2
, 12, 14) << 8)
1009 | bits (inst2
, 0, 7));
1011 regs
[bits (inst2
, 8, 11)] = pv_constant (imm
);
1014 else if (insn
== 0xea5f /* mov.w Rd,Rm */
1015 && (inst2
& 0xf0f0) == 0)
1017 int dst_reg
= (inst2
& 0x0f00) >> 8;
1018 int src_reg
= inst2
& 0xf;
1019 regs
[dst_reg
] = regs
[src_reg
];
1022 else if ((insn
& 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1024 /* Constant pool loads. */
1025 unsigned int constant
;
1028 offset
= bits (insn
, 0, 11);
1030 loc
= start
+ 4 + offset
;
1032 loc
= start
+ 4 - offset
;
1034 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1035 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1038 else if ((insn
& 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1040 /* Constant pool loads. */
1041 unsigned int constant
;
1044 offset
= bits (insn
, 0, 7) << 2;
1046 loc
= start
+ 4 + offset
;
1048 loc
= start
+ 4 - offset
;
1050 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1051 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1053 constant
= read_memory_unsigned_integer (loc
+ 4, 4, byte_order
);
1054 regs
[bits (inst2
, 8, 11)] = pv_constant (constant
);
1057 else if (thumb2_instruction_changes_pc (insn
, inst2
))
1059 /* Don't scan past anything that might change control flow. */
1064 /* The optimizer might shove anything into the prologue,
1065 so we just skip what we don't recognize. */
1066 unrecognized_pc
= start
;
1071 else if (thumb_instruction_changes_pc (insn
))
1073 /* Don't scan past anything that might change control flow. */
1078 /* The optimizer might shove anything into the prologue,
1079 so we just skip what we don't recognize. */
1080 unrecognized_pc
= start
;
1087 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1088 paddress (gdbarch
, start
));
1090 if (unrecognized_pc
== 0)
1091 unrecognized_pc
= start
;
1095 do_cleanups (back_to
);
1096 return unrecognized_pc
;
1099 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1101 /* Frame pointer is fp. Frame size is constant. */
1102 cache
->framereg
= ARM_FP_REGNUM
;
1103 cache
->framesize
= -regs
[ARM_FP_REGNUM
].k
;
1105 else if (pv_is_register (regs
[THUMB_FP_REGNUM
], ARM_SP_REGNUM
))
1107 /* Frame pointer is r7. Frame size is constant. */
1108 cache
->framereg
= THUMB_FP_REGNUM
;
1109 cache
->framesize
= -regs
[THUMB_FP_REGNUM
].k
;
1111 else if (pv_is_register (regs
[ARM_SP_REGNUM
], ARM_SP_REGNUM
))
1113 /* Try the stack pointer... this is a bit desperate. */
1114 cache
->framereg
= ARM_SP_REGNUM
;
1115 cache
->framesize
= -regs
[ARM_SP_REGNUM
].k
;
1119 /* We're just out of luck. We don't know where the frame is. */
1120 cache
->framereg
= -1;
1121 cache
->framesize
= 0;
1124 for (i
= 0; i
< 16; i
++)
1125 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
1126 cache
->saved_regs
[i
].addr
= offset
;
1128 do_cleanups (back_to
);
1129 return unrecognized_pc
;
1132 /* Advance the PC across any function entry prologue instructions to
1133 reach some "real" code.
1135 The APCS (ARM Procedure Call Standard) defines the following
1139 [stmfd sp!, {a1,a2,a3,a4}]
1140 stmfd sp!, {...,fp,ip,lr,pc}
1141 [stfe f7, [sp, #-12]!]
1142 [stfe f6, [sp, #-12]!]
1143 [stfe f5, [sp, #-12]!]
1144 [stfe f4, [sp, #-12]!]
1145 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn */
1148 arm_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
1150 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1153 CORE_ADDR func_addr
, limit_pc
;
1154 struct symtab_and_line sal
;
1156 /* See if we can determine the end of the prologue via the symbol table.
1157 If so, then return either PC, or the PC after the prologue, whichever
1159 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
1161 CORE_ADDR post_prologue_pc
1162 = skip_prologue_using_sal (gdbarch
, func_addr
);
1163 struct symtab
*s
= find_pc_symtab (func_addr
);
1165 /* GCC always emits a line note before the prologue and another
1166 one after, even if the two are at the same address or on the
1167 same line. Take advantage of this so that we do not need to
1168 know every instruction that might appear in the prologue. We
1169 will have producer information for most binaries; if it is
1170 missing (e.g. for -gstabs), assuming the GNU tools. */
1171 if (post_prologue_pc
1173 || s
->producer
== NULL
1174 || strncmp (s
->producer
, "GNU ", sizeof ("GNU ") - 1) == 0))
1175 return post_prologue_pc
;
1177 if (post_prologue_pc
!= 0)
1179 CORE_ADDR analyzed_limit
;
1181 /* For non-GCC compilers, make sure the entire line is an
1182 acceptable prologue; GDB will round this function's
1183 return value up to the end of the following line so we
1184 can not skip just part of a line (and we do not want to).
1186 RealView does not treat the prologue specially, but does
1187 associate prologue code with the opening brace; so this
1188 lets us skip the first line if we think it is the opening
1190 if (arm_pc_is_thumb (gdbarch
, func_addr
))
1191 analyzed_limit
= thumb_analyze_prologue (gdbarch
, func_addr
,
1192 post_prologue_pc
, NULL
);
1194 analyzed_limit
= arm_analyze_prologue (gdbarch
, func_addr
,
1195 post_prologue_pc
, NULL
);
1197 if (analyzed_limit
!= post_prologue_pc
)
1200 return post_prologue_pc
;
1204 /* Can't determine prologue from the symbol table, need to examine
1207 /* Find an upper limit on the function prologue using the debug
1208 information. If the debug information could not be used to provide
1209 that bound, then use an arbitrary large number as the upper bound. */
1210 /* Like arm_scan_prologue, stop no later than pc + 64. */
1211 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
1213 limit_pc
= pc
+ 64; /* Magic. */
1216 /* Check if this is Thumb code. */
1217 if (arm_pc_is_thumb (gdbarch
, pc
))
1218 return thumb_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
1220 for (skip_pc
= pc
; skip_pc
< limit_pc
; skip_pc
+= 4)
1222 inst
= read_memory_unsigned_integer (skip_pc
, 4, byte_order_for_code
);
1224 /* "mov ip, sp" is no longer a required part of the prologue. */
1225 if (inst
== 0xe1a0c00d) /* mov ip, sp */
1228 if ((inst
& 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1231 if ((inst
& 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1234 /* Some prologues begin with "str lr, [sp, #-4]!". */
1235 if (inst
== 0xe52de004) /* str lr, [sp, #-4]! */
1238 if ((inst
& 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1241 if ((inst
& 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1244 /* Any insns after this point may float into the code, if it makes
1245 for better instruction scheduling, so we skip them only if we
1246 find them, but still consider the function to be frame-ful. */
1248 /* We may have either one sfmfd instruction here, or several stfe
1249 insns, depending on the version of floating point code we
1251 if ((inst
& 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1254 if ((inst
& 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1257 if ((inst
& 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1260 if ((inst
& 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1263 if ((inst
& 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1264 || (inst
& 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1265 || (inst
& 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1268 if ((inst
& 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1269 || (inst
& 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1270 || (inst
& 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1273 /* Un-recognized instruction; stop scanning. */
1277 return skip_pc
; /* End of prologue */
1281 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1282 This function decodes a Thumb function prologue to determine:
1283 1) the size of the stack frame
1284 2) which registers are saved on it
1285 3) the offsets of saved regs
1286 4) the offset from the stack pointer to the frame pointer
1288 A typical Thumb function prologue would create this stack frame
1289 (offsets relative to FP)
1290 old SP -> 24 stack parameters
1293 R7 -> 0 local variables (16 bytes)
1294 SP -> -12 additional stack space (12 bytes)
1295 The frame size would thus be 36 bytes, and the frame offset would be
1296 12 bytes. The frame register is R7.
1298 The comments for thumb_skip_prolog() describe the algorithm we use
1299 to detect the end of the prolog. */
1303 thumb_scan_prologue (struct gdbarch
*gdbarch
, CORE_ADDR prev_pc
,
1304 CORE_ADDR block_addr
, struct arm_prologue_cache
*cache
)
1306 CORE_ADDR prologue_start
;
1307 CORE_ADDR prologue_end
;
1308 CORE_ADDR current_pc
;
1310 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1313 /* See comment in arm_scan_prologue for an explanation of
1315 if (prologue_end
> prologue_start
+ 64)
1317 prologue_end
= prologue_start
+ 64;
1321 /* We're in the boondocks: we have no idea where the start of the
1325 prologue_end
= min (prologue_end
, prev_pc
);
1327 thumb_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
1330 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1333 arm_instruction_changes_pc (uint32_t this_instr
)
1335 if (bits (this_instr
, 28, 31) == INST_NV
)
1336 /* Unconditional instructions. */
1337 switch (bits (this_instr
, 24, 27))
1341 /* Branch with Link and change to Thumb. */
1346 /* Coprocessor register transfer. */
1347 if (bits (this_instr
, 12, 15) == 15)
1348 error (_("Invalid update to pc in instruction"));
1354 switch (bits (this_instr
, 25, 27))
1357 if (bits (this_instr
, 23, 24) == 2 && bit (this_instr
, 20) == 0)
1359 /* Multiplies and extra load/stores. */
1360 if (bit (this_instr
, 4) == 1 && bit (this_instr
, 7) == 1)
1361 /* Neither multiplies nor extension load/stores are allowed
1365 /* Otherwise, miscellaneous instructions. */
1367 /* BX <reg>, BXJ <reg>, BLX <reg> */
1368 if (bits (this_instr
, 4, 27) == 0x12fff1
1369 || bits (this_instr
, 4, 27) == 0x12fff2
1370 || bits (this_instr
, 4, 27) == 0x12fff3)
1373 /* Other miscellaneous instructions are unpredictable if they
1377 /* Data processing instruction. Fall through. */
1380 if (bits (this_instr
, 12, 15) == 15)
1387 /* Media instructions and architecturally undefined instructions. */
1388 if (bits (this_instr
, 25, 27) == 3 && bit (this_instr
, 4) == 1)
1392 if (bit (this_instr
, 20) == 0)
1396 if (bits (this_instr
, 12, 15) == ARM_PC_REGNUM
)
1402 /* Load/store multiple. */
1403 if (bit (this_instr
, 20) == 1 && bit (this_instr
, 15) == 1)
1409 /* Branch and branch with link. */
1414 /* Coprocessor transfers or SWIs can not affect PC. */
1418 internal_error (__FILE__
, __LINE__
, "bad value in switch");
1422 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1423 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1424 fill it in. Return the first address not recognized as a prologue
1427 We recognize all the instructions typically found in ARM prologues,
1428 plus harmless instructions which can be skipped (either for analysis
1429 purposes, or a more restrictive set that can be skipped when finding
1430 the end of the prologue). */
1433 arm_analyze_prologue (struct gdbarch
*gdbarch
,
1434 CORE_ADDR prologue_start
, CORE_ADDR prologue_end
,
1435 struct arm_prologue_cache
*cache
)
1437 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1438 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1440 CORE_ADDR offset
, current_pc
;
1441 pv_t regs
[ARM_FPS_REGNUM
];
1442 struct pv_area
*stack
;
1443 struct cleanup
*back_to
;
1444 int framereg
, framesize
;
1445 CORE_ADDR unrecognized_pc
= 0;
1447 /* Search the prologue looking for instructions that set up the
1448 frame pointer, adjust the stack pointer, and save registers.
1450 Be careful, however, and if it doesn't look like a prologue,
1451 don't try to scan it. If, for instance, a frameless function
1452 begins with stmfd sp!, then we will tell ourselves there is
1453 a frame, which will confuse stack traceback, as well as "finish"
1454 and other operations that rely on a knowledge of the stack
1457 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1458 regs
[regno
] = pv_register (regno
, 0);
1459 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
1460 back_to
= make_cleanup_free_pv_area (stack
);
1462 for (current_pc
= prologue_start
;
1463 current_pc
< prologue_end
;
1467 = read_memory_unsigned_integer (current_pc
, 4, byte_order_for_code
);
1469 if (insn
== 0xe1a0c00d) /* mov ip, sp */
1471 regs
[ARM_IP_REGNUM
] = regs
[ARM_SP_REGNUM
];
1474 else if ((insn
& 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1475 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1477 unsigned imm
= insn
& 0xff; /* immediate value */
1478 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1479 int rd
= bits (insn
, 12, 15);
1480 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1481 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], imm
);
1484 else if ((insn
& 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1485 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1487 unsigned imm
= insn
& 0xff; /* immediate value */
1488 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1489 int rd
= bits (insn
, 12, 15);
1490 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1491 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], -imm
);
1494 else if ((insn
& 0xffff0fff) == 0xe52d0004) /* str Rd, [sp, #-4]! */
1496 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1498 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1499 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4,
1500 regs
[bits (insn
, 12, 15)]);
1503 else if ((insn
& 0xffff0000) == 0xe92d0000)
1504 /* stmfd sp!, {..., fp, ip, lr, pc}
1506 stmfd sp!, {a1, a2, a3, a4} */
1508 int mask
= insn
& 0xffff;
1510 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1513 /* Calculate offsets of saved registers. */
1514 for (regno
= ARM_PC_REGNUM
; regno
>= 0; regno
--)
1515 if (mask
& (1 << regno
))
1517 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1518 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
1521 else if ((insn
& 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1522 || (insn
& 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1523 || (insn
& 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1525 /* No need to add this to saved_regs -- it's just an arg reg. */
1528 else if ((insn
& 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1529 || (insn
& 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1530 || (insn
& 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1532 /* No need to add this to saved_regs -- it's just an arg reg. */
1535 else if ((insn
& 0xfff00000) == 0xe8800000 /* stm Rn, { registers } */
1536 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1538 /* No need to add this to saved_regs -- it's just arg regs. */
1541 else if ((insn
& 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1543 unsigned imm
= insn
& 0xff; /* immediate value */
1544 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1545 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1546 regs
[ARM_FP_REGNUM
] = pv_add_constant (regs
[ARM_IP_REGNUM
], -imm
);
1548 else if ((insn
& 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1550 unsigned imm
= insn
& 0xff; /* immediate value */
1551 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1552 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1553 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -imm
);
1555 else if ((insn
& 0xffff7fff) == 0xed6d0103 /* stfe f?, [sp, -#c]! */
1556 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1558 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1561 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1562 regno
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x07);
1563 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12, regs
[regno
]);
1565 else if ((insn
& 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4, [sp!] */
1566 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1568 int n_saved_fp_regs
;
1569 unsigned int fp_start_reg
, fp_bound_reg
;
1571 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1574 if ((insn
& 0x800) == 0x800) /* N0 is set */
1576 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1577 n_saved_fp_regs
= 3;
1579 n_saved_fp_regs
= 1;
1583 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1584 n_saved_fp_regs
= 2;
1586 n_saved_fp_regs
= 4;
1589 fp_start_reg
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x7);
1590 fp_bound_reg
= fp_start_reg
+ n_saved_fp_regs
;
1591 for (; fp_start_reg
< fp_bound_reg
; fp_start_reg
++)
1593 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1594 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12,
1595 regs
[fp_start_reg
++]);
1598 else if ((insn
& 0xff000000) == 0xeb000000 && cache
== NULL
) /* bl */
1600 /* Allow some special function calls when skipping the
1601 prologue; GCC generates these before storing arguments to
1603 CORE_ADDR dest
= BranchDest (current_pc
, insn
);
1605 if (skip_prologue_function (dest
))
1610 else if ((insn
& 0xf0000000) != 0xe0000000)
1611 break; /* Condition not true, exit early */
1612 else if (arm_instruction_changes_pc (insn
))
1613 /* Don't scan past anything that might change control flow. */
1615 else if ((insn
& 0xfe500000) == 0xe8100000) /* ldm */
1617 /* Ignore block loads from the stack, potentially copying
1618 parameters from memory. */
1619 if (pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1624 else if ((insn
& 0xfc500000) == 0xe4100000)
1626 /* Similarly ignore single loads from the stack. */
1627 if (pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1632 else if ((insn
& 0xffff0ff0) == 0xe1a00000)
1633 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1634 register instead of the stack. */
1638 /* The optimizer might shove anything into the prologue,
1639 so we just skip what we don't recognize. */
1640 unrecognized_pc
= current_pc
;
1645 if (unrecognized_pc
== 0)
1646 unrecognized_pc
= current_pc
;
1648 /* The frame size is just the distance from the frame register
1649 to the original stack pointer. */
1650 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1652 /* Frame pointer is fp. */
1653 framereg
= ARM_FP_REGNUM
;
1654 framesize
= -regs
[ARM_FP_REGNUM
].k
;
1656 else if (pv_is_register (regs
[ARM_SP_REGNUM
], ARM_SP_REGNUM
))
1658 /* Try the stack pointer... this is a bit desperate. */
1659 framereg
= ARM_SP_REGNUM
;
1660 framesize
= -regs
[ARM_SP_REGNUM
].k
;
1664 /* We're just out of luck. We don't know where the frame is. */
1671 cache
->framereg
= framereg
;
1672 cache
->framesize
= framesize
;
1674 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1675 if (pv_area_find_reg (stack
, gdbarch
, regno
, &offset
))
1676 cache
->saved_regs
[regno
].addr
= offset
;
1680 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1681 paddress (gdbarch
, unrecognized_pc
));
1683 do_cleanups (back_to
);
1684 return unrecognized_pc
;
1688 arm_scan_prologue (struct frame_info
*this_frame
,
1689 struct arm_prologue_cache
*cache
)
1691 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1692 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1694 CORE_ADDR prologue_start
, prologue_end
, current_pc
;
1695 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
1696 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
1697 pv_t regs
[ARM_FPS_REGNUM
];
1698 struct pv_area
*stack
;
1699 struct cleanup
*back_to
;
1702 /* Assume there is no frame until proven otherwise. */
1703 cache
->framereg
= ARM_SP_REGNUM
;
1704 cache
->framesize
= 0;
1706 /* Check for Thumb prologue. */
1707 if (arm_frame_is_thumb (this_frame
))
1709 thumb_scan_prologue (gdbarch
, prev_pc
, block_addr
, cache
);
1713 /* Find the function prologue. If we can't find the function in
1714 the symbol table, peek in the stack frame to find the PC. */
1715 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1718 /* One way to find the end of the prologue (which works well
1719 for unoptimized code) is to do the following:
1721 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1724 prologue_end = prev_pc;
1725 else if (sal.end < prologue_end)
1726 prologue_end = sal.end;
1728 This mechanism is very accurate so long as the optimizer
1729 doesn't move any instructions from the function body into the
1730 prologue. If this happens, sal.end will be the last
1731 instruction in the first hunk of prologue code just before
1732 the first instruction that the scheduler has moved from
1733 the body to the prologue.
1735 In order to make sure that we scan all of the prologue
1736 instructions, we use a slightly less accurate mechanism which
1737 may scan more than necessary. To help compensate for this
1738 lack of accuracy, the prologue scanning loop below contains
1739 several clauses which'll cause the loop to terminate early if
1740 an implausible prologue instruction is encountered.
1746 is a suitable endpoint since it accounts for the largest
1747 possible prologue plus up to five instructions inserted by
1750 if (prologue_end
> prologue_start
+ 64)
1752 prologue_end
= prologue_start
+ 64; /* See above. */
1757 /* We have no symbol information. Our only option is to assume this
1758 function has a standard stack frame and the normal frame register.
1759 Then, we can find the value of our frame pointer on entrance to
1760 the callee (or at the present moment if this is the innermost frame).
1761 The value stored there should be the address of the stmfd + 8. */
1762 CORE_ADDR frame_loc
;
1763 LONGEST return_value
;
1765 frame_loc
= get_frame_register_unsigned (this_frame
, ARM_FP_REGNUM
);
1766 if (!safe_read_memory_integer (frame_loc
, 4, byte_order
, &return_value
))
1770 prologue_start
= gdbarch_addr_bits_remove
1771 (gdbarch
, return_value
) - 8;
1772 prologue_end
= prologue_start
+ 64; /* See above. */
1776 if (prev_pc
< prologue_end
)
1777 prologue_end
= prev_pc
;
1779 arm_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
1782 static struct arm_prologue_cache
*
1783 arm_make_prologue_cache (struct frame_info
*this_frame
)
1786 struct arm_prologue_cache
*cache
;
1787 CORE_ADDR unwound_fp
;
1789 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
1790 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1792 arm_scan_prologue (this_frame
, cache
);
1794 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
1795 if (unwound_fp
== 0)
1798 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
1800 /* Calculate actual addresses of saved registers using offsets
1801 determined by arm_scan_prologue. */
1802 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
1803 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
1804 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
1809 /* Our frame ID for a normal frame is the current function's starting PC
1810 and the caller's SP when we were called. */
1813 arm_prologue_this_id (struct frame_info
*this_frame
,
1815 struct frame_id
*this_id
)
1817 struct arm_prologue_cache
*cache
;
1821 if (*this_cache
== NULL
)
1822 *this_cache
= arm_make_prologue_cache (this_frame
);
1823 cache
= *this_cache
;
1825 /* This is meant to halt the backtrace at "_start". */
1826 pc
= get_frame_pc (this_frame
);
1827 if (pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
1830 /* If we've hit a wall, stop. */
1831 if (cache
->prev_sp
== 0)
1834 func
= get_frame_func (this_frame
);
1835 id
= frame_id_build (cache
->prev_sp
, func
);
1839 static struct value
*
1840 arm_prologue_prev_register (struct frame_info
*this_frame
,
1844 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1845 struct arm_prologue_cache
*cache
;
1847 if (*this_cache
== NULL
)
1848 *this_cache
= arm_make_prologue_cache (this_frame
);
1849 cache
= *this_cache
;
1851 /* If we are asked to unwind the PC, then we need to return the LR
1852 instead. The prologue may save PC, but it will point into this
1853 frame's prologue, not the next frame's resume location. Also
1854 strip the saved T bit. A valid LR may have the low bit set, but
1855 a valid PC never does. */
1856 if (prev_regnum
== ARM_PC_REGNUM
)
1860 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
1861 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1862 arm_addr_bits_remove (gdbarch
, lr
));
1865 /* SP is generally not saved to the stack, but this frame is
1866 identified by the next frame's stack pointer at the time of the call.
1867 The value was already reconstructed into PREV_SP. */
1868 if (prev_regnum
== ARM_SP_REGNUM
)
1869 return frame_unwind_got_constant (this_frame
, prev_regnum
, cache
->prev_sp
);
1871 /* The CPSR may have been changed by the call instruction and by the
1872 called function. The only bit we can reconstruct is the T bit,
1873 by checking the low bit of LR as of the call. This is a reliable
1874 indicator of Thumb-ness except for some ARM v4T pre-interworking
1875 Thumb code, which could get away with a clear low bit as long as
1876 the called function did not use bx. Guess that all other
1877 bits are unchanged; the condition flags are presumably lost,
1878 but the processor status is likely valid. */
1879 if (prev_regnum
== ARM_PS_REGNUM
)
1882 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
1884 cpsr
= get_frame_register_unsigned (this_frame
, prev_regnum
);
1885 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
1886 if (IS_THUMB_ADDR (lr
))
1890 return frame_unwind_got_constant (this_frame
, prev_regnum
, cpsr
);
1893 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1897 struct frame_unwind arm_prologue_unwind
= {
1899 arm_prologue_this_id
,
1900 arm_prologue_prev_register
,
1902 default_frame_sniffer
1905 static struct arm_prologue_cache
*
1906 arm_make_stub_cache (struct frame_info
*this_frame
)
1908 struct arm_prologue_cache
*cache
;
1910 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
1911 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1913 cache
->prev_sp
= get_frame_register_unsigned (this_frame
, ARM_SP_REGNUM
);
1918 /* Our frame ID for a stub frame is the current SP and LR. */
1921 arm_stub_this_id (struct frame_info
*this_frame
,
1923 struct frame_id
*this_id
)
1925 struct arm_prologue_cache
*cache
;
1927 if (*this_cache
== NULL
)
1928 *this_cache
= arm_make_stub_cache (this_frame
);
1929 cache
= *this_cache
;
1931 *this_id
= frame_id_build (cache
->prev_sp
, get_frame_pc (this_frame
));
1935 arm_stub_unwind_sniffer (const struct frame_unwind
*self
,
1936 struct frame_info
*this_frame
,
1937 void **this_prologue_cache
)
1939 CORE_ADDR addr_in_block
;
1942 addr_in_block
= get_frame_address_in_block (this_frame
);
1943 if (in_plt_section (addr_in_block
, NULL
)
1944 /* We also use the stub winder if the target memory is unreadable
1945 to avoid having the prologue unwinder trying to read it. */
1946 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1952 struct frame_unwind arm_stub_unwind
= {
1955 arm_prologue_prev_register
,
1957 arm_stub_unwind_sniffer
1961 arm_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1963 struct arm_prologue_cache
*cache
;
1965 if (*this_cache
== NULL
)
1966 *this_cache
= arm_make_prologue_cache (this_frame
);
1967 cache
= *this_cache
;
1969 return cache
->prev_sp
- cache
->framesize
;
1972 struct frame_base arm_normal_base
= {
1973 &arm_prologue_unwind
,
1974 arm_normal_frame_base
,
1975 arm_normal_frame_base
,
1976 arm_normal_frame_base
1979 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1980 dummy frame. The frame ID's base needs to match the TOS value
1981 saved by save_dummy_frame_tos() and returned from
1982 arm_push_dummy_call, and the PC needs to match the dummy frame's
1985 static struct frame_id
1986 arm_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1988 return frame_id_build (get_frame_register_unsigned (this_frame
, ARM_SP_REGNUM
),
1989 get_frame_pc (this_frame
));
1992 /* Given THIS_FRAME, find the previous frame's resume PC (which will
1993 be used to construct the previous frame's ID, after looking up the
1994 containing function). */
1997 arm_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2000 pc
= frame_unwind_register_unsigned (this_frame
, ARM_PC_REGNUM
);
2001 return arm_addr_bits_remove (gdbarch
, pc
);
2005 arm_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2007 return frame_unwind_register_unsigned (this_frame
, ARM_SP_REGNUM
);
2010 static struct value
*
2011 arm_dwarf2_prev_register (struct frame_info
*this_frame
, void **this_cache
,
2014 struct gdbarch
* gdbarch
= get_frame_arch (this_frame
);
2016 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
2021 /* The PC is normally copied from the return column, which
2022 describes saves of LR. However, that version may have an
2023 extra bit set to indicate Thumb state. The bit is not
2025 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2026 return frame_unwind_got_constant (this_frame
, regnum
,
2027 arm_addr_bits_remove (gdbarch
, lr
));
2030 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2031 cpsr
= get_frame_register_unsigned (this_frame
, regnum
);
2032 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2033 if (IS_THUMB_ADDR (lr
))
2037 return frame_unwind_got_constant (this_frame
, regnum
, cpsr
);
2040 internal_error (__FILE__
, __LINE__
,
2041 _("Unexpected register %d"), regnum
);
2046 arm_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
2047 struct dwarf2_frame_state_reg
*reg
,
2048 struct frame_info
*this_frame
)
2054 reg
->how
= DWARF2_FRAME_REG_FN
;
2055 reg
->loc
.fn
= arm_dwarf2_prev_register
;
2058 reg
->how
= DWARF2_FRAME_REG_CFA
;
2063 /* Return true if we are in the function's epilogue, i.e. after the
2064 instruction that destroyed the function's stack frame. */
2067 thumb_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
2069 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2070 unsigned int insn
, insn2
;
2071 int found_return
= 0, found_stack_adjust
= 0;
2072 CORE_ADDR func_start
, func_end
;
2076 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
2079 /* The epilogue is a sequence of instructions along the following lines:
2081 - add stack frame size to SP or FP
2082 - [if frame pointer used] restore SP from FP
2083 - restore registers from SP [may include PC]
2084 - a return-type instruction [if PC wasn't already restored]
2086 In a first pass, we scan forward from the current PC and verify the
2087 instructions we find as compatible with this sequence, ending in a
2090 However, this is not sufficient to distinguish indirect function calls
2091 within a function from indirect tail calls in the epilogue in some cases.
2092 Therefore, if we didn't already find any SP-changing instruction during
2093 forward scan, we add a backward scanning heuristic to ensure we actually
2094 are in the epilogue. */
2097 while (scan_pc
< func_end
&& !found_return
)
2099 if (target_read_memory (scan_pc
, buf
, 2))
2103 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
2105 if ((insn
& 0xff80) == 0x4700) /* bx <Rm> */
2107 else if (insn
== 0x46f7) /* mov pc, lr */
2109 else if (insn
== 0x46bd) /* mov sp, r7 */
2110 found_stack_adjust
= 1;
2111 else if ((insn
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2112 found_stack_adjust
= 1;
2113 else if ((insn
& 0xfe00) == 0xbc00) /* pop <registers> */
2115 found_stack_adjust
= 1;
2116 if (insn
& 0x0100) /* <registers> include PC. */
2119 else if ((insn
& 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
2121 if (target_read_memory (scan_pc
, buf
, 2))
2125 insn2
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
2127 if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
2129 found_stack_adjust
= 1;
2130 if (insn2
& 0x8000) /* <registers> include PC. */
2133 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
2134 && (insn2
& 0x0fff) == 0x0b04)
2136 found_stack_adjust
= 1;
2137 if ((insn2
& 0xf000) == 0xf000) /* <Rt> is PC. */
2140 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
2141 && (insn2
& 0x0e00) == 0x0a00)
2142 found_stack_adjust
= 1;
2153 /* Since any instruction in the epilogue sequence, with the possible
2154 exception of return itself, updates the stack pointer, we need to
2155 scan backwards for at most one instruction. Try either a 16-bit or
2156 a 32-bit instruction. This is just a heuristic, so we do not worry
2157 too much about false positives.*/
2159 if (!found_stack_adjust
)
2161 if (pc
- 4 < func_start
)
2163 if (target_read_memory (pc
- 4, buf
, 4))
2166 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
2167 insn2
= extract_unsigned_integer (buf
+ 2, 2, byte_order_for_code
);
2169 if (insn2
== 0x46bd) /* mov sp, r7 */
2170 found_stack_adjust
= 1;
2171 else if ((insn2
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2172 found_stack_adjust
= 1;
2173 else if ((insn2
& 0xff00) == 0xbc00) /* pop <registers> without PC */
2174 found_stack_adjust
= 1;
2175 else if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
2176 found_stack_adjust
= 1;
2177 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
2178 && (insn2
& 0x0fff) == 0x0b04)
2179 found_stack_adjust
= 1;
2180 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
2181 && (insn2
& 0x0e00) == 0x0a00)
2182 found_stack_adjust
= 1;
2185 return found_stack_adjust
;
2188 /* Return true if we are in the function's epilogue, i.e. after the
2189 instruction that destroyed the function's stack frame. */
2192 arm_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
2194 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2196 int found_return
, found_stack_adjust
;
2197 CORE_ADDR func_start
, func_end
;
2199 if (arm_pc_is_thumb (gdbarch
, pc
))
2200 return thumb_in_function_epilogue_p (gdbarch
, pc
);
2202 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
2205 /* We are in the epilogue if the previous instruction was a stack
2206 adjustment and the next instruction is a possible return (bx, mov
2207 pc, or pop). We could have to scan backwards to find the stack
2208 adjustment, or forwards to find the return, but this is a decent
2209 approximation. First scan forwards. */
2212 insn
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
2213 if (bits (insn
, 28, 31) != INST_NV
)
2215 if ((insn
& 0x0ffffff0) == 0x012fff10)
2218 else if ((insn
& 0x0ffffff0) == 0x01a0f000)
2221 else if ((insn
& 0x0fff0000) == 0x08bd0000
2222 && (insn
& 0x0000c000) != 0)
2223 /* POP (LDMIA), including PC or LR. */
2230 /* Scan backwards. This is just a heuristic, so do not worry about
2231 false positives from mode changes. */
2233 if (pc
< func_start
+ 4)
2236 found_stack_adjust
= 0;
2237 insn
= read_memory_unsigned_integer (pc
- 4, 4, byte_order_for_code
);
2238 if (bits (insn
, 28, 31) != INST_NV
)
2240 if ((insn
& 0x0df0f000) == 0x0080d000)
2241 /* ADD SP (register or immediate). */
2242 found_stack_adjust
= 1;
2243 else if ((insn
& 0x0df0f000) == 0x0040d000)
2244 /* SUB SP (register or immediate). */
2245 found_stack_adjust
= 1;
2246 else if ((insn
& 0x0ffffff0) == 0x01a0d000)
2248 found_stack_adjust
= 1;
2249 else if ((insn
& 0x0fff0000) == 0x08bd0000)
2251 found_stack_adjust
= 1;
2254 if (found_stack_adjust
)
2261 /* When arguments must be pushed onto the stack, they go on in reverse
2262 order. The code below implements a FILO (stack) to do this. */
2267 struct stack_item
*prev
;
2271 static struct stack_item
*
2272 push_stack_item (struct stack_item
*prev
, const void *contents
, int len
)
2274 struct stack_item
*si
;
2275 si
= xmalloc (sizeof (struct stack_item
));
2276 si
->data
= xmalloc (len
);
2279 memcpy (si
->data
, contents
, len
);
2283 static struct stack_item
*
2284 pop_stack_item (struct stack_item
*si
)
2286 struct stack_item
*dead
= si
;
2294 /* Return the alignment (in bytes) of the given type. */
2297 arm_type_align (struct type
*t
)
2303 t
= check_typedef (t
);
2304 switch (TYPE_CODE (t
))
2307 /* Should never happen. */
2308 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
2312 case TYPE_CODE_ENUM
:
2316 case TYPE_CODE_RANGE
:
2317 case TYPE_CODE_BITSTRING
:
2319 case TYPE_CODE_CHAR
:
2320 case TYPE_CODE_BOOL
:
2321 return TYPE_LENGTH (t
);
2323 case TYPE_CODE_ARRAY
:
2324 case TYPE_CODE_COMPLEX
:
2325 /* TODO: What about vector types? */
2326 return arm_type_align (TYPE_TARGET_TYPE (t
));
2328 case TYPE_CODE_STRUCT
:
2329 case TYPE_CODE_UNION
:
2331 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
2333 falign
= arm_type_align (TYPE_FIELD_TYPE (t
, n
));
2341 /* Possible base types for a candidate for passing and returning in
2344 enum arm_vfp_cprc_base_type
2353 /* The length of one element of base type B. */
2356 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b
)
2360 case VFP_CPRC_SINGLE
:
2362 case VFP_CPRC_DOUBLE
:
2364 case VFP_CPRC_VEC64
:
2366 case VFP_CPRC_VEC128
:
2369 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
2374 /* The character ('s', 'd' or 'q') for the type of VFP register used
2375 for passing base type B. */
2378 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b
)
2382 case VFP_CPRC_SINGLE
:
2384 case VFP_CPRC_DOUBLE
:
2386 case VFP_CPRC_VEC64
:
2388 case VFP_CPRC_VEC128
:
2391 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
2396 /* Determine whether T may be part of a candidate for passing and
2397 returning in VFP registers, ignoring the limit on the total number
2398 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
2399 classification of the first valid component found; if it is not
2400 VFP_CPRC_UNKNOWN, all components must have the same classification
2401 as *BASE_TYPE. If it is found that T contains a type not permitted
2402 for passing and returning in VFP registers, a type differently
2403 classified from *BASE_TYPE, or two types differently classified
2404 from each other, return -1, otherwise return the total number of
2405 base-type elements found (possibly 0 in an empty structure or
2406 array). Vectors and complex types are not currently supported,
2407 matching the generic AAPCS support. */
2410 arm_vfp_cprc_sub_candidate (struct type
*t
,
2411 enum arm_vfp_cprc_base_type
*base_type
)
2413 t
= check_typedef (t
);
2414 switch (TYPE_CODE (t
))
2417 switch (TYPE_LENGTH (t
))
2420 if (*base_type
== VFP_CPRC_UNKNOWN
)
2421 *base_type
= VFP_CPRC_SINGLE
;
2422 else if (*base_type
!= VFP_CPRC_SINGLE
)
2427 if (*base_type
== VFP_CPRC_UNKNOWN
)
2428 *base_type
= VFP_CPRC_DOUBLE
;
2429 else if (*base_type
!= VFP_CPRC_DOUBLE
)
2438 case TYPE_CODE_ARRAY
:
2442 count
= arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t
), base_type
);
2445 if (TYPE_LENGTH (t
) == 0)
2447 gdb_assert (count
== 0);
2450 else if (count
== 0)
2452 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
2453 gdb_assert ((TYPE_LENGTH (t
) % unitlen
) == 0);
2454 return TYPE_LENGTH (t
) / unitlen
;
2458 case TYPE_CODE_STRUCT
:
2463 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
2465 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
2467 if (sub_count
== -1)
2471 if (TYPE_LENGTH (t
) == 0)
2473 gdb_assert (count
== 0);
2476 else if (count
== 0)
2478 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
2479 if (TYPE_LENGTH (t
) != unitlen
* count
)
2484 case TYPE_CODE_UNION
:
2489 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
2491 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
2493 if (sub_count
== -1)
2495 count
= (count
> sub_count
? count
: sub_count
);
2497 if (TYPE_LENGTH (t
) == 0)
2499 gdb_assert (count
== 0);
2502 else if (count
== 0)
2504 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
2505 if (TYPE_LENGTH (t
) != unitlen
* count
)
2517 /* Determine whether T is a VFP co-processor register candidate (CPRC)
2518 if passed to or returned from a non-variadic function with the VFP
2519 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
2520 *BASE_TYPE to the base type for T and *COUNT to the number of
2521 elements of that base type before returning. */
2524 arm_vfp_call_candidate (struct type
*t
, enum arm_vfp_cprc_base_type
*base_type
,
2527 enum arm_vfp_cprc_base_type b
= VFP_CPRC_UNKNOWN
;
2528 int c
= arm_vfp_cprc_sub_candidate (t
, &b
);
2529 if (c
<= 0 || c
> 4)
2536 /* Return 1 if the VFP ABI should be used for passing arguments to and
2537 returning values from a function of type FUNC_TYPE, 0
2541 arm_vfp_abi_for_function (struct gdbarch
*gdbarch
, struct type
*func_type
)
2543 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2544 /* Variadic functions always use the base ABI. Assume that functions
2545 without debug info are not variadic. */
2546 if (func_type
&& TYPE_VARARGS (check_typedef (func_type
)))
2548 /* The VFP ABI is only supported as a variant of AAPCS. */
2549 if (tdep
->arm_abi
!= ARM_ABI_AAPCS
)
2551 return gdbarch_tdep (gdbarch
)->fp_model
== ARM_FLOAT_VFP
;
2554 /* We currently only support passing parameters in integer registers, which
2555 conforms with GCC's default model, and VFP argument passing following
2556 the VFP variant of AAPCS. Several other variants exist and
2557 we should probably support some of them based on the selected ABI. */
2560 arm_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
2561 struct regcache
*regcache
, CORE_ADDR bp_addr
, int nargs
,
2562 struct value
**args
, CORE_ADDR sp
, int struct_return
,
2563 CORE_ADDR struct_addr
)
2565 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2569 struct stack_item
*si
= NULL
;
2572 unsigned vfp_regs_free
= (1 << 16) - 1;
2574 /* Determine the type of this function and whether the VFP ABI
2576 ftype
= check_typedef (value_type (function
));
2577 if (TYPE_CODE (ftype
) == TYPE_CODE_PTR
)
2578 ftype
= check_typedef (TYPE_TARGET_TYPE (ftype
));
2579 use_vfp_abi
= arm_vfp_abi_for_function (gdbarch
, ftype
);
2581 /* Set the return address. For the ARM, the return breakpoint is
2582 always at BP_ADDR. */
2583 if (arm_pc_is_thumb (gdbarch
, bp_addr
))
2585 regcache_cooked_write_unsigned (regcache
, ARM_LR_REGNUM
, bp_addr
);
2587 /* Walk through the list of args and determine how large a temporary
2588 stack is required. Need to take care here as structs may be
2589 passed on the stack, and we have to push them. */
2592 argreg
= ARM_A1_REGNUM
;
2595 /* The struct_return pointer occupies the first parameter
2596 passing register. */
2600 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = %s\n",
2601 gdbarch_register_name (gdbarch
, argreg
),
2602 paddress (gdbarch
, struct_addr
));
2603 regcache_cooked_write_unsigned (regcache
, argreg
, struct_addr
);
2607 for (argnum
= 0; argnum
< nargs
; argnum
++)
2610 struct type
*arg_type
;
2611 struct type
*target_type
;
2612 enum type_code typecode
;
2613 const bfd_byte
*val
;
2615 enum arm_vfp_cprc_base_type vfp_base_type
;
2617 int may_use_core_reg
= 1;
2619 arg_type
= check_typedef (value_type (args
[argnum
]));
2620 len
= TYPE_LENGTH (arg_type
);
2621 target_type
= TYPE_TARGET_TYPE (arg_type
);
2622 typecode
= TYPE_CODE (arg_type
);
2623 val
= value_contents (args
[argnum
]);
2625 align
= arm_type_align (arg_type
);
2626 /* Round alignment up to a whole number of words. */
2627 align
= (align
+ INT_REGISTER_SIZE
- 1) & ~(INT_REGISTER_SIZE
- 1);
2628 /* Different ABIs have different maximum alignments. */
2629 if (gdbarch_tdep (gdbarch
)->arm_abi
== ARM_ABI_APCS
)
2631 /* The APCS ABI only requires word alignment. */
2632 align
= INT_REGISTER_SIZE
;
2636 /* The AAPCS requires at most doubleword alignment. */
2637 if (align
> INT_REGISTER_SIZE
* 2)
2638 align
= INT_REGISTER_SIZE
* 2;
2642 && arm_vfp_call_candidate (arg_type
, &vfp_base_type
,
2650 /* Because this is a CPRC it cannot go in a core register or
2651 cause a core register to be skipped for alignment.
2652 Either it goes in VFP registers and the rest of this loop
2653 iteration is skipped for this argument, or it goes on the
2654 stack (and the stack alignment code is correct for this
2656 may_use_core_reg
= 0;
2658 unit_length
= arm_vfp_cprc_unit_length (vfp_base_type
);
2659 shift
= unit_length
/ 4;
2660 mask
= (1 << (shift
* vfp_base_count
)) - 1;
2661 for (regno
= 0; regno
< 16; regno
+= shift
)
2662 if (((vfp_regs_free
>> regno
) & mask
) == mask
)
2671 vfp_regs_free
&= ~(mask
<< regno
);
2672 reg_scaled
= regno
/ shift
;
2673 reg_char
= arm_vfp_cprc_reg_char (vfp_base_type
);
2674 for (i
= 0; i
< vfp_base_count
; i
++)
2678 if (reg_char
== 'q')
2679 arm_neon_quad_write (gdbarch
, regcache
, reg_scaled
+ i
,
2680 val
+ i
* unit_length
);
2683 sprintf (name_buf
, "%c%d", reg_char
, reg_scaled
+ i
);
2684 regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
2686 regcache_cooked_write (regcache
, regnum
,
2687 val
+ i
* unit_length
);
2694 /* This CPRC could not go in VFP registers, so all VFP
2695 registers are now marked as used. */
2700 /* Push stack padding for doubleword alignment. */
2701 if (nstack
& (align
- 1))
2703 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
2704 nstack
+= INT_REGISTER_SIZE
;
2707 /* Doubleword aligned quantities must go in even register pairs. */
2708 if (may_use_core_reg
2709 && argreg
<= ARM_LAST_ARG_REGNUM
2710 && align
> INT_REGISTER_SIZE
2714 /* If the argument is a pointer to a function, and it is a
2715 Thumb function, create a LOCAL copy of the value and set
2716 the THUMB bit in it. */
2717 if (TYPE_CODE_PTR
== typecode
2718 && target_type
!= NULL
2719 && TYPE_CODE_FUNC
== TYPE_CODE (check_typedef (target_type
)))
2721 CORE_ADDR regval
= extract_unsigned_integer (val
, len
, byte_order
);
2722 if (arm_pc_is_thumb (gdbarch
, regval
))
2724 bfd_byte
*copy
= alloca (len
);
2725 store_unsigned_integer (copy
, len
, byte_order
,
2726 MAKE_THUMB_ADDR (regval
));
2731 /* Copy the argument to general registers or the stack in
2732 register-sized pieces. Large arguments are split between
2733 registers and stack. */
2736 int partial_len
= len
< INT_REGISTER_SIZE
? len
: INT_REGISTER_SIZE
;
2738 if (may_use_core_reg
&& argreg
<= ARM_LAST_ARG_REGNUM
)
2740 /* The argument is being passed in a general purpose
2743 = extract_unsigned_integer (val
, partial_len
, byte_order
);
2744 if (byte_order
== BFD_ENDIAN_BIG
)
2745 regval
<<= (INT_REGISTER_SIZE
- partial_len
) * 8;
2747 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
2749 gdbarch_register_name
2751 phex (regval
, INT_REGISTER_SIZE
));
2752 regcache_cooked_write_unsigned (regcache
, argreg
, regval
);
2757 /* Push the arguments onto the stack. */
2759 fprintf_unfiltered (gdb_stdlog
, "arg %d @ sp + %d\n",
2761 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
2762 nstack
+= INT_REGISTER_SIZE
;
2769 /* If we have an odd number of words to push, then decrement the stack
2770 by one word now, so first stack argument will be dword aligned. */
2777 write_memory (sp
, si
->data
, si
->len
);
2778 si
= pop_stack_item (si
);
2781 /* Finally, update the SP register. */
2782 regcache_cooked_write_unsigned (regcache
, ARM_SP_REGNUM
, sp
);
2788 /* Always align the frame to an 8-byte boundary. This is required on
2789 some platforms and harmless on the rest. */
2792 arm_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
2794 /* Align the stack to eight bytes. */
2795 return sp
& ~ (CORE_ADDR
) 7;
2799 print_fpu_flags (int flags
)
2801 if (flags
& (1 << 0))
2802 fputs ("IVO ", stdout
);
2803 if (flags
& (1 << 1))
2804 fputs ("DVZ ", stdout
);
2805 if (flags
& (1 << 2))
2806 fputs ("OFL ", stdout
);
2807 if (flags
& (1 << 3))
2808 fputs ("UFL ", stdout
);
2809 if (flags
& (1 << 4))
2810 fputs ("INX ", stdout
);
2814 /* Print interesting information about the floating point processor
2815 (if present) or emulator. */
2817 arm_print_float_info (struct gdbarch
*gdbarch
, struct ui_file
*file
,
2818 struct frame_info
*frame
, const char *args
)
2820 unsigned long status
= get_frame_register_unsigned (frame
, ARM_FPS_REGNUM
);
2823 type
= (status
>> 24) & 127;
2824 if (status
& (1 << 31))
2825 printf (_("Hardware FPU type %d\n"), type
);
2827 printf (_("Software FPU type %d\n"), type
);
2828 /* i18n: [floating point unit] mask */
2829 fputs (_("mask: "), stdout
);
2830 print_fpu_flags (status
>> 16);
2831 /* i18n: [floating point unit] flags */
2832 fputs (_("flags: "), stdout
);
2833 print_fpu_flags (status
);
2836 /* Construct the ARM extended floating point type. */
2837 static struct type
*
2838 arm_ext_type (struct gdbarch
*gdbarch
)
2840 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2842 if (!tdep
->arm_ext_type
)
2844 = arch_float_type (gdbarch
, -1, "builtin_type_arm_ext",
2845 floatformats_arm_ext
);
2847 return tdep
->arm_ext_type
;
2850 static struct type
*
2851 arm_neon_double_type (struct gdbarch
*gdbarch
)
2853 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2855 if (tdep
->neon_double_type
== NULL
)
2857 struct type
*t
, *elem
;
2859 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_d",
2861 elem
= builtin_type (gdbarch
)->builtin_uint8
;
2862 append_composite_type_field (t
, "u8", init_vector_type (elem
, 8));
2863 elem
= builtin_type (gdbarch
)->builtin_uint16
;
2864 append_composite_type_field (t
, "u16", init_vector_type (elem
, 4));
2865 elem
= builtin_type (gdbarch
)->builtin_uint32
;
2866 append_composite_type_field (t
, "u32", init_vector_type (elem
, 2));
2867 elem
= builtin_type (gdbarch
)->builtin_uint64
;
2868 append_composite_type_field (t
, "u64", elem
);
2869 elem
= builtin_type (gdbarch
)->builtin_float
;
2870 append_composite_type_field (t
, "f32", init_vector_type (elem
, 2));
2871 elem
= builtin_type (gdbarch
)->builtin_double
;
2872 append_composite_type_field (t
, "f64", elem
);
2874 TYPE_VECTOR (t
) = 1;
2875 TYPE_NAME (t
) = "neon_d";
2876 tdep
->neon_double_type
= t
;
2879 return tdep
->neon_double_type
;
2882 /* FIXME: The vector types are not correctly ordered on big-endian
2883 targets. Just as s0 is the low bits of d0, d0[0] is also the low
2884 bits of d0 - regardless of what unit size is being held in d0. So
2885 the offset of the first uint8 in d0 is 7, but the offset of the
2886 first float is 4. This code works as-is for little-endian
2889 static struct type
*
2890 arm_neon_quad_type (struct gdbarch
*gdbarch
)
2892 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2894 if (tdep
->neon_quad_type
== NULL
)
2896 struct type
*t
, *elem
;
2898 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_q",
2900 elem
= builtin_type (gdbarch
)->builtin_uint8
;
2901 append_composite_type_field (t
, "u8", init_vector_type (elem
, 16));
2902 elem
= builtin_type (gdbarch
)->builtin_uint16
;
2903 append_composite_type_field (t
, "u16", init_vector_type (elem
, 8));
2904 elem
= builtin_type (gdbarch
)->builtin_uint32
;
2905 append_composite_type_field (t
, "u32", init_vector_type (elem
, 4));
2906 elem
= builtin_type (gdbarch
)->builtin_uint64
;
2907 append_composite_type_field (t
, "u64", init_vector_type (elem
, 2));
2908 elem
= builtin_type (gdbarch
)->builtin_float
;
2909 append_composite_type_field (t
, "f32", init_vector_type (elem
, 4));
2910 elem
= builtin_type (gdbarch
)->builtin_double
;
2911 append_composite_type_field (t
, "f64", init_vector_type (elem
, 2));
2913 TYPE_VECTOR (t
) = 1;
2914 TYPE_NAME (t
) = "neon_q";
2915 tdep
->neon_quad_type
= t
;
2918 return tdep
->neon_quad_type
;
2921 /* Return the GDB type object for the "standard" data type of data in
2924 static struct type
*
2925 arm_register_type (struct gdbarch
*gdbarch
, int regnum
)
2927 int num_regs
= gdbarch_num_regs (gdbarch
);
2929 if (gdbarch_tdep (gdbarch
)->have_vfp_pseudos
2930 && regnum
>= num_regs
&& regnum
< num_regs
+ 32)
2931 return builtin_type (gdbarch
)->builtin_float
;
2933 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
2934 && regnum
>= num_regs
+ 32 && regnum
< num_regs
+ 32 + 16)
2935 return arm_neon_quad_type (gdbarch
);
2937 /* If the target description has register information, we are only
2938 in this function so that we can override the types of
2939 double-precision registers for NEON. */
2940 if (tdesc_has_registers (gdbarch_target_desc (gdbarch
)))
2942 struct type
*t
= tdesc_register_type (gdbarch
, regnum
);
2944 if (regnum
>= ARM_D0_REGNUM
&& regnum
< ARM_D0_REGNUM
+ 32
2945 && TYPE_CODE (t
) == TYPE_CODE_FLT
2946 && gdbarch_tdep (gdbarch
)->have_neon
)
2947 return arm_neon_double_type (gdbarch
);
2952 if (regnum
>= ARM_F0_REGNUM
&& regnum
< ARM_F0_REGNUM
+ NUM_FREGS
)
2954 if (!gdbarch_tdep (gdbarch
)->have_fpa_registers
)
2955 return builtin_type (gdbarch
)->builtin_void
;
2957 return arm_ext_type (gdbarch
);
2959 else if (regnum
== ARM_SP_REGNUM
)
2960 return builtin_type (gdbarch
)->builtin_data_ptr
;
2961 else if (regnum
== ARM_PC_REGNUM
)
2962 return builtin_type (gdbarch
)->builtin_func_ptr
;
2963 else if (regnum
>= ARRAY_SIZE (arm_register_names
))
2964 /* These registers are only supported on targets which supply
2965 an XML description. */
2966 return builtin_type (gdbarch
)->builtin_int0
;
2968 return builtin_type (gdbarch
)->builtin_uint32
;
2971 /* Map a DWARF register REGNUM onto the appropriate GDB register
2975 arm_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
2977 /* Core integer regs. */
2978 if (reg
>= 0 && reg
<= 15)
2981 /* Legacy FPA encoding. These were once used in a way which
2982 overlapped with VFP register numbering, so their use is
2983 discouraged, but GDB doesn't support the ARM toolchain
2984 which used them for VFP. */
2985 if (reg
>= 16 && reg
<= 23)
2986 return ARM_F0_REGNUM
+ reg
- 16;
2988 /* New assignments for the FPA registers. */
2989 if (reg
>= 96 && reg
<= 103)
2990 return ARM_F0_REGNUM
+ reg
- 96;
2992 /* WMMX register assignments. */
2993 if (reg
>= 104 && reg
<= 111)
2994 return ARM_WCGR0_REGNUM
+ reg
- 104;
2996 if (reg
>= 112 && reg
<= 127)
2997 return ARM_WR0_REGNUM
+ reg
- 112;
2999 if (reg
>= 192 && reg
<= 199)
3000 return ARM_WC0_REGNUM
+ reg
- 192;
3002 /* VFP v2 registers. A double precision value is actually
3003 in d1 rather than s2, but the ABI only defines numbering
3004 for the single precision registers. This will "just work"
3005 in GDB for little endian targets (we'll read eight bytes,
3006 starting in s0 and then progressing to s1), but will be
3007 reversed on big endian targets with VFP. This won't
3008 be a problem for the new Neon quad registers; you're supposed
3009 to use DW_OP_piece for those. */
3010 if (reg
>= 64 && reg
<= 95)
3014 sprintf (name_buf
, "s%d", reg
- 64);
3015 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3019 /* VFP v3 / Neon registers. This range is also used for VFP v2
3020 registers, except that it now describes d0 instead of s0. */
3021 if (reg
>= 256 && reg
<= 287)
3025 sprintf (name_buf
, "d%d", reg
- 256);
3026 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3033 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
3035 arm_register_sim_regno (struct gdbarch
*gdbarch
, int regnum
)
3038 gdb_assert (reg
>= 0 && reg
< gdbarch_num_regs (gdbarch
));
3040 if (regnum
>= ARM_WR0_REGNUM
&& regnum
<= ARM_WR15_REGNUM
)
3041 return regnum
- ARM_WR0_REGNUM
+ SIM_ARM_IWMMXT_COP0R0_REGNUM
;
3043 if (regnum
>= ARM_WC0_REGNUM
&& regnum
<= ARM_WC7_REGNUM
)
3044 return regnum
- ARM_WC0_REGNUM
+ SIM_ARM_IWMMXT_COP1R0_REGNUM
;
3046 if (regnum
>= ARM_WCGR0_REGNUM
&& regnum
<= ARM_WCGR7_REGNUM
)
3047 return regnum
- ARM_WCGR0_REGNUM
+ SIM_ARM_IWMMXT_COP1R8_REGNUM
;
3049 if (reg
< NUM_GREGS
)
3050 return SIM_ARM_R0_REGNUM
+ reg
;
3053 if (reg
< NUM_FREGS
)
3054 return SIM_ARM_FP0_REGNUM
+ reg
;
3057 if (reg
< NUM_SREGS
)
3058 return SIM_ARM_FPS_REGNUM
+ reg
;
3061 internal_error (__FILE__
, __LINE__
, _("Bad REGNUM %d"), regnum
);
3064 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
3065 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
3066 It is thought that this is the floating-point register format on
3067 little-endian systems. */
3070 convert_from_extended (const struct floatformat
*fmt
, const void *ptr
,
3071 void *dbl
, int endianess
)
3075 if (endianess
== BFD_ENDIAN_BIG
)
3076 floatformat_to_doublest (&floatformat_arm_ext_big
, ptr
, &d
);
3078 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword
,
3080 floatformat_from_doublest (fmt
, &d
, dbl
);
3084 convert_to_extended (const struct floatformat
*fmt
, void *dbl
, const void *ptr
,
3089 floatformat_to_doublest (fmt
, ptr
, &d
);
3090 if (endianess
== BFD_ENDIAN_BIG
)
3091 floatformat_from_doublest (&floatformat_arm_ext_big
, &d
, dbl
);
3093 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword
,
3098 condition_true (unsigned long cond
, unsigned long status_reg
)
3100 if (cond
== INST_AL
|| cond
== INST_NV
)
3106 return ((status_reg
& FLAG_Z
) != 0);
3108 return ((status_reg
& FLAG_Z
) == 0);
3110 return ((status_reg
& FLAG_C
) != 0);
3112 return ((status_reg
& FLAG_C
) == 0);
3114 return ((status_reg
& FLAG_N
) != 0);
3116 return ((status_reg
& FLAG_N
) == 0);
3118 return ((status_reg
& FLAG_V
) != 0);
3120 return ((status_reg
& FLAG_V
) == 0);
3122 return ((status_reg
& (FLAG_C
| FLAG_Z
)) == FLAG_C
);
3124 return ((status_reg
& (FLAG_C
| FLAG_Z
)) != FLAG_C
);
3126 return (((status_reg
& FLAG_N
) == 0) == ((status_reg
& FLAG_V
) == 0));
3128 return (((status_reg
& FLAG_N
) == 0) != ((status_reg
& FLAG_V
) == 0));
3130 return (((status_reg
& FLAG_Z
) == 0)
3131 && (((status_reg
& FLAG_N
) == 0)
3132 == ((status_reg
& FLAG_V
) == 0)));
3134 return (((status_reg
& FLAG_Z
) != 0)
3135 || (((status_reg
& FLAG_N
) == 0)
3136 != ((status_reg
& FLAG_V
) == 0)));
3141 static unsigned long
3142 shifted_reg_val (struct frame_info
*frame
, unsigned long inst
, int carry
,
3143 unsigned long pc_val
, unsigned long status_reg
)
3145 unsigned long res
, shift
;
3146 int rm
= bits (inst
, 0, 3);
3147 unsigned long shifttype
= bits (inst
, 5, 6);
3151 int rs
= bits (inst
, 8, 11);
3152 shift
= (rs
== 15 ? pc_val
+ 8
3153 : get_frame_register_unsigned (frame
, rs
)) & 0xFF;
3156 shift
= bits (inst
, 7, 11);
3159 ? (pc_val
+ (bit (inst
, 4) ? 12 : 8))
3160 : get_frame_register_unsigned (frame
, rm
));
3165 res
= shift
>= 32 ? 0 : res
<< shift
;
3169 res
= shift
>= 32 ? 0 : res
>> shift
;
3175 res
= ((res
& 0x80000000L
)
3176 ? ~((~res
) >> shift
) : res
>> shift
);
3179 case 3: /* ROR/RRX */
3182 res
= (res
>> 1) | (carry
? 0x80000000L
: 0);
3184 res
= (res
>> shift
) | (res
<< (32 - shift
));
3188 return res
& 0xffffffff;
3191 /* Return number of 1-bits in VAL. */
3194 bitcount (unsigned long val
)
3197 for (nbits
= 0; val
!= 0; nbits
++)
3198 val
&= val
- 1; /* delete rightmost 1-bit in val */
3202 /* Return the size in bytes of the complete Thumb instruction whose
3203 first halfword is INST1. */
3206 thumb_insn_size (unsigned short inst1
)
3208 if ((inst1
& 0xe000) == 0xe000 && (inst1
& 0x1800) != 0)
3215 thumb_advance_itstate (unsigned int itstate
)
3217 /* Preserve IT[7:5], the first three bits of the condition. Shift
3218 the upcoming condition flags left by one bit. */
3219 itstate
= (itstate
& 0xe0) | ((itstate
<< 1) & 0x1f);
3221 /* If we have finished the IT block, clear the state. */
3222 if ((itstate
& 0x0f) == 0)
3228 /* Find the next PC after the current instruction executes. In some
3229 cases we can not statically determine the answer (see the IT state
3230 handling in this function); in that case, a breakpoint may be
3231 inserted in addition to the returned PC, which will be used to set
3232 another breakpoint by our caller. */
3235 thumb_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
, int insert_bkpt
)
3237 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
3238 struct address_space
*aspace
= get_frame_address_space (frame
);
3239 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3240 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3241 unsigned long pc_val
= ((unsigned long) pc
) + 4; /* PC after prefetch */
3242 unsigned short inst1
;
3243 CORE_ADDR nextpc
= pc
+ 2; /* default is next instruction */
3244 unsigned long offset
;
3245 ULONGEST status
, itstate
;
3247 nextpc
= MAKE_THUMB_ADDR (nextpc
);
3248 pc_val
= MAKE_THUMB_ADDR (pc_val
);
3250 inst1
= read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
3252 /* Thumb-2 conditional execution support. There are eight bits in
3253 the CPSR which describe conditional execution state. Once
3254 reconstructed (they're in a funny order), the low five bits
3255 describe the low bit of the condition for each instruction and
3256 how many instructions remain. The high three bits describe the
3257 base condition. One of the low four bits will be set if an IT
3258 block is active. These bits read as zero on earlier
3260 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
3261 itstate
= ((status
>> 8) & 0xfc) | ((status
>> 25) & 0x3);
3263 /* If-Then handling. On GNU/Linux, where this routine is used, we
3264 use an undefined instruction as a breakpoint. Unlike BKPT, IT
3265 can disable execution of the undefined instruction. So we might
3266 miss the breakpoint if we set it on a skipped conditional
3267 instruction. Because conditional instructions can change the
3268 flags, affecting the execution of further instructions, we may
3269 need to set two breakpoints. */
3271 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
!= NULL
)
3273 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
3275 /* An IT instruction. Because this instruction does not
3276 modify the flags, we can accurately predict the next
3277 executed instruction. */
3278 itstate
= inst1
& 0x00ff;
3279 pc
+= thumb_insn_size (inst1
);
3281 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
3283 inst1
= read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
3284 pc
+= thumb_insn_size (inst1
);
3285 itstate
= thumb_advance_itstate (itstate
);
3288 return MAKE_THUMB_ADDR (pc
);
3290 else if (itstate
!= 0)
3292 /* We are in a conditional block. Check the condition. */
3293 if (! condition_true (itstate
>> 4, status
))
3295 /* Advance to the next executed instruction. */
3296 pc
+= thumb_insn_size (inst1
);
3297 itstate
= thumb_advance_itstate (itstate
);
3299 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
3301 inst1
= read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
3302 pc
+= thumb_insn_size (inst1
);
3303 itstate
= thumb_advance_itstate (itstate
);
3306 return MAKE_THUMB_ADDR (pc
);
3308 else if ((itstate
& 0x0f) == 0x08)
3310 /* This is the last instruction of the conditional
3311 block, and it is executed. We can handle it normally
3312 because the following instruction is not conditional,
3313 and we must handle it normally because it is
3314 permitted to branch. Fall through. */
3320 /* There are conditional instructions after this one.
3321 If this instruction modifies the flags, then we can
3322 not predict what the next executed instruction will
3323 be. Fortunately, this instruction is architecturally
3324 forbidden to branch; we know it will fall through.
3325 Start by skipping past it. */
3326 pc
+= thumb_insn_size (inst1
);
3327 itstate
= thumb_advance_itstate (itstate
);
3329 /* Set a breakpoint on the following instruction. */
3330 gdb_assert ((itstate
& 0x0f) != 0);
3332 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
3333 cond_negated
= (itstate
>> 4) & 1;
3335 /* Skip all following instructions with the same
3336 condition. If there is a later instruction in the IT
3337 block with the opposite condition, set the other
3338 breakpoint there. If not, then set a breakpoint on
3339 the instruction after the IT block. */
3342 inst1
= read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
3343 pc
+= thumb_insn_size (inst1
);
3344 itstate
= thumb_advance_itstate (itstate
);
3346 while (itstate
!= 0 && ((itstate
>> 4) & 1) == cond_negated
);
3348 return MAKE_THUMB_ADDR (pc
);
3352 else if (itstate
& 0x0f)
3354 /* We are in a conditional block. Check the condition. */
3355 int cond
= itstate
>> 4;
3357 if (! condition_true (cond
, status
))
3359 /* Advance to the next instruction. All the 32-bit
3360 instructions share a common prefix. */
3361 if ((inst1
& 0xe000) == 0xe000 && (inst1
& 0x1800) != 0)
3362 return MAKE_THUMB_ADDR (pc
+ 4);
3364 return MAKE_THUMB_ADDR (pc
+ 2);
3367 /* Otherwise, handle the instruction normally. */
3370 if ((inst1
& 0xff00) == 0xbd00) /* pop {rlist, pc} */
3374 /* Fetch the saved PC from the stack. It's stored above
3375 all of the other registers. */
3376 offset
= bitcount (bits (inst1
, 0, 7)) * INT_REGISTER_SIZE
;
3377 sp
= get_frame_register_unsigned (frame
, ARM_SP_REGNUM
);
3378 nextpc
= read_memory_unsigned_integer (sp
+ offset
, 4, byte_order
);
3380 else if ((inst1
& 0xf000) == 0xd000) /* conditional branch */
3382 unsigned long cond
= bits (inst1
, 8, 11);
3383 if (cond
== 0x0f) /* 0x0f = SWI */
3385 struct gdbarch_tdep
*tdep
;
3386 tdep
= gdbarch_tdep (gdbarch
);
3388 if (tdep
->syscall_next_pc
!= NULL
)
3389 nextpc
= tdep
->syscall_next_pc (frame
);
3392 else if (cond
!= 0x0f && condition_true (cond
, status
))
3393 nextpc
= pc_val
+ (sbits (inst1
, 0, 7) << 1);
3395 else if ((inst1
& 0xf800) == 0xe000) /* unconditional branch */
3397 nextpc
= pc_val
+ (sbits (inst1
, 0, 10) << 1);
3399 else if ((inst1
& 0xe000) == 0xe000) /* 32-bit instruction */
3401 unsigned short inst2
;
3402 inst2
= read_memory_unsigned_integer (pc
+ 2, 2, byte_order_for_code
);
3404 /* Default to the next instruction. */
3406 nextpc
= MAKE_THUMB_ADDR (nextpc
);
3408 if ((inst1
& 0xf800) == 0xf000 && (inst2
& 0x8000) == 0x8000)
3410 /* Branches and miscellaneous control instructions. */
3412 if ((inst2
& 0x1000) != 0 || (inst2
& 0xd001) == 0xc000)
3415 int j1
, j2
, imm1
, imm2
;
3417 imm1
= sbits (inst1
, 0, 10);
3418 imm2
= bits (inst2
, 0, 10);
3419 j1
= bit (inst2
, 13);
3420 j2
= bit (inst2
, 11);
3422 offset
= ((imm1
<< 12) + (imm2
<< 1));
3423 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
3425 nextpc
= pc_val
+ offset
;
3426 /* For BLX make sure to clear the low bits. */
3427 if (bit (inst2
, 12) == 0)
3428 nextpc
= nextpc
& 0xfffffffc;
3430 else if (inst1
== 0xf3de && (inst2
& 0xff00) == 0x3f00)
3432 /* SUBS PC, LR, #imm8. */
3433 nextpc
= get_frame_register_unsigned (frame
, ARM_LR_REGNUM
);
3434 nextpc
-= inst2
& 0x00ff;
3436 else if ((inst2
& 0xd000) == 0x8000 && (inst1
& 0x0380) != 0x0380)
3438 /* Conditional branch. */
3439 if (condition_true (bits (inst1
, 6, 9), status
))
3441 int sign
, j1
, j2
, imm1
, imm2
;
3443 sign
= sbits (inst1
, 10, 10);
3444 imm1
= bits (inst1
, 0, 5);
3445 imm2
= bits (inst2
, 0, 10);
3446 j1
= bit (inst2
, 13);
3447 j2
= bit (inst2
, 11);
3449 offset
= (sign
<< 20) + (j2
<< 19) + (j1
<< 18);
3450 offset
+= (imm1
<< 12) + (imm2
<< 1);
3452 nextpc
= pc_val
+ offset
;
3456 else if ((inst1
& 0xfe50) == 0xe810)
3458 /* Load multiple or RFE. */
3459 int rn
, offset
, load_pc
= 1;
3461 rn
= bits (inst1
, 0, 3);
3462 if (bit (inst1
, 7) && !bit (inst1
, 8))
3465 if (!bit (inst2
, 15))
3467 offset
= bitcount (inst2
) * 4 - 4;
3469 else if (!bit (inst1
, 7) && bit (inst1
, 8))
3472 if (!bit (inst2
, 15))
3476 else if (bit (inst1
, 7) && bit (inst1
, 8))
3481 else if (!bit (inst1
, 7) && !bit (inst1
, 8))
3491 CORE_ADDR addr
= get_frame_register_unsigned (frame
, rn
);
3492 nextpc
= get_frame_memory_unsigned (frame
, addr
+ offset
, 4);
3495 else if ((inst1
& 0xffef) == 0xea4f && (inst2
& 0xfff0) == 0x0f00)
3497 /* MOV PC or MOVS PC. */
3498 nextpc
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
3499 nextpc
= MAKE_THUMB_ADDR (nextpc
);
3501 else if ((inst1
& 0xff70) == 0xf850 && (inst2
& 0xf000) == 0xf000)
3505 int rn
, load_pc
= 1;
3507 rn
= bits (inst1
, 0, 3);
3508 base
= get_frame_register_unsigned (frame
, rn
);
3511 base
= (base
+ 4) & ~(CORE_ADDR
) 0x3;
3513 base
+= bits (inst2
, 0, 11);
3515 base
-= bits (inst2
, 0, 11);
3517 else if (bit (inst1
, 7))
3518 base
+= bits (inst2
, 0, 11);
3519 else if (bit (inst2
, 11))
3521 if (bit (inst2
, 10))
3524 base
+= bits (inst2
, 0, 7);
3526 base
-= bits (inst2
, 0, 7);
3529 else if ((inst2
& 0x0fc0) == 0x0000)
3531 int shift
= bits (inst2
, 4, 5), rm
= bits (inst2
, 0, 3);
3532 base
+= get_frame_register_unsigned (frame
, rm
) << shift
;
3539 nextpc
= get_frame_memory_unsigned (frame
, base
, 4);
3541 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf000)
3544 CORE_ADDR tbl_reg
, table
, offset
, length
;
3546 tbl_reg
= bits (inst1
, 0, 3);
3547 if (tbl_reg
== 0x0f)
3548 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
3550 table
= get_frame_register_unsigned (frame
, tbl_reg
);
3552 offset
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
3553 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 1);
3554 nextpc
= pc_val
+ length
;
3556 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf010)
3559 CORE_ADDR tbl_reg
, table
, offset
, length
;
3561 tbl_reg
= bits (inst1
, 0, 3);
3562 if (tbl_reg
== 0x0f)
3563 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
3565 table
= get_frame_register_unsigned (frame
, tbl_reg
);
3567 offset
= 2 * get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
3568 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 2);
3569 nextpc
= pc_val
+ length
;
3572 else if ((inst1
& 0xff00) == 0x4700) /* bx REG, blx REG */
3574 if (bits (inst1
, 3, 6) == 0x0f)
3577 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
3579 else if ((inst1
& 0xff87) == 0x4687) /* mov pc, REG */
3581 if (bits (inst1
, 3, 6) == 0x0f)
3584 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
3586 nextpc
= MAKE_THUMB_ADDR (nextpc
);
3588 else if ((inst1
& 0xf500) == 0xb100)
3591 int imm
= (bit (inst1
, 9) << 6) + (bits (inst1
, 3, 7) << 1);
3592 ULONGEST reg
= get_frame_register_unsigned (frame
, bits (inst1
, 0, 2));
3594 if (bit (inst1
, 11) && reg
!= 0)
3595 nextpc
= pc_val
+ imm
;
3596 else if (!bit (inst1
, 11) && reg
== 0)
3597 nextpc
= pc_val
+ imm
;
3602 /* Get the raw next address. PC is the current program counter, in
3603 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
3604 the alternative next instruction if there are two options.
3606 The value returned has the execution state of the next instruction
3607 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
3608 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
3612 arm_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
, int insert_bkpt
)
3614 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
3615 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3616 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3617 unsigned long pc_val
;
3618 unsigned long this_instr
;
3619 unsigned long status
;
3622 if (arm_frame_is_thumb (frame
))
3623 return thumb_get_next_pc_raw (frame
, pc
, insert_bkpt
);
3625 pc_val
= (unsigned long) pc
;
3626 this_instr
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
3628 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
3629 nextpc
= (CORE_ADDR
) (pc_val
+ 4); /* Default case */
3631 if (bits (this_instr
, 28, 31) == INST_NV
)
3632 switch (bits (this_instr
, 24, 27))
3637 /* Branch with Link and change to Thumb. */
3638 nextpc
= BranchDest (pc
, this_instr
);
3639 nextpc
|= bit (this_instr
, 24) << 1;
3640 nextpc
= MAKE_THUMB_ADDR (nextpc
);
3646 /* Coprocessor register transfer. */
3647 if (bits (this_instr
, 12, 15) == 15)
3648 error (_("Invalid update to pc in instruction"));
3651 else if (condition_true (bits (this_instr
, 28, 31), status
))
3653 switch (bits (this_instr
, 24, 27))
3656 case 0x1: /* data processing */
3660 unsigned long operand1
, operand2
, result
= 0;
3664 if (bits (this_instr
, 12, 15) != 15)
3667 if (bits (this_instr
, 22, 25) == 0
3668 && bits (this_instr
, 4, 7) == 9) /* multiply */
3669 error (_("Invalid update to pc in instruction"));
3671 /* BX <reg>, BLX <reg> */
3672 if (bits (this_instr
, 4, 27) == 0x12fff1
3673 || bits (this_instr
, 4, 27) == 0x12fff3)
3675 rn
= bits (this_instr
, 0, 3);
3676 nextpc
= (rn
== 15) ? pc_val
+ 8
3677 : get_frame_register_unsigned (frame
, rn
);
3681 /* Multiply into PC */
3682 c
= (status
& FLAG_C
) ? 1 : 0;
3683 rn
= bits (this_instr
, 16, 19);
3684 operand1
= (rn
== 15) ? pc_val
+ 8
3685 : get_frame_register_unsigned (frame
, rn
);
3687 if (bit (this_instr
, 25))
3689 unsigned long immval
= bits (this_instr
, 0, 7);
3690 unsigned long rotate
= 2 * bits (this_instr
, 8, 11);
3691 operand2
= ((immval
>> rotate
) | (immval
<< (32 - rotate
)))
3694 else /* operand 2 is a shifted register */
3695 operand2
= shifted_reg_val (frame
, this_instr
, c
, pc_val
, status
);
3697 switch (bits (this_instr
, 21, 24))
3700 result
= operand1
& operand2
;
3704 result
= operand1
^ operand2
;
3708 result
= operand1
- operand2
;
3712 result
= operand2
- operand1
;
3716 result
= operand1
+ operand2
;
3720 result
= operand1
+ operand2
+ c
;
3724 result
= operand1
- operand2
+ c
;
3728 result
= operand2
- operand1
+ c
;
3734 case 0xb: /* tst, teq, cmp, cmn */
3735 result
= (unsigned long) nextpc
;
3739 result
= operand1
| operand2
;
3743 /* Always step into a function. */
3748 result
= operand1
& ~operand2
;
3756 /* In 26-bit APCS the bottom two bits of the result are
3757 ignored, and we always end up in ARM state. */
3759 nextpc
= arm_addr_bits_remove (gdbarch
, result
);
3767 case 0x5: /* data transfer */
3770 if (bit (this_instr
, 20))
3773 if (bits (this_instr
, 12, 15) == 15)
3779 if (bit (this_instr
, 22))
3780 error (_("Invalid update to pc in instruction"));
3782 /* byte write to PC */
3783 rn
= bits (this_instr
, 16, 19);
3784 base
= (rn
== 15) ? pc_val
+ 8
3785 : get_frame_register_unsigned (frame
, rn
);
3786 if (bit (this_instr
, 24))
3789 int c
= (status
& FLAG_C
) ? 1 : 0;
3790 unsigned long offset
=
3791 (bit (this_instr
, 25)
3792 ? shifted_reg_val (frame
, this_instr
, c
, pc_val
, status
)
3793 : bits (this_instr
, 0, 11));
3795 if (bit (this_instr
, 23))
3800 nextpc
= (CORE_ADDR
) read_memory_integer ((CORE_ADDR
) base
,
3807 case 0x9: /* block transfer */
3808 if (bit (this_instr
, 20))
3811 if (bit (this_instr
, 15))
3816 if (bit (this_instr
, 23))
3819 unsigned long reglist
= bits (this_instr
, 0, 14);
3820 offset
= bitcount (reglist
) * 4;
3821 if (bit (this_instr
, 24)) /* pre */
3824 else if (bit (this_instr
, 24))
3828 unsigned long rn_val
=
3829 get_frame_register_unsigned (frame
,
3830 bits (this_instr
, 16, 19));
3832 (CORE_ADDR
) read_memory_integer ((CORE_ADDR
) (rn_val
3840 case 0xb: /* branch & link */
3841 case 0xa: /* branch */
3843 nextpc
= BranchDest (pc
, this_instr
);
3849 case 0xe: /* coproc ops */
3853 struct gdbarch_tdep
*tdep
;
3854 tdep
= gdbarch_tdep (gdbarch
);
3856 if (tdep
->syscall_next_pc
!= NULL
)
3857 nextpc
= tdep
->syscall_next_pc (frame
);
3863 fprintf_filtered (gdb_stderr
, _("Bad bit-field extraction\n"));
3872 arm_get_next_pc (struct frame_info
*frame
, CORE_ADDR pc
)
3874 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
3876 gdbarch_addr_bits_remove (gdbarch
,
3877 arm_get_next_pc_raw (frame
, pc
, TRUE
));
3879 error (_("Infinite loop detected"));
3883 /* single_step() is called just before we want to resume the inferior,
3884 if we want to single-step it but there is no hardware or kernel
3885 single-step support. We find the target of the coming instruction
3886 and breakpoint it. */
3889 arm_software_single_step (struct frame_info
*frame
)
3891 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
3892 struct address_space
*aspace
= get_frame_address_space (frame
);
3894 /* NOTE: This may insert the wrong breakpoint instruction when
3895 single-stepping over a mode-changing instruction, if the
3896 CPSR heuristics are used. */
3898 CORE_ADDR next_pc
= arm_get_next_pc (frame
, get_frame_pc (frame
));
3899 insert_single_step_breakpoint (gdbarch
, aspace
, next_pc
);
3904 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
3905 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
3906 NULL if an error occurs. BUF is freed. */
3909 extend_buffer_earlier (gdb_byte
*buf
, CORE_ADDR endaddr
,
3910 int old_len
, int new_len
)
3912 gdb_byte
*new_buf
, *middle
;
3913 int bytes_to_read
= new_len
- old_len
;
3915 new_buf
= xmalloc (new_len
);
3916 memcpy (new_buf
+ bytes_to_read
, buf
, old_len
);
3918 if (target_read_memory (endaddr
- new_len
, new_buf
, bytes_to_read
) != 0)
/* An IT block is at most the 2-byte IT instruction followed by
   four 4-byte instructions.  The furthest back we must search to
   find an IT block that affects the current instruction is thus
   2 + 3 * 4 == 14 bytes.  */
#define MAX_IT_BLOCK_PREFIX 14

/* Use a quick scan if there are more than this many bytes of
   instructions.  */
#define IT_SCAN_THRESHOLD 32
3936 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
3937 A breakpoint in an IT block may not be hit, depending on the
3940 arm_adjust_breakpoint_address (struct gdbarch
*gdbarch
, CORE_ADDR bpaddr
)
3944 CORE_ADDR boundary
, func_start
;
3945 int buf_len
, buf2_len
;
3946 enum bfd_endian order
= gdbarch_byte_order_for_code (gdbarch
);
3947 int i
, any
, last_it
, last_it_count
;
3949 /* If we are using BKPT breakpoints, none of this is necessary. */
3950 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
== NULL
)
3953 /* ARM mode does not have this problem. */
3954 if (!arm_pc_is_thumb (gdbarch
, bpaddr
))
3957 /* We are setting a breakpoint in Thumb code that could potentially
3958 contain an IT block. The first step is to find how much Thumb
3959 code there is; we do not need to read outside of known Thumb
3961 map_type
= arm_find_mapping_symbol (bpaddr
, &boundary
);
3963 /* Thumb-2 code must have mapping symbols to have a chance. */
3966 bpaddr
= gdbarch_addr_bits_remove (gdbarch
, bpaddr
);
3968 if (find_pc_partial_function (bpaddr
, NULL
, &func_start
, NULL
)
3969 && func_start
> boundary
)
3970 boundary
= func_start
;
3972 /* Search for a candidate IT instruction. We have to do some fancy
3973 footwork to distinguish a real IT instruction from the second
3974 half of a 32-bit instruction, but there is no need for that if
3975 there's no candidate. */
3976 buf_len
= min (bpaddr
- boundary
, MAX_IT_BLOCK_PREFIX
);
3978 /* No room for an IT instruction. */
3981 buf
= xmalloc (buf_len
);
3982 if (target_read_memory (bpaddr
- buf_len
, buf
, buf_len
) != 0)
3985 for (i
= 0; i
< buf_len
; i
+= 2)
3987 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
3988 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
4000 /* OK, the code bytes before this instruction contain at least one
4001 halfword which resembles an IT instruction. We know that it's
4002 Thumb code, but there are still two possibilities. Either the
4003 halfword really is an IT instruction, or it is the second half of
4004 a 32-bit Thumb instruction. The only way we can tell is to
4005 scan forwards from a known instruction boundary. */
4006 if (bpaddr
- boundary
> IT_SCAN_THRESHOLD
)
4010 /* There's a lot of code before this instruction. Start with an
4011 optimistic search; it's easy to recognize halfwords that can
4012 not be the start of a 32-bit instruction, and use that to
4013 lock on to the instruction boundaries. */
4014 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, IT_SCAN_THRESHOLD
);
4017 buf_len
= IT_SCAN_THRESHOLD
;
4020 for (i
= 0; i
< buf_len
- sizeof (buf
) && ! definite
; i
+= 2)
4022 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
4023 if (thumb_insn_size (inst1
) == 2)
4030 /* At this point, if DEFINITE, BUF[I] is the first place we
4031 are sure that we know the instruction boundaries, and it is far
4032 enough from BPADDR that we could not miss an IT instruction
4033 affecting BPADDR. If ! DEFINITE, give up - start from a
4037 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, bpaddr
- boundary
);
4040 buf_len
= bpaddr
- boundary
;
4046 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, bpaddr
- boundary
);
4049 buf_len
= bpaddr
- boundary
;
4053 /* Scan forwards. Find the last IT instruction before BPADDR. */
4058 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
4060 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
4065 else if (inst1
& 0x0002)
4067 else if (inst1
& 0x0004)
4072 i
+= thumb_insn_size (inst1
);
4078 /* There wasn't really an IT instruction after all. */
4081 if (last_it_count
< 1)
4082 /* It was too far away. */
4085 /* This really is a trouble spot. Move the breakpoint to the IT
4087 return bpaddr
- buf_len
+ last_it
;
/* ARM displaced stepping support.

   Generally ARM displaced stepping works as follows:

   1. When an instruction is to be single-stepped, it is first decoded by
      arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
      Depending on the type of instruction, it is then copied to a scratch
      location, possibly in a modified form.  The copy_* set of functions
      performs such modification, as necessary.  A breakpoint is placed after
      the modified instruction in the scratch space to return control to GDB.
      Note in particular that instructions which modify the PC will no longer
      do so after modification.

   2. The instruction is single-stepped, by setting the PC to the scratch
      location address, and resuming.  Control returns to GDB when the
      breakpoint is hit.

   3. A cleanup function (cleanup_*) is called corresponding to the copy_*
      function used for the current instruction.  This function's job is to
      put the CPU/memory state back to what it would have been if the
      instruction had been executed unmodified in its original location.  */

/* NOP instruction (mov r0, r0).  */
#define ARM_NOP 0xe1a00000
4115 /* Helper for register reads for displaced stepping. In particular, this
4116 returns the PC as it would be seen by the instruction at its original
4120 displaced_read_reg (struct regcache
*regs
, CORE_ADDR from
, int regno
)
4126 if (debug_displaced
)
4127 fprintf_unfiltered (gdb_stdlog
, "displaced: read pc value %.8lx\n",
4128 (unsigned long) from
+ 8);
4129 return (ULONGEST
) from
+ 8; /* Pipeline offset. */
4133 regcache_cooked_read_unsigned (regs
, regno
, &ret
);
4134 if (debug_displaced
)
4135 fprintf_unfiltered (gdb_stdlog
, "displaced: read r%d value %.8lx\n",
4136 regno
, (unsigned long) ret
);
4142 displaced_in_arm_mode (struct regcache
*regs
)
4145 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
4147 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
4149 return (ps
& t_bit
) == 0;
4152 /* Write to the PC as from a branch instruction. */
4155 branch_write_pc (struct regcache
*regs
, ULONGEST val
)
4157 if (displaced_in_arm_mode (regs
))
4158 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4159 architecture versions < 6. */
4160 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& ~(ULONGEST
) 0x3);
4162 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& ~(ULONGEST
) 0x1);
4165 /* Write to the PC as from a branch-exchange instruction. */
4168 bx_write_pc (struct regcache
*regs
, ULONGEST val
)
4171 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
4173 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
4177 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
| t_bit
);
4178 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffe);
4180 else if ((val
& 2) == 0)
4182 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
4183 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
);
4187 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4188 mode, align dest to 4 bytes). */
4189 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4190 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
4191 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffc);
4195 /* Write to the PC as if from a load instruction. */
4198 load_write_pc (struct regcache
*regs
, ULONGEST val
)
4200 if (DISPLACED_STEPPING_ARCH_VERSION
>= 5)
4201 bx_write_pc (regs
, val
);
4203 branch_write_pc (regs
, val
);
4206 /* Write to the PC as if from an ALU instruction. */
4209 alu_write_pc (struct regcache
*regs
, ULONGEST val
)
4211 if (DISPLACED_STEPPING_ARCH_VERSION
>= 7 && displaced_in_arm_mode (regs
))
4212 bx_write_pc (regs
, val
);
4214 branch_write_pc (regs
, val
);
4217 /* Helper for writing to registers for displaced stepping. Writing to the PC
4218 has a varying effects depending on the instruction which does the write:
4219 this is controlled by the WRITE_PC argument. */
4222 displaced_write_reg (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
4223 int regno
, ULONGEST val
, enum pc_write_style write_pc
)
4227 if (debug_displaced
)
4228 fprintf_unfiltered (gdb_stdlog
, "displaced: writing pc %.8lx\n",
4229 (unsigned long) val
);
4232 case BRANCH_WRITE_PC
:
4233 branch_write_pc (regs
, val
);
4237 bx_write_pc (regs
, val
);
4241 load_write_pc (regs
, val
);
4245 alu_write_pc (regs
, val
);
4248 case CANNOT_WRITE_PC
:
4249 warning (_("Instruction wrote to PC in an unexpected way when "
4250 "single-stepping"));
4254 internal_error (__FILE__
, __LINE__
,
4255 _("Invalid argument to displaced_write_reg"));
4258 dsc
->wrote_to_pc
= 1;
4262 if (debug_displaced
)
4263 fprintf_unfiltered (gdb_stdlog
, "displaced: writing r%d value %.8lx\n",
4264 regno
, (unsigned long) val
);
4265 regcache_cooked_write_unsigned (regs
, regno
, val
);
/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function returns 1
   if any of these fields in INSN reference the PC (also 0b1111, r15),
   else it returns 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t lowbit = 1;

  while (bitmask != 0)
    {
      uint32_t mask;

      /* Find the least-significant set bit of the remaining mask.  */
      for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
	;

      if (!lowbit)
	break;

      /* A register field is four contiguous bits.  */
      mask = lowbit * 0xf;

      if ((insn & mask) == mask)
	return 1;

      bitmask &= ~mask;
    }

  return 0;
}
4301 /* The simplest copy function. Many instructions have the same effect no
4302 matter what address they are executed at: in those cases, use this. */
4305 copy_unmodified (struct gdbarch
*gdbarch
, uint32_t insn
,
4306 const char *iname
, struct displaced_step_closure
*dsc
)
4308 if (debug_displaced
)
4309 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.8lx, "
4310 "opcode/class '%s' unmodified\n", (unsigned long) insn
,
4313 dsc
->modinsn
[0] = insn
;
4318 /* Preload instructions with immediate offset. */
4321 cleanup_preload (struct gdbarch
*gdbarch
,
4322 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
4324 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
4325 if (!dsc
->u
.preload
.immed
)
4326 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
4330 copy_preload (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
4331 struct displaced_step_closure
*dsc
)
4333 unsigned int rn
= bits (insn
, 16, 19);
4335 CORE_ADDR from
= dsc
->insn_addr
;
4337 if (!insn_references_pc (insn
, 0x000f0000ul
))
4338 return copy_unmodified (gdbarch
, insn
, "preload", dsc
);
4340 if (debug_displaced
)
4341 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
4342 (unsigned long) insn
);
4344 /* Preload instructions:
4346 {pli/pld} [rn, #+/-imm]
4348 {pli/pld} [r0, #+/-imm]. */
4350 dsc
->tmp
[0] = displaced_read_reg (regs
, from
, 0);
4351 rn_val
= displaced_read_reg (regs
, from
, rn
);
4352 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
4354 dsc
->u
.preload
.immed
= 1;
4356 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
4358 dsc
->cleanup
= &cleanup_preload
;
4363 /* Preload instructions with register offset. */
4366 copy_preload_reg (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
4367 struct displaced_step_closure
*dsc
)
4369 unsigned int rn
= bits (insn
, 16, 19);
4370 unsigned int rm
= bits (insn
, 0, 3);
4371 ULONGEST rn_val
, rm_val
;
4372 CORE_ADDR from
= dsc
->insn_addr
;
4374 if (!insn_references_pc (insn
, 0x000f000ful
))
4375 return copy_unmodified (gdbarch
, insn
, "preload reg", dsc
);
4377 if (debug_displaced
)
4378 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
4379 (unsigned long) insn
);
4381 /* Preload register-offset instructions:
4383 {pli/pld} [rn, rm {, shift}]
4385 {pli/pld} [r0, r1 {, shift}]. */
4387 dsc
->tmp
[0] = displaced_read_reg (regs
, from
, 0);
4388 dsc
->tmp
[1] = displaced_read_reg (regs
, from
, 1);
4389 rn_val
= displaced_read_reg (regs
, from
, rn
);
4390 rm_val
= displaced_read_reg (regs
, from
, rm
);
4391 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
4392 displaced_write_reg (regs
, dsc
, 1, rm_val
, CANNOT_WRITE_PC
);
4394 dsc
->u
.preload
.immed
= 0;
4396 dsc
->modinsn
[0] = (insn
& 0xfff0fff0) | 0x1;
4398 dsc
->cleanup
= &cleanup_preload
;
4403 /* Copy/cleanup coprocessor load and store instructions. */
4406 cleanup_copro_load_store (struct gdbarch
*gdbarch
,
4407 struct regcache
*regs
,
4408 struct displaced_step_closure
*dsc
)
4410 ULONGEST rn_val
= displaced_read_reg (regs
, dsc
->insn_addr
, 0);
4412 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
4414 if (dsc
->u
.ldst
.writeback
)
4415 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, LOAD_WRITE_PC
);
4419 copy_copro_load_store (struct gdbarch
*gdbarch
, uint32_t insn
,
4420 struct regcache
*regs
,
4421 struct displaced_step_closure
*dsc
)
4423 unsigned int rn
= bits (insn
, 16, 19);
4425 CORE_ADDR from
= dsc
->insn_addr
;
4427 if (!insn_references_pc (insn
, 0x000f0000ul
))
4428 return copy_unmodified (gdbarch
, insn
, "copro load/store", dsc
);
4430 if (debug_displaced
)
4431 fprintf_unfiltered (gdb_stdlog
, "displaced: copying coprocessor "
4432 "load/store insn %.8lx\n", (unsigned long) insn
);
4434 /* Coprocessor load/store instructions:
4436 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4438 {stc/stc2} [r0, #+/-imm].
4440 ldc/ldc2 are handled identically. */
4442 dsc
->tmp
[0] = displaced_read_reg (regs
, from
, 0);
4443 rn_val
= displaced_read_reg (regs
, from
, rn
);
4444 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
4446 dsc
->u
.ldst
.writeback
= bit (insn
, 25);
4447 dsc
->u
.ldst
.rn
= rn
;
4449 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
4451 dsc
->cleanup
= &cleanup_copro_load_store
;
4456 /* Clean up branch instructions (actually perform the branch, by setting
4460 cleanup_branch (struct gdbarch
*gdbarch
, struct regcache
*regs
,
4461 struct displaced_step_closure
*dsc
)
4463 ULONGEST from
= dsc
->insn_addr
;
4464 uint32_t status
= displaced_read_reg (regs
, from
, ARM_PS_REGNUM
);
4465 int branch_taken
= condition_true (dsc
->u
.branch
.cond
, status
);
4466 enum pc_write_style write_pc
= dsc
->u
.branch
.exchange
4467 ? BX_WRITE_PC
: BRANCH_WRITE_PC
;
4472 if (dsc
->u
.branch
.link
)
4474 ULONGEST pc
= displaced_read_reg (regs
, from
, 15);
4475 displaced_write_reg (regs
, dsc
, 14, pc
- 4, CANNOT_WRITE_PC
);
4478 displaced_write_reg (regs
, dsc
, 15, dsc
->u
.branch
.dest
, write_pc
);
4481 /* Copy B/BL/BLX instructions with immediate destinations. */
4484 copy_b_bl_blx (struct gdbarch
*gdbarch
, uint32_t insn
,
4485 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
4487 unsigned int cond
= bits (insn
, 28, 31);
4488 int exchange
= (cond
== 0xf);
4489 int link
= exchange
|| bit (insn
, 24);
4490 CORE_ADDR from
= dsc
->insn_addr
;
4493 if (debug_displaced
)
4494 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s immediate insn "
4495 "%.8lx\n", (exchange
) ? "blx" : (link
) ? "bl" : "b",
4496 (unsigned long) insn
);
4498 /* Implement "BL<cond> <label>" as:
4500 Preparation: cond <- instruction condition
4501 Insn: mov r0, r0 (nop)
4502 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
4504 B<cond> similar, but don't set r14 in cleanup. */
4507 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
4508 then arrange the switch into Thumb mode. */
4509 offset
= (bits (insn
, 0, 23) << 2) | (bit (insn
, 24) << 1) | 1;
4511 offset
= bits (insn
, 0, 23) << 2;
4513 if (bit (offset
, 25))
4514 offset
= offset
| ~0x3ffffff;
4516 dsc
->u
.branch
.cond
= cond
;
4517 dsc
->u
.branch
.link
= link
;
4518 dsc
->u
.branch
.exchange
= exchange
;
4519 dsc
->u
.branch
.dest
= from
+ 8 + offset
;
4521 dsc
->modinsn
[0] = ARM_NOP
;
4523 dsc
->cleanup
= &cleanup_branch
;
4528 /* Copy BX/BLX with register-specified destinations. */
4531 copy_bx_blx_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
4532 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
4534 unsigned int cond
= bits (insn
, 28, 31);
4537 int link
= bit (insn
, 5);
4538 unsigned int rm
= bits (insn
, 0, 3);
4539 CORE_ADDR from
= dsc
->insn_addr
;
4541 if (debug_displaced
)
4542 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s register insn "
4543 "%.8lx\n", (link
) ? "blx" : "bx", (unsigned long) insn
);
4545 /* Implement {BX,BLX}<cond> <reg>" as:
4547 Preparation: cond <- instruction condition
4548 Insn: mov r0, r0 (nop)
4549 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
4551 Don't set r14 in cleanup for BX. */
4553 dsc
->u
.branch
.dest
= displaced_read_reg (regs
, from
, rm
);
4555 dsc
->u
.branch
.cond
= cond
;
4556 dsc
->u
.branch
.link
= link
;
4557 dsc
->u
.branch
.exchange
= 1;
4559 dsc
->modinsn
[0] = ARM_NOP
;
4561 dsc
->cleanup
= &cleanup_branch
;
4566 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
4569 cleanup_alu_imm (struct gdbarch
*gdbarch
,
4570 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
4572 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
->insn_addr
, 0);
4573 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
4574 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
4575 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
4579 copy_alu_imm (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
4580 struct displaced_step_closure
*dsc
)
4582 unsigned int rn
= bits (insn
, 16, 19);
4583 unsigned int rd
= bits (insn
, 12, 15);
4584 unsigned int op
= bits (insn
, 21, 24);
4585 int is_mov
= (op
== 0xd);
4586 ULONGEST rd_val
, rn_val
;
4587 CORE_ADDR from
= dsc
->insn_addr
;
4589 if (!insn_references_pc (insn
, 0x000ff000ul
))
4590 return copy_unmodified (gdbarch
, insn
, "ALU immediate", dsc
);
4592 if (debug_displaced
)
4593 fprintf_unfiltered (gdb_stdlog
, "displaced: copying immediate %s insn "
4594 "%.8lx\n", is_mov
? "move" : "ALU",
4595 (unsigned long) insn
);
4597 /* Instruction is of form:
4599 <op><cond> rd, [rn,] #imm
4603 Preparation: tmp1, tmp2 <- r0, r1;
4605 Insn: <op><cond> r0, r1, #imm
4606 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
4609 dsc
->tmp
[0] = displaced_read_reg (regs
, from
, 0);
4610 dsc
->tmp
[1] = displaced_read_reg (regs
, from
, 1);
4611 rn_val
= displaced_read_reg (regs
, from
, rn
);
4612 rd_val
= displaced_read_reg (regs
, from
, rd
);
4613 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
4614 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
4618 dsc
->modinsn
[0] = insn
& 0xfff00fff;
4620 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x10000;
4622 dsc
->cleanup
= &cleanup_alu_imm
;
4627 /* Copy/cleanup arithmetic/logic insns with register RHS. */
4630 cleanup_alu_reg (struct gdbarch
*gdbarch
,
4631 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
4636 rd_val
= displaced_read_reg (regs
, dsc
->insn_addr
, 0);
4638 for (i
= 0; i
< 3; i
++)
4639 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
4641 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
4645 copy_alu_reg (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
4646 struct displaced_step_closure
*dsc
)
4648 unsigned int rn
= bits (insn
, 16, 19);
4649 unsigned int rm
= bits (insn
, 0, 3);
4650 unsigned int rd
= bits (insn
, 12, 15);
4651 unsigned int op
= bits (insn
, 21, 24);
4652 int is_mov
= (op
== 0xd);
4653 ULONGEST rd_val
, rn_val
, rm_val
;
4654 CORE_ADDR from
= dsc
->insn_addr
;
4656 if (!insn_references_pc (insn
, 0x000ff00ful
))
4657 return copy_unmodified (gdbarch
, insn
, "ALU reg", dsc
);
4659 if (debug_displaced
)
4660 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.8lx\n",
4661 is_mov
? "move" : "ALU", (unsigned long) insn
);
4663 /* Instruction is of form:
4665 <op><cond> rd, [rn,] rm [, <shift>]
4669 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
4670 r0, r1, r2 <- rd, rn, rm
4671 Insn: <op><cond> r0, r1, r2 [, <shift>]
4672 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
4675 dsc
->tmp
[0] = displaced_read_reg (regs
, from
, 0);
4676 dsc
->tmp
[1] = displaced_read_reg (regs
, from
, 1);
4677 dsc
->tmp
[2] = displaced_read_reg (regs
, from
, 2);
4678 rd_val
= displaced_read_reg (regs
, from
, rd
);
4679 rn_val
= displaced_read_reg (regs
, from
, rn
);
4680 rm_val
= displaced_read_reg (regs
, from
, rm
);
4681 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
4682 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
4683 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
4687 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x2;
4689 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x10002;
4691 dsc
->cleanup
= &cleanup_alu_reg
;
4696 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
4699 cleanup_alu_shifted_reg (struct gdbarch
*gdbarch
,
4700 struct regcache
*regs
,
4701 struct displaced_step_closure
*dsc
)
4703 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
->insn_addr
, 0);
4706 for (i
= 0; i
< 4; i
++)
4707 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
4709 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
4713 copy_alu_shifted_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
4714 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
4716 unsigned int rn
= bits (insn
, 16, 19);
4717 unsigned int rm
= bits (insn
, 0, 3);
4718 unsigned int rd
= bits (insn
, 12, 15);
4719 unsigned int rs
= bits (insn
, 8, 11);
4720 unsigned int op
= bits (insn
, 21, 24);
4721 int is_mov
= (op
== 0xd), i
;
4722 ULONGEST rd_val
, rn_val
, rm_val
, rs_val
;
4723 CORE_ADDR from
= dsc
->insn_addr
;
4725 if (!insn_references_pc (insn
, 0x000fff0ful
))
4726 return copy_unmodified (gdbarch
, insn
, "ALU shifted reg", dsc
);
4728 if (debug_displaced
)
4729 fprintf_unfiltered (gdb_stdlog
, "displaced: copying shifted reg %s insn "
4730 "%.8lx\n", is_mov
? "move" : "ALU",
4731 (unsigned long) insn
);
4733 /* Instruction is of form:
4735 <op><cond> rd, [rn,] rm, <shift> rs
4739 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
4740 r0, r1, r2, r3 <- rd, rn, rm, rs
4741 Insn: <op><cond> r0, r1, r2, <shift> r3
4743 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
4747 for (i
= 0; i
< 4; i
++)
4748 dsc
->tmp
[i
] = displaced_read_reg (regs
, from
, i
);
4750 rd_val
= displaced_read_reg (regs
, from
, rd
);
4751 rn_val
= displaced_read_reg (regs
, from
, rn
);
4752 rm_val
= displaced_read_reg (regs
, from
, rm
);
4753 rs_val
= displaced_read_reg (regs
, from
, rs
);
4754 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
4755 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
4756 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
4757 displaced_write_reg (regs
, dsc
, 3, rs_val
, CANNOT_WRITE_PC
);
4761 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x302;
4763 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x10302;
4765 dsc
->cleanup
= &cleanup_alu_shifted_reg
;
4770 /* Clean up load instructions. */
4773 cleanup_load (struct gdbarch
*gdbarch
, struct regcache
*regs
,
4774 struct displaced_step_closure
*dsc
)
4776 ULONGEST rt_val
, rt_val2
= 0, rn_val
;
4777 CORE_ADDR from
= dsc
->insn_addr
;
4779 rt_val
= displaced_read_reg (regs
, from
, 0);
4780 if (dsc
->u
.ldst
.xfersize
== 8)
4781 rt_val2
= displaced_read_reg (regs
, from
, 1);
4782 rn_val
= displaced_read_reg (regs
, from
, 2);
4784 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
4785 if (dsc
->u
.ldst
.xfersize
> 4)
4786 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
4787 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
4788 if (!dsc
->u
.ldst
.immed
)
4789 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
4791 /* Handle register writeback. */
4792 if (dsc
->u
.ldst
.writeback
)
4793 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
4794 /* Put result in right place. */
4795 displaced_write_reg (regs
, dsc
, dsc
->rd
, rt_val
, LOAD_WRITE_PC
);
4796 if (dsc
->u
.ldst
.xfersize
== 8)
4797 displaced_write_reg (regs
, dsc
, dsc
->rd
+ 1, rt_val2
, LOAD_WRITE_PC
);
4800 /* Clean up store instructions. */
4803 cleanup_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
4804 struct displaced_step_closure
*dsc
)
4806 CORE_ADDR from
= dsc
->insn_addr
;
4807 ULONGEST rn_val
= displaced_read_reg (regs
, from
, 2);
4809 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
4810 if (dsc
->u
.ldst
.xfersize
> 4)
4811 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
4812 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
4813 if (!dsc
->u
.ldst
.immed
)
4814 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
4815 if (!dsc
->u
.ldst
.restore_r4
)
4816 displaced_write_reg (regs
, dsc
, 4, dsc
->tmp
[4], CANNOT_WRITE_PC
);
4819 if (dsc
->u
.ldst
.writeback
)
4820 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
4823 /* Copy "extra" load/store instructions. These are halfword/doubleword
4824 transfers, which have a different encoding to byte/word transfers. */
4827 copy_extra_ld_st (struct gdbarch
*gdbarch
, uint32_t insn
, int unpriveleged
,
4828 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
4830 unsigned int op1
= bits (insn
, 20, 24);
4831 unsigned int op2
= bits (insn
, 5, 6);
4832 unsigned int rt
= bits (insn
, 12, 15);
4833 unsigned int rn
= bits (insn
, 16, 19);
4834 unsigned int rm
= bits (insn
, 0, 3);
4835 char load
[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
4836 char bytesize
[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
4837 int immed
= (op1
& 0x4) != 0;
4839 ULONGEST rt_val
, rt_val2
= 0, rn_val
, rm_val
= 0;
4840 CORE_ADDR from
= dsc
->insn_addr
;
4842 if (!insn_references_pc (insn
, 0x000ff00ful
))
4843 return copy_unmodified (gdbarch
, insn
, "extra load/store", dsc
);
4845 if (debug_displaced
)
4846 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %sextra load/store "
4847 "insn %.8lx\n", unpriveleged
? "unpriveleged " : "",
4848 (unsigned long) insn
);
4850 opcode
= ((op2
<< 2) | (op1
& 0x1) | ((op1
& 0x4) >> 1)) - 4;
4853 internal_error (__FILE__
, __LINE__
,
4854 _("copy_extra_ld_st: instruction decode error"));
4856 dsc
->tmp
[0] = displaced_read_reg (regs
, from
, 0);
4857 dsc
->tmp
[1] = displaced_read_reg (regs
, from
, 1);
4858 dsc
->tmp
[2] = displaced_read_reg (regs
, from
, 2);
4860 dsc
->tmp
[3] = displaced_read_reg (regs
, from
, 3);
4862 rt_val
= displaced_read_reg (regs
, from
, rt
);
4863 if (bytesize
[opcode
] == 8)
4864 rt_val2
= displaced_read_reg (regs
, from
, rt
+ 1);
4865 rn_val
= displaced_read_reg (regs
, from
, rn
);
4867 rm_val
= displaced_read_reg (regs
, from
, rm
);
4869 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
4870 if (bytesize
[opcode
] == 8)
4871 displaced_write_reg (regs
, dsc
, 1, rt_val2
, CANNOT_WRITE_PC
);
4872 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
4874 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
4877 dsc
->u
.ldst
.xfersize
= bytesize
[opcode
];
4878 dsc
->u
.ldst
.rn
= rn
;
4879 dsc
->u
.ldst
.immed
= immed
;
4880 dsc
->u
.ldst
.writeback
= bit (insn
, 24) == 0 || bit (insn
, 21) != 0;
4881 dsc
->u
.ldst
.restore_r4
= 0;
4884 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
4886 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
4887 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
4889 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
4891 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
4892 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
4894 dsc
->cleanup
= load
[opcode
] ? &cleanup_load
: &cleanup_store
;
4899 /* Copy byte/word loads and stores. */
4902 copy_ldr_str_ldrb_strb (struct gdbarch
*gdbarch
, uint32_t insn
,
4903 struct regcache
*regs
,
4904 struct displaced_step_closure
*dsc
, int load
, int byte
,
4907 int immed
= !bit (insn
, 25);
4908 unsigned int rt
= bits (insn
, 12, 15);
4909 unsigned int rn
= bits (insn
, 16, 19);
4910 unsigned int rm
= bits (insn
, 0, 3); /* Only valid if !immed. */
4911 ULONGEST rt_val
, rn_val
, rm_val
= 0;
4912 CORE_ADDR from
= dsc
->insn_addr
;
4914 if (!insn_references_pc (insn
, 0x000ff00ful
))
4915 return copy_unmodified (gdbarch
, insn
, "load/store", dsc
);
4917 if (debug_displaced
)
4918 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s%s insn %.8lx\n",
4919 load
? (byte
? "ldrb" : "ldr")
4920 : (byte
? "strb" : "str"), usermode
? "t" : "",
4921 (unsigned long) insn
);
4923 dsc
->tmp
[0] = displaced_read_reg (regs
, from
, 0);
4924 dsc
->tmp
[2] = displaced_read_reg (regs
, from
, 2);
4926 dsc
->tmp
[3] = displaced_read_reg (regs
, from
, 3);
4928 dsc
->tmp
[4] = displaced_read_reg (regs
, from
, 4);
4930 rt_val
= displaced_read_reg (regs
, from
, rt
);
4931 rn_val
= displaced_read_reg (regs
, from
, rn
);
4933 rm_val
= displaced_read_reg (regs
, from
, rm
);
4935 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
4936 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
4938 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
4941 dsc
->u
.ldst
.xfersize
= byte
? 1 : 4;
4942 dsc
->u
.ldst
.rn
= rn
;
4943 dsc
->u
.ldst
.immed
= immed
;
4944 dsc
->u
.ldst
.writeback
= bit (insn
, 24) == 0 || bit (insn
, 21) != 0;
4946 /* To write PC we can do:
4948 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
4949 scratch+4: ldr r4, temp
4950 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
4951 scratch+12: add r4, r4, #8 (r4 = offset)
4952 scratch+16: add r0, r0, r4
4953 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
4956 Otherwise we don't know what value to write for PC, since the offset is
4957 architecture-dependent (sometimes PC+8, sometimes PC+12). */
4959 if (load
|| rt
!= 15)
4961 dsc
->u
.ldst
.restore_r4
= 0;
4964 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
4966 {ldr,str}[b]<cond> r0, [r2, #imm]. */
4967 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
4969 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
4971 {ldr,str}[b]<cond> r0, [r2, r3]. */
4972 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
4976 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
4977 dsc
->u
.ldst
.restore_r4
= 1;
4979 dsc
->modinsn
[0] = 0xe58ff014; /* str pc, [pc, #20]. */
4980 dsc
->modinsn
[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
4981 dsc
->modinsn
[2] = 0xe044400f; /* sub r4, r4, pc. */
4982 dsc
->modinsn
[3] = 0xe2844008; /* add r4, r4, #8. */
4983 dsc
->modinsn
[4] = 0xe0800004; /* add r0, r0, r4. */
4987 dsc
->modinsn
[5] = (insn
& 0xfff00fff) | 0x20000;
4989 dsc
->modinsn
[5] = (insn
& 0xfff00ff0) | 0x20003;
4991 dsc
->modinsn
[6] = 0x0; /* breakpoint location. */
4992 dsc
->modinsn
[7] = 0x0; /* scratch space. */
4997 dsc
->cleanup
= load
? &cleanup_load
: &cleanup_store
;
5002 /* Cleanup LDM instructions with fully-populated register list. This is an
5003 unfortunate corner case: it's impossible to implement correctly by modifying
5004 the instruction. The issue is as follows: we have an instruction,
5008 which we must rewrite to avoid loading PC. A possible solution would be to
5009 do the load in two halves, something like (with suitable cleanup
5013 ldm[id][ab] r8!, {r0-r7}
5015 ldm[id][ab] r8, {r7-r14}
5018 but at present there's no suitable place for <temp>, since the scratch space
5019 is overwritten before the cleanup routine is called. For now, we simply
5020 emulate the instruction. */
5023 cleanup_block_load_all (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5024 struct displaced_step_closure
*dsc
)
5026 ULONGEST from
= dsc
->insn_addr
;
5027 int inc
= dsc
->u
.block
.increment
;
5028 int bump_before
= dsc
->u
.block
.before
? (inc
? 4 : -4) : 0;
5029 int bump_after
= dsc
->u
.block
.before
? 0 : (inc
? 4 : -4);
5030 uint32_t regmask
= dsc
->u
.block
.regmask
;
5031 int regno
= inc
? 0 : 15;
5032 CORE_ADDR xfer_addr
= dsc
->u
.block
.xfer_addr
;
5033 int exception_return
= dsc
->u
.block
.load
&& dsc
->u
.block
.user
5034 && (regmask
& 0x8000) != 0;
5035 uint32_t status
= displaced_read_reg (regs
, from
, ARM_PS_REGNUM
);
5036 int do_transfer
= condition_true (dsc
->u
.block
.cond
, status
);
5037 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
5042 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
5043 sensible we can do here. Complain loudly. */
5044 if (exception_return
)
5045 error (_("Cannot single-step exception return"));
5047 /* We don't handle any stores here for now. */
5048 gdb_assert (dsc
->u
.block
.load
!= 0);
5050 if (debug_displaced
)
5051 fprintf_unfiltered (gdb_stdlog
, "displaced: emulating block transfer: "
5052 "%s %s %s\n", dsc
->u
.block
.load
? "ldm" : "stm",
5053 dsc
->u
.block
.increment
? "inc" : "dec",
5054 dsc
->u
.block
.before
? "before" : "after");
5061 while (regno
<= 15 && (regmask
& (1 << regno
)) == 0)
5064 while (regno
>= 0 && (regmask
& (1 << regno
)) == 0)
5067 xfer_addr
+= bump_before
;
5069 memword
= read_memory_unsigned_integer (xfer_addr
, 4, byte_order
);
5070 displaced_write_reg (regs
, dsc
, regno
, memword
, LOAD_WRITE_PC
);
5072 xfer_addr
+= bump_after
;
5074 regmask
&= ~(1 << regno
);
5077 if (dsc
->u
.block
.writeback
)
5078 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, xfer_addr
,
5082 /* Clean up an STM which included the PC in the register list. */
5085 cleanup_block_store_pc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5086 struct displaced_step_closure
*dsc
)
5088 ULONGEST from
= dsc
->insn_addr
;
5089 uint32_t status
= displaced_read_reg (regs
, from
, ARM_PS_REGNUM
);
5090 int store_executed
= condition_true (dsc
->u
.block
.cond
, status
);
5091 CORE_ADDR pc_stored_at
, transferred_regs
= bitcount (dsc
->u
.block
.regmask
);
5092 CORE_ADDR stm_insn_addr
;
5095 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
5097 /* If condition code fails, there's nothing else to do. */
5098 if (!store_executed
)
5101 if (dsc
->u
.block
.increment
)
5103 pc_stored_at
= dsc
->u
.block
.xfer_addr
+ 4 * transferred_regs
;
5105 if (dsc
->u
.block
.before
)
5110 pc_stored_at
= dsc
->u
.block
.xfer_addr
;
5112 if (dsc
->u
.block
.before
)
5116 pc_val
= read_memory_unsigned_integer (pc_stored_at
, 4, byte_order
);
5117 stm_insn_addr
= dsc
->scratch_base
;
5118 offset
= pc_val
- stm_insn_addr
;
5120 if (debug_displaced
)
5121 fprintf_unfiltered (gdb_stdlog
, "displaced: detected PC offset %.8lx for "
5122 "STM instruction\n", offset
);
5124 /* Rewrite the stored PC to the proper value for the non-displaced original
5126 write_memory_unsigned_integer (pc_stored_at
, 4, byte_order
,
5127 dsc
->insn_addr
+ offset
);
5130 /* Clean up an LDM which includes the PC in the register list. We clumped all
5131 the registers in the transferred list into a contiguous range r0...rX (to
5132 avoid loading PC directly and losing control of the debugged program), so we
5133 must undo that here. */
5136 cleanup_block_load_pc (struct gdbarch
*gdbarch
,
5137 struct regcache
*regs
,
5138 struct displaced_step_closure
*dsc
)
5140 ULONGEST from
= dsc
->insn_addr
;
5141 uint32_t status
= displaced_read_reg (regs
, from
, ARM_PS_REGNUM
);
5142 int load_executed
= condition_true (dsc
->u
.block
.cond
, status
), i
;
5143 unsigned int mask
= dsc
->u
.block
.regmask
, write_reg
= 15;
5144 unsigned int regs_loaded
= bitcount (mask
);
5145 unsigned int num_to_shuffle
= regs_loaded
, clobbered
;
5147 /* The method employed here will fail if the register list is fully populated
5148 (we need to avoid loading PC directly). */
5149 gdb_assert (num_to_shuffle
< 16);
5154 clobbered
= (1 << num_to_shuffle
) - 1;
5156 while (num_to_shuffle
> 0)
5158 if ((mask
& (1 << write_reg
)) != 0)
5160 unsigned int read_reg
= num_to_shuffle
- 1;
5162 if (read_reg
!= write_reg
)
5164 ULONGEST rval
= displaced_read_reg (regs
, from
, read_reg
);
5165 displaced_write_reg (regs
, dsc
, write_reg
, rval
, LOAD_WRITE_PC
);
5166 if (debug_displaced
)
5167 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: move "
5168 "loaded register r%d to r%d\n"), read_reg
,
5171 else if (debug_displaced
)
5172 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: register "
5173 "r%d already in the right place\n"),
5176 clobbered
&= ~(1 << write_reg
);
5184 /* Restore any registers we scribbled over. */
5185 for (write_reg
= 0; clobbered
!= 0; write_reg
++)
5187 if ((clobbered
& (1 << write_reg
)) != 0)
5189 displaced_write_reg (regs
, dsc
, write_reg
, dsc
->tmp
[write_reg
],
5191 if (debug_displaced
)
5192 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: restored "
5193 "clobbered register r%d\n"), write_reg
);
5194 clobbered
&= ~(1 << write_reg
);
5198 /* Perform register writeback manually. */
5199 if (dsc
->u
.block
.writeback
)
5201 ULONGEST new_rn_val
= dsc
->u
.block
.xfer_addr
;
5203 if (dsc
->u
.block
.increment
)
5204 new_rn_val
+= regs_loaded
* 4;
5206 new_rn_val
-= regs_loaded
* 4;
5208 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, new_rn_val
,
5213 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
5214 in user-level code (in particular exception return, ldm rn, {...pc}^). */
5217 copy_block_xfer (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
5218 struct displaced_step_closure
*dsc
)
5220 int load
= bit (insn
, 20);
5221 int user
= bit (insn
, 22);
5222 int increment
= bit (insn
, 23);
5223 int before
= bit (insn
, 24);
5224 int writeback
= bit (insn
, 21);
5225 int rn
= bits (insn
, 16, 19);
5226 CORE_ADDR from
= dsc
->insn_addr
;
5228 /* Block transfers which don't mention PC can be run directly out-of-line. */
5229 if (rn
!= 15 && (insn
& 0x8000) == 0)
5230 return copy_unmodified (gdbarch
, insn
, "ldm/stm", dsc
);
5234 warning (_("displaced: Unpredictable LDM or STM with base register r15"));
5235 return copy_unmodified (gdbarch
, insn
, "unpredictable ldm/stm", dsc
);
5238 if (debug_displaced
)
5239 fprintf_unfiltered (gdb_stdlog
, "displaced: copying block transfer insn "
5240 "%.8lx\n", (unsigned long) insn
);
5242 dsc
->u
.block
.xfer_addr
= displaced_read_reg (regs
, from
, rn
);
5243 dsc
->u
.block
.rn
= rn
;
5245 dsc
->u
.block
.load
= load
;
5246 dsc
->u
.block
.user
= user
;
5247 dsc
->u
.block
.increment
= increment
;
5248 dsc
->u
.block
.before
= before
;
5249 dsc
->u
.block
.writeback
= writeback
;
5250 dsc
->u
.block
.cond
= bits (insn
, 28, 31);
5252 dsc
->u
.block
.regmask
= insn
& 0xffff;
5256 if ((insn
& 0xffff) == 0xffff)
5258 /* LDM with a fully-populated register list. This case is
5259 particularly tricky. Implement for now by fully emulating the
5260 instruction (which might not behave perfectly in all cases, but
5261 these instructions should be rare enough for that not to matter
5263 dsc
->modinsn
[0] = ARM_NOP
;
5265 dsc
->cleanup
= &cleanup_block_load_all
;
5269 /* LDM of a list of registers which includes PC. Implement by
5270 rewriting the list of registers to be transferred into a
5271 contiguous chunk r0...rX before doing the transfer, then shuffling
5272 registers into the correct places in the cleanup routine. */
5273 unsigned int regmask
= insn
& 0xffff;
5274 unsigned int num_in_list
= bitcount (regmask
), new_regmask
, bit
= 1;
5275 unsigned int to
= 0, from
= 0, i
, new_rn
;
5277 for (i
= 0; i
< num_in_list
; i
++)
5278 dsc
->tmp
[i
] = displaced_read_reg (regs
, from
, i
);
5280 /* Writeback makes things complicated. We need to avoid clobbering
5281 the base register with one of the registers in our modified
5282 register list, but just using a different register can't work in
5285 ldm r14!, {r0-r13,pc}
5287 which would need to be rewritten as:
5291 but that can't work, because there's no free register for N.
5293 Solve this by turning off the writeback bit, and emulating
5294 writeback manually in the cleanup routine. */
5299 new_regmask
= (1 << num_in_list
) - 1;
5301 if (debug_displaced
)
5302 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM r%d%s, "
5303 "{..., pc}: original reg list %.4x, modified "
5304 "list %.4x\n"), rn
, writeback
? "!" : "",
5305 (int) insn
& 0xffff, new_regmask
);
5307 dsc
->modinsn
[0] = (insn
& ~0xffff) | (new_regmask
& 0xffff);
5309 dsc
->cleanup
= &cleanup_block_load_pc
;
5314 /* STM of a list of registers which includes PC. Run the instruction
5315 as-is, but out of line: this will store the wrong value for the PC,
5316 so we must manually fix up the memory in the cleanup routine.
5317 Doing things this way has the advantage that we can auto-detect
5318 the offset of the PC write (which is architecture-dependent) in
5319 the cleanup routine. */
5320 dsc
->modinsn
[0] = insn
;
5322 dsc
->cleanup
= &cleanup_block_store_pc
;
5328 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
5329 for Linux, where some SVC instructions must be treated specially. */
5332 cleanup_svc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5333 struct displaced_step_closure
*dsc
)
5335 CORE_ADDR from
= dsc
->insn_addr
;
5336 CORE_ADDR resume_addr
= from
+ 4;
5338 if (debug_displaced
)
5339 fprintf_unfiltered (gdb_stdlog
, "displaced: cleanup for svc, resume at "
5340 "%.8lx\n", (unsigned long) resume_addr
);
5342 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, resume_addr
, BRANCH_WRITE_PC
);
5346 copy_svc (struct gdbarch
*gdbarch
, uint32_t insn
, CORE_ADDR to
,
5347 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5349 CORE_ADDR from
= dsc
->insn_addr
;
5351 /* Allow OS-specific code to override SVC handling. */
5352 if (dsc
->u
.svc
.copy_svc_os
)
5353 return dsc
->u
.svc
.copy_svc_os (gdbarch
, insn
, to
, regs
, dsc
);
5355 if (debug_displaced
)
5356 fprintf_unfiltered (gdb_stdlog
, "displaced: copying svc insn %.8lx\n",
5357 (unsigned long) insn
);
5359 /* Preparation: none.
5360 Insn: unmodified svc.
5361 Cleanup: pc <- insn_addr + 4. */
5363 dsc
->modinsn
[0] = insn
;
5365 dsc
->cleanup
= &cleanup_svc
;
5366 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
5368 dsc
->wrote_to_pc
= 1;
5373 /* Copy undefined instructions. */
5376 copy_undef (struct gdbarch
*gdbarch
, uint32_t insn
,
5377 struct displaced_step_closure
*dsc
)
5379 if (debug_displaced
)
5380 fprintf_unfiltered (gdb_stdlog
, "displaced: copying undefined insn %.8lx\n",
5381 (unsigned long) insn
);
5383 dsc
->modinsn
[0] = insn
;
5388 /* Copy unpredictable instructions. */
5391 copy_unpred (struct gdbarch
*gdbarch
, uint32_t insn
,
5392 struct displaced_step_closure
*dsc
)
5394 if (debug_displaced
)
5395 fprintf_unfiltered (gdb_stdlog
, "displaced: copying unpredictable insn "
5396 "%.8lx\n", (unsigned long) insn
);
5398 dsc
->modinsn
[0] = insn
;
/* The decode_* functions are instruction decoding helpers.  They mostly follow
   the presentation in the ARM ARM.  */

/* Decode miscellaneous, memory-hint and Neon instructions (the
   unconditional space with bit 27 clear) and dispatch to the matching
   copy_* routine.  Returns that routine's status.  */

static int
decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
  else if ((op1 & 0x77) == 0x41)
    return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      /* pld/pldw with rn == pc is unpredictable.  */
      if (rn != 0xf)
	return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);

      case 0x65:
	return copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */

      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return copy_preload_reg (gdbarch, insn, regs, dsc);

      case 0x63: case 0x67: case 0x73: case 0x77:
	return copy_unpred (gdbarch, insn, dsc);

      default:
	return copy_undef (gdbarch, insn, dsc);
      }
  else
    return copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
/* Decode the unconditional instruction space (condition field 0xf) and
   dispatch to the matching copy_* routine.  */

static int
decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs, struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? copy_undef (gdbarch, insn, dsc)
			: copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
			: copy_undef (gdbarch, insn, dsc);

	  default:
	    return copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return copy_undef (gdbarch, insn, dsc);
    }
}
/* Decode miscellaneous instructions in dp/misc encoding space.  */

static int
decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);
  unsigned int op1 = bits (insn, 16, 19);

  switch (op2)
    {
    case 0x0:
      return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);  /* blx register.  */
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return copy_unmodified (gdbarch, insn, "smc", dsc);
      /* fallthrough */

    default:
      return copy_undef (gdbarch, insn, dsc);
    }
}
/* Decode the data-processing / miscellaneous instruction space and
   dispatch to the matching copy_* routine.  */

static int
decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* The third argument selects the "unprivileged" (user-mode
	   access) form.  */
	return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				 dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Decode word/unsigned-byte load/store instructions and dispatch to
   copy_ldr_str_ldrb_strb with the appropriate (load, byte, usermode)
   flags, following the ARM ARM decode table.  */

static int
decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			 struct regcache *regs,
			 struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
/* Decode the media instruction space.  None of these can reference the
   PC in a way that matters for displaced stepping, so everything is
   either copied unmodified or flagged undefined.  */

static int
decode_media (struct gdbarch *gdbarch, uint32_t insn,
	      struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return copy_unmodified (gdbarch, insn,
			      "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  if (bits (insn, 12, 15) == 0xf)
	    return copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  if (bits (insn, 0, 3) == 0xf)
	    return copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Decode the branch / block-transfer space: bit 25 distinguishes b/bl
   from ldm/stm.  */

static int
decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
		    struct regcache *regs, struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    return copy_b_bl_blx (gdbarch, insn, regs, dsc);
  else
    return copy_block_xfer (gdbarch, insn, regs, dsc);
}
/* Decode VFP/Neon extension-register load/store instructions and
   dispatch to the matching copy_* routine.  */

static int
decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
5778 decode_svc_copro (struct gdbarch
*gdbarch
, uint32_t insn
, CORE_ADDR to
,
5779 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5781 unsigned int op1
= bits (insn
, 20, 25);
5782 int op
= bit (insn
, 4);
5783 unsigned int coproc
= bits (insn
, 8, 11);
5784 unsigned int rn
= bits (insn
, 16, 19);
5786 if ((op1
& 0x20) == 0x00 && (op1
& 0x3a) != 0x00 && (coproc
& 0xe) == 0xa)
5787 return decode_ext_reg_ld_st (gdbarch
, insn
, regs
, dsc
);
5788 else if ((op1
& 0x21) == 0x00 && (op1
& 0x3a) != 0x00
5789 && (coproc
& 0xe) != 0xa)
5791 return copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
5792 else if ((op1
& 0x21) == 0x01 && (op1
& 0x3a) != 0x00
5793 && (coproc
& 0xe) != 0xa)
5794 /* ldc/ldc2 imm/lit. */
5795 return copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
5796 else if ((op1
& 0x3e) == 0x00)
5797 return copy_undef (gdbarch
, insn
, dsc
);
5798 else if ((op1
& 0x3e) == 0x04 && (coproc
& 0xe) == 0xa)
5799 return copy_unmodified (gdbarch
, insn
, "neon 64bit xfer", dsc
);
5800 else if (op1
== 0x04 && (coproc
& 0xe) != 0xa)
5801 return copy_unmodified (gdbarch
, insn
, "mcrr/mcrr2", dsc
);
5802 else if (op1
== 0x05 && (coproc
& 0xe) != 0xa)
5803 return copy_unmodified (gdbarch
, insn
, "mrrc/mrrc2", dsc
);
5804 else if ((op1
& 0x30) == 0x20 && !op
)
5806 if ((coproc
& 0xe) == 0xa)
5807 return copy_unmodified (gdbarch
, insn
, "vfp dataproc", dsc
);
5809 return copy_unmodified (gdbarch
, insn
, "cdp/cdp2", dsc
);
5811 else if ((op1
& 0x30) == 0x20 && op
)
5812 return copy_unmodified (gdbarch
, insn
, "neon 8/16/32 bit xfer", dsc
);
5813 else if ((op1
& 0x31) == 0x20 && op
&& (coproc
& 0xe) != 0xa)
5814 return copy_unmodified (gdbarch
, insn
, "mcr/mcr2", dsc
);
5815 else if ((op1
& 0x31) == 0x21 && op
&& (coproc
& 0xe) != 0xa)
5816 return copy_unmodified (gdbarch
, insn
, "mrc/mrc2", dsc
);
5817 else if ((op1
& 0x30) == 0x30)
5818 return copy_svc (gdbarch
, insn
, to
, regs
, dsc
);
5820 return copy_undef (gdbarch
, insn
, dsc
); /* Possibly unreachable. */
5824 arm_process_displaced_insn (struct gdbarch
*gdbarch
, uint32_t insn
,
5825 CORE_ADDR from
, CORE_ADDR to
, struct regcache
*regs
,
5826 struct displaced_step_closure
*dsc
)
5830 if (!displaced_in_arm_mode (regs
))
5831 error (_("Displaced stepping is only supported in ARM mode"));
5833 /* Most displaced instructions use a 1-instruction scratch space, so set this
5834 here and override below if/when necessary. */
5836 dsc
->insn_addr
= from
;
5837 dsc
->scratch_base
= to
;
5838 dsc
->cleanup
= NULL
;
5839 dsc
->wrote_to_pc
= 0;
5841 if ((insn
& 0xf0000000) == 0xf0000000)
5842 err
= decode_unconditional (gdbarch
, insn
, regs
, dsc
);
5843 else switch (((insn
& 0x10) >> 4) | ((insn
& 0xe000000) >> 24))
5845 case 0x0: case 0x1: case 0x2: case 0x3:
5846 err
= decode_dp_misc (gdbarch
, insn
, regs
, dsc
);
5849 case 0x4: case 0x5: case 0x6:
5850 err
= decode_ld_st_word_ubyte (gdbarch
, insn
, regs
, dsc
);
5854 err
= decode_media (gdbarch
, insn
, dsc
);
5857 case 0x8: case 0x9: case 0xa: case 0xb:
5858 err
= decode_b_bl_ldmstm (gdbarch
, insn
, regs
, dsc
);
5861 case 0xc: case 0xd: case 0xe: case 0xf:
5862 err
= decode_svc_copro (gdbarch
, insn
, to
, regs
, dsc
);
5867 internal_error (__FILE__
, __LINE__
,
5868 _("arm_process_displaced_insn: Instruction decode error"));
5871 /* Actually set up the scratch space for a displaced instruction. */
5874 arm_displaced_init_closure (struct gdbarch
*gdbarch
, CORE_ADDR from
,
5875 CORE_ADDR to
, struct displaced_step_closure
*dsc
)
5877 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
5879 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
5881 /* Poke modified instruction(s). */
5882 for (i
= 0; i
< dsc
->numinsns
; i
++)
5884 if (debug_displaced
)
5885 fprintf_unfiltered (gdb_stdlog
, "displaced: writing insn %.8lx at "
5886 "%.8lx\n", (unsigned long) dsc
->modinsn
[i
],
5887 (unsigned long) to
+ i
* 4);
5888 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
5892 /* Put breakpoint afterwards. */
5893 write_memory (to
+ dsc
->numinsns
* 4, tdep
->arm_breakpoint
,
5894 tdep
->arm_breakpoint_size
);
5896 if (debug_displaced
)
5897 fprintf_unfiltered (gdb_stdlog
, "displaced: copy %s->%s: ",
5898 paddress (gdbarch
, from
), paddress (gdbarch
, to
));
5901 /* Entry point for copying an instruction into scratch space for displaced
5904 struct displaced_step_closure
*
5905 arm_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
5906 CORE_ADDR from
, CORE_ADDR to
,
5907 struct regcache
*regs
)
5909 struct displaced_step_closure
*dsc
5910 = xmalloc (sizeof (struct displaced_step_closure
));
5911 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
5912 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
5914 if (debug_displaced
)
5915 fprintf_unfiltered (gdb_stdlog
, "displaced: stepping insn %.8lx "
5916 "at %.8lx\n", (unsigned long) insn
,
5917 (unsigned long) from
);
5919 arm_process_displaced_insn (gdbarch
, insn
, from
, to
, regs
, dsc
);
5920 arm_displaced_init_closure (gdbarch
, from
, to
, dsc
);
5925 /* Entry point for cleaning things up after a displaced instruction has been
5929 arm_displaced_step_fixup (struct gdbarch
*gdbarch
,
5930 struct displaced_step_closure
*dsc
,
5931 CORE_ADDR from
, CORE_ADDR to
,
5932 struct regcache
*regs
)
5935 dsc
->cleanup (gdbarch
, regs
, dsc
);
5937 if (!dsc
->wrote_to_pc
)
5938 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, dsc
->insn_addr
+ 4);
5941 #include "bfd-in2.h"
5942 #include "libcoff.h"
5945 gdb_print_insn_arm (bfd_vma memaddr
, disassemble_info
*info
)
5947 struct gdbarch
*gdbarch
= info
->application_data
;
5949 if (arm_pc_is_thumb (gdbarch
, memaddr
))
5951 static asymbol
*asym
;
5952 static combined_entry_type ce
;
5953 static struct coff_symbol_struct csym
;
5954 static struct bfd fake_bfd
;
5955 static bfd_target fake_target
;
5957 if (csym
.native
== NULL
)
5959 /* Create a fake symbol vector containing a Thumb symbol.
5960 This is solely so that the code in print_insn_little_arm()
5961 and print_insn_big_arm() in opcodes/arm-dis.c will detect
5962 the presence of a Thumb symbol and switch to decoding
5963 Thumb instructions. */
5965 fake_target
.flavour
= bfd_target_coff_flavour
;
5966 fake_bfd
.xvec
= &fake_target
;
5967 ce
.u
.syment
.n_sclass
= C_THUMBEXTFUNC
;
5969 csym
.symbol
.the_bfd
= &fake_bfd
;
5970 csym
.symbol
.name
= "fake";
5971 asym
= (asymbol
*) & csym
;
5974 memaddr
= UNMAKE_THUMB_ADDR (memaddr
);
5975 info
->symbols
= &asym
;
5978 info
->symbols
= NULL
;
5980 if (info
->endian
== BFD_ENDIAN_BIG
)
5981 return print_insn_big_arm (memaddr
, info
);
5983 return print_insn_little_arm (memaddr
, info
);
5986 /* The following define instruction sequences that will cause ARM
5987 cpu's to take an undefined instruction trap. These are used to
5988 signal a breakpoint to GDB.
5990 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
5991 modes. A different instruction is required for each mode. The ARM
5992 cpu's can also be big or little endian. Thus four different
5993 instructions are needed to support all cases.
5995 Note: ARMv4 defines several new instructions that will take the
5996 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
5997 not in fact add the new instructions. The new undefined
5998 instructions in ARMv4 are all instructions that had no defined
5999 behaviour in earlier chips. There is no guarantee that they will
6000 raise an exception, but may be treated as NOP's. In practice, it
6001 may only safe to rely on instructions matching:
6003 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
6004 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
6005 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
6007 Even this may only true if the condition predicate is true. The
6008 following use a condition predicate of ALWAYS so it is always TRUE.
6010 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
6011 and NetBSD all use a software interrupt rather than an undefined
6012 instruction to force a trap. This can be handled by by the
6013 abi-specific code during establishment of the gdbarch vector. */
6015 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
6016 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
6017 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
6018 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
6020 static const char arm_default_arm_le_breakpoint
[] = ARM_LE_BREAKPOINT
;
6021 static const char arm_default_arm_be_breakpoint
[] = ARM_BE_BREAKPOINT
;
6022 static const char arm_default_thumb_le_breakpoint
[] = THUMB_LE_BREAKPOINT
;
6023 static const char arm_default_thumb_be_breakpoint
[] = THUMB_BE_BREAKPOINT
;
6025 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
6026 the program counter value to determine whether a 16-bit or 32-bit
6027 breakpoint should be used. It returns a pointer to a string of
6028 bytes that encode a breakpoint instruction, stores the length of
6029 the string to *lenptr, and adjusts the program counter (if
6030 necessary) to point to the actual memory location where the
6031 breakpoint should be inserted. */
6033 static const unsigned char *
6034 arm_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
, int *lenptr
)
6036 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
6037 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
6039 if (arm_pc_is_thumb (gdbarch
, *pcptr
))
6041 *pcptr
= UNMAKE_THUMB_ADDR (*pcptr
);
6043 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
6044 check whether we are replacing a 32-bit instruction. */
6045 if (tdep
->thumb2_breakpoint
!= NULL
)
6048 if (target_read_memory (*pcptr
, buf
, 2) == 0)
6050 unsigned short inst1
;
6051 inst1
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
6052 if ((inst1
& 0xe000) == 0xe000 && (inst1
& 0x1800) != 0)
6054 *lenptr
= tdep
->thumb2_breakpoint_size
;
6055 return tdep
->thumb2_breakpoint
;
6060 *lenptr
= tdep
->thumb_breakpoint_size
;
6061 return tdep
->thumb_breakpoint
;
6065 *lenptr
= tdep
->arm_breakpoint_size
;
6066 return tdep
->arm_breakpoint
;
6071 arm_remote_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
6074 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
6076 arm_breakpoint_from_pc (gdbarch
, pcptr
, kindptr
);
6078 if (arm_pc_is_thumb (gdbarch
, *pcptr
) && *kindptr
== 4)
6079 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
6080 that this is not confused with a 32-bit ARM breakpoint. */
6084 /* Extract from an array REGBUF containing the (raw) register state a
6085 function return value of type TYPE, and copy that, in virtual
6086 format, into VALBUF. */
6089 arm_extract_return_value (struct type
*type
, struct regcache
*regs
,
6092 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
6093 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6095 if (TYPE_CODE_FLT
== TYPE_CODE (type
))
6097 switch (gdbarch_tdep (gdbarch
)->fp_model
)
6101 /* The value is in register F0 in internal format. We need to
6102 extract the raw value and then convert it to the desired
6104 bfd_byte tmpbuf
[FP_REGISTER_SIZE
];
6106 regcache_cooked_read (regs
, ARM_F0_REGNUM
, tmpbuf
);
6107 convert_from_extended (floatformat_from_type (type
), tmpbuf
,
6108 valbuf
, gdbarch_byte_order (gdbarch
));
6112 case ARM_FLOAT_SOFT_FPA
:
6113 case ARM_FLOAT_SOFT_VFP
:
6114 /* ARM_FLOAT_VFP can arise if this is a variadic function so
6115 not using the VFP ABI code. */
6117 regcache_cooked_read (regs
, ARM_A1_REGNUM
, valbuf
);
6118 if (TYPE_LENGTH (type
) > 4)
6119 regcache_cooked_read (regs
, ARM_A1_REGNUM
+ 1,
6120 valbuf
+ INT_REGISTER_SIZE
);
6125 (__FILE__
, __LINE__
,
6126 _("arm_extract_return_value: Floating point model not supported"));
6130 else if (TYPE_CODE (type
) == TYPE_CODE_INT
6131 || TYPE_CODE (type
) == TYPE_CODE_CHAR
6132 || TYPE_CODE (type
) == TYPE_CODE_BOOL
6133 || TYPE_CODE (type
) == TYPE_CODE_PTR
6134 || TYPE_CODE (type
) == TYPE_CODE_REF
6135 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
6137 /* If the the type is a plain integer, then the access is
6138 straight-forward. Otherwise we have to play around a bit more. */
6139 int len
= TYPE_LENGTH (type
);
6140 int regno
= ARM_A1_REGNUM
;
6145 /* By using store_unsigned_integer we avoid having to do
6146 anything special for small big-endian values. */
6147 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
6148 store_unsigned_integer (valbuf
,
6149 (len
> INT_REGISTER_SIZE
6150 ? INT_REGISTER_SIZE
: len
),
6152 len
-= INT_REGISTER_SIZE
;
6153 valbuf
+= INT_REGISTER_SIZE
;
6158 /* For a structure or union the behaviour is as if the value had
6159 been stored to word-aligned memory and then loaded into
6160 registers with 32-bit load instruction(s). */
6161 int len
= TYPE_LENGTH (type
);
6162 int regno
= ARM_A1_REGNUM
;
6163 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
6167 regcache_cooked_read (regs
, regno
++, tmpbuf
);
6168 memcpy (valbuf
, tmpbuf
,
6169 len
> INT_REGISTER_SIZE
? INT_REGISTER_SIZE
: len
);
6170 len
-= INT_REGISTER_SIZE
;
6171 valbuf
+= INT_REGISTER_SIZE
;
6177 /* Will a function return an aggregate type in memory or in a
6178 register? Return 0 if an aggregate type can be returned in a
6179 register, 1 if it must be returned in memory. */
6182 arm_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
6185 enum type_code code
;
6187 CHECK_TYPEDEF (type
);
6189 /* In the ARM ABI, "integer" like aggregate types are returned in
6190 registers. For an aggregate type to be integer like, its size
6191 must be less than or equal to INT_REGISTER_SIZE and the
6192 offset of each addressable subfield must be zero. Note that bit
6193 fields are not addressable, and all addressable subfields of
6194 unions always start at offset zero.
6196 This function is based on the behaviour of GCC 2.95.1.
6197 See: gcc/arm.c: arm_return_in_memory() for details.
6199 Note: All versions of GCC before GCC 2.95.2 do not set up the
6200 parameters correctly for a function returning the following
6201 structure: struct { float f;}; This should be returned in memory,
6202 not a register. Richard Earnshaw sent me a patch, but I do not
6203 know of any way to detect if a function like the above has been
6204 compiled with the correct calling convention. */
6206 /* All aggregate types that won't fit in a register must be returned
6208 if (TYPE_LENGTH (type
) > INT_REGISTER_SIZE
)
6213 /* The AAPCS says all aggregates not larger than a word are returned
6215 if (gdbarch_tdep (gdbarch
)->arm_abi
!= ARM_ABI_APCS
)
6218 /* The only aggregate types that can be returned in a register are
6219 structs and unions. Arrays must be returned in memory. */
6220 code
= TYPE_CODE (type
);
6221 if ((TYPE_CODE_STRUCT
!= code
) && (TYPE_CODE_UNION
!= code
))
6226 /* Assume all other aggregate types can be returned in a register.
6227 Run a check for structures, unions and arrays. */
6230 if ((TYPE_CODE_STRUCT
== code
) || (TYPE_CODE_UNION
== code
))
6233 /* Need to check if this struct/union is "integer" like. For
6234 this to be true, its size must be less than or equal to
6235 INT_REGISTER_SIZE and the offset of each addressable
6236 subfield must be zero. Note that bit fields are not
6237 addressable, and unions always start at offset zero. If any
6238 of the subfields is a floating point type, the struct/union
6239 cannot be an integer type. */
6241 /* For each field in the object, check:
6242 1) Is it FP? --> yes, nRc = 1;
6243 2) Is it addressable (bitpos != 0) and
6244 not packed (bitsize == 0)?
6248 for (i
= 0; i
< TYPE_NFIELDS (type
); i
++)
6250 enum type_code field_type_code
;
6251 field_type_code
= TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type
, i
)));
6253 /* Is it a floating point type field? */
6254 if (field_type_code
== TYPE_CODE_FLT
)
6260 /* If bitpos != 0, then we have to care about it. */
6261 if (TYPE_FIELD_BITPOS (type
, i
) != 0)
6263 /* Bitfields are not addressable. If the field bitsize is
6264 zero, then the field is not packed. Hence it cannot be
6265 a bitfield or any other packed type. */
6266 if (TYPE_FIELD_BITSIZE (type
, i
) == 0)
6278 /* Write into appropriate registers a function return value of type
6279 TYPE, given in virtual format. */
6282 arm_store_return_value (struct type
*type
, struct regcache
*regs
,
6283 const gdb_byte
*valbuf
)
6285 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
6286 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6288 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
6290 char buf
[MAX_REGISTER_SIZE
];
6292 switch (gdbarch_tdep (gdbarch
)->fp_model
)
6296 convert_to_extended (floatformat_from_type (type
), buf
, valbuf
,
6297 gdbarch_byte_order (gdbarch
));
6298 regcache_cooked_write (regs
, ARM_F0_REGNUM
, buf
);
6301 case ARM_FLOAT_SOFT_FPA
:
6302 case ARM_FLOAT_SOFT_VFP
:
6303 /* ARM_FLOAT_VFP can arise if this is a variadic function so
6304 not using the VFP ABI code. */
6306 regcache_cooked_write (regs
, ARM_A1_REGNUM
, valbuf
);
6307 if (TYPE_LENGTH (type
) > 4)
6308 regcache_cooked_write (regs
, ARM_A1_REGNUM
+ 1,
6309 valbuf
+ INT_REGISTER_SIZE
);
6314 (__FILE__
, __LINE__
,
6315 _("arm_store_return_value: Floating point model not supported"));
6319 else if (TYPE_CODE (type
) == TYPE_CODE_INT
6320 || TYPE_CODE (type
) == TYPE_CODE_CHAR
6321 || TYPE_CODE (type
) == TYPE_CODE_BOOL
6322 || TYPE_CODE (type
) == TYPE_CODE_PTR
6323 || TYPE_CODE (type
) == TYPE_CODE_REF
6324 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
6326 if (TYPE_LENGTH (type
) <= 4)
6328 /* Values of one word or less are zero/sign-extended and
6330 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
6331 LONGEST val
= unpack_long (type
, valbuf
);
6333 store_signed_integer (tmpbuf
, INT_REGISTER_SIZE
, byte_order
, val
);
6334 regcache_cooked_write (regs
, ARM_A1_REGNUM
, tmpbuf
);
6338 /* Integral values greater than one word are stored in consecutive
6339 registers starting with r0. This will always be a multiple of
6340 the regiser size. */
6341 int len
= TYPE_LENGTH (type
);
6342 int regno
= ARM_A1_REGNUM
;
6346 regcache_cooked_write (regs
, regno
++, valbuf
);
6347 len
-= INT_REGISTER_SIZE
;
6348 valbuf
+= INT_REGISTER_SIZE
;
6354 /* For a structure or union the behaviour is as if the value had
6355 been stored to word-aligned memory and then loaded into
6356 registers with 32-bit load instruction(s). */
6357 int len
= TYPE_LENGTH (type
);
6358 int regno
= ARM_A1_REGNUM
;
6359 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
6363 memcpy (tmpbuf
, valbuf
,
6364 len
> INT_REGISTER_SIZE
? INT_REGISTER_SIZE
: len
);
6365 regcache_cooked_write (regs
, regno
++, tmpbuf
);
6366 len
-= INT_REGISTER_SIZE
;
6367 valbuf
+= INT_REGISTER_SIZE
;
6373 /* Handle function return values. */
6375 static enum return_value_convention
6376 arm_return_value (struct gdbarch
*gdbarch
, struct type
*func_type
,
6377 struct type
*valtype
, struct regcache
*regcache
,
6378 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
6380 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
6381 enum arm_vfp_cprc_base_type vfp_base_type
;
6384 if (arm_vfp_abi_for_function (gdbarch
, func_type
)
6385 && arm_vfp_call_candidate (valtype
, &vfp_base_type
, &vfp_base_count
))
6387 int reg_char
= arm_vfp_cprc_reg_char (vfp_base_type
);
6388 int unit_length
= arm_vfp_cprc_unit_length (vfp_base_type
);
6390 for (i
= 0; i
< vfp_base_count
; i
++)
6392 if (reg_char
== 'q')
6395 arm_neon_quad_write (gdbarch
, regcache
, i
,
6396 writebuf
+ i
* unit_length
);
6399 arm_neon_quad_read (gdbarch
, regcache
, i
,
6400 readbuf
+ i
* unit_length
);
6407 sprintf (name_buf
, "%c%d", reg_char
, i
);
6408 regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
6411 regcache_cooked_write (regcache
, regnum
,
6412 writebuf
+ i
* unit_length
);
6414 regcache_cooked_read (regcache
, regnum
,
6415 readbuf
+ i
* unit_length
);
6418 return RETURN_VALUE_REGISTER_CONVENTION
;
6421 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
6422 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
6423 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
6425 if (tdep
->struct_return
== pcc_struct_return
6426 || arm_return_in_memory (gdbarch
, valtype
))
6427 return RETURN_VALUE_STRUCT_CONVENTION
;
6431 arm_store_return_value (valtype
, regcache
, writebuf
);
6434 arm_extract_return_value (valtype
, regcache
, readbuf
);
6436 return RETURN_VALUE_REGISTER_CONVENTION
;
6441 arm_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
6443 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
6444 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
6445 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6447 char buf
[INT_REGISTER_SIZE
];
6449 jb_addr
= get_frame_register_unsigned (frame
, ARM_A1_REGNUM
);
6451 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
6455 *pc
= extract_unsigned_integer (buf
, INT_REGISTER_SIZE
, byte_order
);
6459 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
6460 return the target PC. Otherwise return 0. */
6463 arm_skip_stub (struct frame_info
*frame
, CORE_ADDR pc
)
6467 CORE_ADDR start_addr
;
6469 /* Find the starting address and name of the function containing the PC. */
6470 if (find_pc_partial_function (pc
, &name
, &start_addr
, NULL
) == 0)
6473 /* If PC is in a Thumb call or return stub, return the address of the
6474 target PC, which is in a register. The thunk functions are called
6475 _call_via_xx, where x is the register name. The possible names
6476 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
6477 functions, named __ARM_call_via_r[0-7]. */
6478 if (strncmp (name
, "_call_via_", 10) == 0
6479 || strncmp (name
, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
6481 /* Use the name suffix to determine which register contains the
6483 static char *table
[15] =
6484 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
6485 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
6488 int offset
= strlen (name
) - 2;
6490 for (regno
= 0; regno
<= 14; regno
++)
6491 if (strcmp (&name
[offset
], table
[regno
]) == 0)
6492 return get_frame_register_unsigned (frame
, regno
);
6495 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
6496 non-interworking calls to foo. We could decode the stubs
6497 to find the target but it's easier to use the symbol table. */
6498 namelen
= strlen (name
);
6499 if (name
[0] == '_' && name
[1] == '_'
6500 && ((namelen
> 2 + strlen ("_from_thumb")
6501 && strncmp (name
+ namelen
- strlen ("_from_thumb"), "_from_thumb",
6502 strlen ("_from_thumb")) == 0)
6503 || (namelen
> 2 + strlen ("_from_arm")
6504 && strncmp (name
+ namelen
- strlen ("_from_arm"), "_from_arm",
6505 strlen ("_from_arm")) == 0)))
6508 int target_len
= namelen
- 2;
6509 struct minimal_symbol
*minsym
;
6510 struct objfile
*objfile
;
6511 struct obj_section
*sec
;
6513 if (name
[namelen
- 1] == 'b')
6514 target_len
-= strlen ("_from_thumb");
6516 target_len
-= strlen ("_from_arm");
6518 target_name
= alloca (target_len
+ 1);
6519 memcpy (target_name
, name
+ 2, target_len
);
6520 target_name
[target_len
] = '\0';
6522 sec
= find_pc_section (pc
);
6523 objfile
= (sec
== NULL
) ? NULL
: sec
->objfile
;
6524 minsym
= lookup_minimal_symbol (target_name
, NULL
, objfile
);
6526 return SYMBOL_VALUE_ADDRESS (minsym
);
6531 return 0; /* not a stub */
6535 set_arm_command (char *args
, int from_tty
)
6537 printf_unfiltered (_("\
6538 \"set arm\" must be followed by an apporpriate subcommand.\n"));
6539 help_list (setarmcmdlist
, "set arm ", all_commands
, gdb_stdout
);
6543 show_arm_command (char *args
, int from_tty
)
6545 cmd_show_list (showarmcmdlist
, from_tty
, "");
6549 arm_update_current_architecture (void)
6551 struct gdbarch_info info
;
6553 /* If the current architecture is not ARM, we have nothing to do. */
6554 if (gdbarch_bfd_arch_info (target_gdbarch
)->arch
!= bfd_arch_arm
)
6557 /* Update the architecture. */
6558 gdbarch_info_init (&info
);
6560 if (!gdbarch_update_p (info
))
6561 internal_error (__FILE__
, __LINE__
, "could not update architecture");
6565 set_fp_model_sfunc (char *args
, int from_tty
,
6566 struct cmd_list_element
*c
)
6568 enum arm_float_model fp_model
;
6570 for (fp_model
= ARM_FLOAT_AUTO
; fp_model
!= ARM_FLOAT_LAST
; fp_model
++)
6571 if (strcmp (current_fp_model
, fp_model_strings
[fp_model
]) == 0)
6573 arm_fp_model
= fp_model
;
6577 if (fp_model
== ARM_FLOAT_LAST
)
6578 internal_error (__FILE__
, __LINE__
, _("Invalid fp model accepted: %s."),
6581 arm_update_current_architecture ();
6585 show_fp_model (struct ui_file
*file
, int from_tty
,
6586 struct cmd_list_element
*c
, const char *value
)
6588 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
6590 if (arm_fp_model
== ARM_FLOAT_AUTO
6591 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
6592 fprintf_filtered (file
, _("\
6593 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
6594 fp_model_strings
[tdep
->fp_model
]);
6596 fprintf_filtered (file
, _("\
6597 The current ARM floating point model is \"%s\".\n"),
6598 fp_model_strings
[arm_fp_model
]);
6602 arm_set_abi (char *args
, int from_tty
,
6603 struct cmd_list_element
*c
)
6605 enum arm_abi_kind arm_abi
;
6607 for (arm_abi
= ARM_ABI_AUTO
; arm_abi
!= ARM_ABI_LAST
; arm_abi
++)
6608 if (strcmp (arm_abi_string
, arm_abi_strings
[arm_abi
]) == 0)
6610 arm_abi_global
= arm_abi
;
6614 if (arm_abi
== ARM_ABI_LAST
)
6615 internal_error (__FILE__
, __LINE__
, _("Invalid ABI accepted: %s."),
6618 arm_update_current_architecture ();
6622 arm_show_abi (struct ui_file
*file
, int from_tty
,
6623 struct cmd_list_element
*c
, const char *value
)
6625 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
6627 if (arm_abi_global
== ARM_ABI_AUTO
6628 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
6629 fprintf_filtered (file
, _("\
6630 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
6631 arm_abi_strings
[tdep
->arm_abi
]);
6633 fprintf_filtered (file
, _("The current ARM ABI is \"%s\".\n"),
6638 arm_show_fallback_mode (struct ui_file
*file
, int from_tty
,
6639 struct cmd_list_element
*c
, const char *value
)
6641 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
6643 fprintf_filtered (file
, _("\
6644 The current execution mode assumed (when symbols are unavailable) is \"%s\".\n"),
6645 arm_fallback_mode_string
);
6649 arm_show_force_mode (struct ui_file
*file
, int from_tty
,
6650 struct cmd_list_element
*c
, const char *value
)
6652 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
6654 fprintf_filtered (file
, _("\
6655 The current execution mode assumed (even when symbols are available) is \"%s\".\n"),
6656 arm_force_mode_string
);
6659 /* If the user changes the register disassembly style used for info
6660 register and other commands, we have to also switch the style used
6661 in opcodes for disassembly output. This function is run in the "set
6662 arm disassembly" command, and does that. */
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */
static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  set_disassembly_style ();
}
6671 /* Return the ARM register name corresponding to register I. */
6673 arm_register_name (struct gdbarch
*gdbarch
, int i
)
6675 const int num_regs
= gdbarch_num_regs (gdbarch
);
6677 if (gdbarch_tdep (gdbarch
)->have_vfp_pseudos
6678 && i
>= num_regs
&& i
< num_regs
+ 32)
6680 static const char *const vfp_pseudo_names
[] = {
6681 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
6682 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
6683 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
6684 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
6687 return vfp_pseudo_names
[i
- num_regs
];
6690 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
6691 && i
>= num_regs
+ 32 && i
< num_regs
+ 32 + 16)
6693 static const char *const neon_pseudo_names
[] = {
6694 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
6695 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
6698 return neon_pseudo_names
[i
- num_regs
- 32];
6701 if (i
>= ARRAY_SIZE (arm_register_names
))
6702 /* These registers are only supported on targets which supply
6703 an XML description. */
6706 return arm_register_names
[i
];
6710 set_disassembly_style (void)
6714 /* Find the style that the user wants. */
6715 for (current
= 0; current
< num_disassembly_options
; current
++)
6716 if (disassembly_style
== valid_disassembly_styles
[current
])
6718 gdb_assert (current
< num_disassembly_options
);
6720 /* Synchronize the disassembler. */
6721 set_arm_regname_option (current
);
6724 /* Test whether the coff symbol specific value corresponds to a Thumb
6728 coff_sym_is_thumb (int val
)
6730 return (val
== C_THUMBEXT
6731 || val
== C_THUMBSTAT
6732 || val
== C_THUMBEXTFUNC
6733 || val
== C_THUMBSTATFUNC
6734 || val
== C_THUMBLABEL
);
6737 /* arm_coff_make_msymbol_special()
6738 arm_elf_make_msymbol_special()
6740 These functions test whether the COFF or ELF symbol corresponds to
6741 an address in thumb code, and set a "special" bit in a minimal
6742 symbol to indicate that it does. */
6745 arm_elf_make_msymbol_special(asymbol
*sym
, struct minimal_symbol
*msym
)
6747 /* Thumb symbols are of type STT_LOPROC, (synonymous with
6749 if (ELF_ST_TYPE (((elf_symbol_type
*)sym
)->internal_elf_sym
.st_info
)
6751 MSYMBOL_SET_SPECIAL (msym
);
/* Mark MSYM as special (Thumb) when the COFF storage class VAL names a
   Thumb symbol.  */
static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
6762 arm_objfile_data_free (struct objfile
*objfile
, void *arg
)
6764 struct arm_per_objfile
*data
= arg
;
6767 for (i
= 0; i
< objfile
->obfd
->section_count
; i
++)
6768 VEC_free (arm_mapping_symbol_s
, data
->section_maps
[i
]);
6772 arm_record_special_symbol (struct gdbarch
*gdbarch
, struct objfile
*objfile
,
6775 const char *name
= bfd_asymbol_name (sym
);
6776 struct arm_per_objfile
*data
;
6777 VEC(arm_mapping_symbol_s
) **map_p
;
6778 struct arm_mapping_symbol new_map_sym
;
6780 gdb_assert (name
[0] == '$');
6781 if (name
[1] != 'a' && name
[1] != 't' && name
[1] != 'd')
6784 data
= objfile_data (objfile
, arm_objfile_data_key
);
6787 data
= OBSTACK_ZALLOC (&objfile
->objfile_obstack
,
6788 struct arm_per_objfile
);
6789 set_objfile_data (objfile
, arm_objfile_data_key
, data
);
6790 data
->section_maps
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
6791 objfile
->obfd
->section_count
,
6792 VEC(arm_mapping_symbol_s
) *);
6794 map_p
= &data
->section_maps
[bfd_get_section (sym
)->index
];
6796 new_map_sym
.value
= sym
->value
;
6797 new_map_sym
.type
= name
[1];
6799 /* Assume that most mapping symbols appear in order of increasing
6800 value. If they were randomly distributed, it would be faster to
6801 always push here and then sort at first use. */
6802 if (!VEC_empty (arm_mapping_symbol_s
, *map_p
))
6804 struct arm_mapping_symbol
*prev_map_sym
;
6806 prev_map_sym
= VEC_last (arm_mapping_symbol_s
, *map_p
);
6807 if (prev_map_sym
->value
>= sym
->value
)
6810 idx
= VEC_lower_bound (arm_mapping_symbol_s
, *map_p
, &new_map_sym
,
6811 arm_compare_mapping_symbols
);
6812 VEC_safe_insert (arm_mapping_symbol_s
, *map_p
, idx
, &new_map_sym
);
6817 VEC_safe_push (arm_mapping_symbol_s
, *map_p
, &new_map_sym
);
6821 arm_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
6823 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
6824 regcache_cooked_write_unsigned (regcache
, ARM_PC_REGNUM
, pc
);
6826 /* If necessary, set the T bit. */
6829 ULONGEST val
, t_bit
;
6830 regcache_cooked_read_unsigned (regcache
, ARM_PS_REGNUM
, &val
);
6831 t_bit
= arm_psr_thumb_bit (gdbarch
);
6832 if (arm_pc_is_thumb (gdbarch
, pc
))
6833 regcache_cooked_write_unsigned (regcache
, ARM_PS_REGNUM
,
6836 regcache_cooked_write_unsigned (regcache
, ARM_PS_REGNUM
,
6841 /* Read the contents of a NEON quad register, by reading from two
6842 double registers. This is used to implement the quad pseudo
6843 registers, and for argument passing in case the quad registers are
6844 missing; vectors are passed in quad registers when using the VFP
6845 ABI, even if a NEON unit is not present. REGNUM is the index of
6846 the quad register, in [0, 15]. */
6849 arm_neon_quad_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
6850 int regnum
, gdb_byte
*buf
)
6853 gdb_byte reg_buf
[8];
6854 int offset
, double_regnum
;
6856 sprintf (name_buf
, "d%d", regnum
<< 1);
6857 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
6860 /* d0 is always the least significant half of q0. */
6861 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
6866 regcache_raw_read (regcache
, double_regnum
, reg_buf
);
6867 memcpy (buf
+ offset
, reg_buf
, 8);
6869 offset
= 8 - offset
;
6870 regcache_raw_read (regcache
, double_regnum
+ 1, reg_buf
);
6871 memcpy (buf
+ offset
, reg_buf
, 8);
6875 arm_pseudo_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
6876 int regnum
, gdb_byte
*buf
)
6878 const int num_regs
= gdbarch_num_regs (gdbarch
);
6880 gdb_byte reg_buf
[8];
6881 int offset
, double_regnum
;
6883 gdb_assert (regnum
>= num_regs
);
6886 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
&& regnum
>= 32 && regnum
< 48)
6887 /* Quad-precision register. */
6888 arm_neon_quad_read (gdbarch
, regcache
, regnum
- 32, buf
);
6891 /* Single-precision register. */
6892 gdb_assert (regnum
< 32);
6894 /* s0 is always the least significant half of d0. */
6895 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
6896 offset
= (regnum
& 1) ? 0 : 4;
6898 offset
= (regnum
& 1) ? 4 : 0;
6900 sprintf (name_buf
, "d%d", regnum
>> 1);
6901 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
6904 regcache_raw_read (regcache
, double_regnum
, reg_buf
);
6905 memcpy (buf
, reg_buf
+ offset
, 4);
6909 /* Store the contents of BUF to a NEON quad register, by writing to
6910 two double registers. This is used to implement the quad pseudo
6911 registers, and for argument passing in case the quad registers are
6912 missing; vectors are passed in quad registers when using the VFP
6913 ABI, even if a NEON unit is not present. REGNUM is the index
6914 of the quad register, in [0, 15]. */
6917 arm_neon_quad_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
6918 int regnum
, const gdb_byte
*buf
)
6921 gdb_byte reg_buf
[8];
6922 int offset
, double_regnum
;
6924 sprintf (name_buf
, "d%d", regnum
<< 1);
6925 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
6928 /* d0 is always the least significant half of q0. */
6929 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
6934 regcache_raw_write (regcache
, double_regnum
, buf
+ offset
);
6935 offset
= 8 - offset
;
6936 regcache_raw_write (regcache
, double_regnum
+ 1, buf
+ offset
);
6940 arm_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
6941 int regnum
, const gdb_byte
*buf
)
6943 const int num_regs
= gdbarch_num_regs (gdbarch
);
6945 gdb_byte reg_buf
[8];
6946 int offset
, double_regnum
;
6948 gdb_assert (regnum
>= num_regs
);
6951 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
&& regnum
>= 32 && regnum
< 48)
6952 /* Quad-precision register. */
6953 arm_neon_quad_write (gdbarch
, regcache
, regnum
- 32, buf
);
6956 /* Single-precision register. */
6957 gdb_assert (regnum
< 32);
6959 /* s0 is always the least significant half of d0. */
6960 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
6961 offset
= (regnum
& 1) ? 0 : 4;
6963 offset
= (regnum
& 1) ? 4 : 0;
6965 sprintf (name_buf
, "d%d", regnum
>> 1);
6966 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
6969 regcache_raw_read (regcache
, double_regnum
, reg_buf
);
6970 memcpy (reg_buf
+ offset
, buf
, 4);
6971 regcache_raw_write (regcache
, double_regnum
, reg_buf
);
/* Return the value of user register *BATON (an ARM register number)
   in FRAME.  Used as the read callback for the register aliases
   registered with user_reg_add in arm_gdbarch_init.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
6982 static enum gdb_osabi
6983 arm_elf_osabi_sniffer (bfd
*abfd
)
6985 unsigned int elfosabi
;
6986 enum gdb_osabi osabi
= GDB_OSABI_UNKNOWN
;
6988 elfosabi
= elf_elfheader (abfd
)->e_ident
[EI_OSABI
];
6990 if (elfosabi
== ELFOSABI_ARM
)
6991 /* GNU tools use this value. Check note sections in this case,
6993 bfd_map_over_sections (abfd
,
6994 generic_elf_osabi_sniff_abi_tag_sections
,
6997 /* Anything else will be handled by the generic ELF sniffer. */
7002 /* Initialize the current architecture based on INFO. If possible,
7003 re-use an architecture from ARCHES, which is a list of
7004 architectures already created during this debugging session.
7006 Called e.g. at program startup, when reading a core file, and when
7007 reading a binary file. */
7009 static struct gdbarch
*
7010 arm_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
7012 struct gdbarch_tdep
*tdep
;
7013 struct gdbarch
*gdbarch
;
7014 struct gdbarch_list
*best_arch
;
7015 enum arm_abi_kind arm_abi
= arm_abi_global
;
7016 enum arm_float_model fp_model
= arm_fp_model
;
7017 struct tdesc_arch_data
*tdesc_data
= NULL
;
7019 int have_vfp_registers
= 0, have_vfp_pseudos
= 0, have_neon_pseudos
= 0;
7021 int have_fpa_registers
= 1;
7022 const struct target_desc
*tdesc
= info
.target_desc
;
7024 /* If we have an object to base this architecture on, try to determine
7027 if (arm_abi
== ARM_ABI_AUTO
&& info
.abfd
!= NULL
)
7029 int ei_osabi
, e_flags
;
7031 switch (bfd_get_flavour (info
.abfd
))
7033 case bfd_target_aout_flavour
:
7034 /* Assume it's an old APCS-style ABI. */
7035 arm_abi
= ARM_ABI_APCS
;
7038 case bfd_target_coff_flavour
:
7039 /* Assume it's an old APCS-style ABI. */
7041 arm_abi
= ARM_ABI_APCS
;
7044 case bfd_target_elf_flavour
:
7045 ei_osabi
= elf_elfheader (info
.abfd
)->e_ident
[EI_OSABI
];
7046 e_flags
= elf_elfheader (info
.abfd
)->e_flags
;
7048 if (ei_osabi
== ELFOSABI_ARM
)
7050 /* GNU tools used to use this value, but do not for EABI
7051 objects. There's nowhere to tag an EABI version
7052 anyway, so assume APCS. */
7053 arm_abi
= ARM_ABI_APCS
;
7055 else if (ei_osabi
== ELFOSABI_NONE
)
7057 int eabi_ver
= EF_ARM_EABI_VERSION (e_flags
);
7058 int attr_arch
, attr_profile
;
7062 case EF_ARM_EABI_UNKNOWN
:
7063 /* Assume GNU tools. */
7064 arm_abi
= ARM_ABI_APCS
;
7067 case EF_ARM_EABI_VER4
:
7068 case EF_ARM_EABI_VER5
:
7069 arm_abi
= ARM_ABI_AAPCS
;
7070 /* EABI binaries default to VFP float ordering.
7071 They may also contain build attributes that can
7072 be used to identify if the VFP argument-passing
7074 if (fp_model
== ARM_FLOAT_AUTO
)
7077 switch (bfd_elf_get_obj_attr_int (info
.abfd
,
7082 /* "The user intended FP parameter/result
7083 passing to conform to AAPCS, base
7085 fp_model
= ARM_FLOAT_SOFT_VFP
;
7088 /* "The user intended FP parameter/result
7089 passing to conform to AAPCS, VFP
7091 fp_model
= ARM_FLOAT_VFP
;
7094 /* "The user intended FP parameter/result
7095 passing to conform to tool chain-specific
7096 conventions" - we don't know any such
7097 conventions, so leave it as "auto". */
7100 /* Attribute value not mentioned in the
7101 October 2008 ABI, so leave it as
7106 fp_model
= ARM_FLOAT_SOFT_VFP
;
7112 /* Leave it as "auto". */
7113 warning (_("unknown ARM EABI version 0x%x"), eabi_ver
);
7118 /* Detect M-profile programs. This only works if the
7119 executable file includes build attributes; GCC does
7120 copy them to the executable, but e.g. RealView does
7122 attr_arch
= bfd_elf_get_obj_attr_int (info
.abfd
, OBJ_ATTR_PROC
,
7124 attr_profile
= bfd_elf_get_obj_attr_int (info
.abfd
, OBJ_ATTR_PROC
,
7125 Tag_CPU_arch_profile
);
7126 /* GCC specifies the profile for v6-M; RealView only
7127 specifies the profile for architectures starting with
7128 V7 (as opposed to architectures with a tag
7129 numerically greater than TAG_CPU_ARCH_V7). */
7130 if (!tdesc_has_registers (tdesc
)
7131 && (attr_arch
== TAG_CPU_ARCH_V6_M
7132 || attr_arch
== TAG_CPU_ARCH_V6S_M
7133 || attr_profile
== 'M'))
7134 tdesc
= tdesc_arm_with_m
;
7138 if (fp_model
== ARM_FLOAT_AUTO
)
7140 int e_flags
= elf_elfheader (info
.abfd
)->e_flags
;
7142 switch (e_flags
& (EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
))
7145 /* Leave it as "auto". Strictly speaking this case
7146 means FPA, but almost nobody uses that now, and
7147 many toolchains fail to set the appropriate bits
7148 for the floating-point model they use. */
7150 case EF_ARM_SOFT_FLOAT
:
7151 fp_model
= ARM_FLOAT_SOFT_FPA
;
7153 case EF_ARM_VFP_FLOAT
:
7154 fp_model
= ARM_FLOAT_VFP
;
7156 case EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
:
7157 fp_model
= ARM_FLOAT_SOFT_VFP
;
7162 if (e_flags
& EF_ARM_BE8
)
7163 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
7168 /* Leave it as "auto". */
7173 /* Check any target description for validity. */
7174 if (tdesc_has_registers (tdesc
))
7176 /* For most registers we require GDB's default names; but also allow
7177 the numeric names for sp / lr / pc, as a convenience. */
7178 static const char *const arm_sp_names
[] = { "r13", "sp", NULL
};
7179 static const char *const arm_lr_names
[] = { "r14", "lr", NULL
};
7180 static const char *const arm_pc_names
[] = { "r15", "pc", NULL
};
7182 const struct tdesc_feature
*feature
;
7185 feature
= tdesc_find_feature (tdesc
,
7186 "org.gnu.gdb.arm.core");
7187 if (feature
== NULL
)
7189 feature
= tdesc_find_feature (tdesc
,
7190 "org.gnu.gdb.arm.m-profile");
7191 if (feature
== NULL
)
7197 tdesc_data
= tdesc_data_alloc ();
7200 for (i
= 0; i
< ARM_SP_REGNUM
; i
++)
7201 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
, i
,
7202 arm_register_names
[i
]);
7203 valid_p
&= tdesc_numbered_register_choices (feature
, tdesc_data
,
7206 valid_p
&= tdesc_numbered_register_choices (feature
, tdesc_data
,
7209 valid_p
&= tdesc_numbered_register_choices (feature
, tdesc_data
,
7213 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
7214 ARM_PS_REGNUM
, "xpsr");
7216 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
7217 ARM_PS_REGNUM
, "cpsr");
7221 tdesc_data_cleanup (tdesc_data
);
7225 feature
= tdesc_find_feature (tdesc
,
7226 "org.gnu.gdb.arm.fpa");
7227 if (feature
!= NULL
)
7230 for (i
= ARM_F0_REGNUM
; i
<= ARM_FPS_REGNUM
; i
++)
7231 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
, i
,
7232 arm_register_names
[i
]);
7235 tdesc_data_cleanup (tdesc_data
);
7240 have_fpa_registers
= 0;
7242 feature
= tdesc_find_feature (tdesc
,
7243 "org.gnu.gdb.xscale.iwmmxt");
7244 if (feature
!= NULL
)
7246 static const char *const iwmmxt_names
[] = {
7247 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
7248 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
7249 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
7250 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
7254 for (i
= ARM_WR0_REGNUM
; i
<= ARM_WR15_REGNUM
; i
++)
7256 &= tdesc_numbered_register (feature
, tdesc_data
, i
,
7257 iwmmxt_names
[i
- ARM_WR0_REGNUM
]);
7259 /* Check for the control registers, but do not fail if they
7261 for (i
= ARM_WC0_REGNUM
; i
<= ARM_WCASF_REGNUM
; i
++)
7262 tdesc_numbered_register (feature
, tdesc_data
, i
,
7263 iwmmxt_names
[i
- ARM_WR0_REGNUM
]);
7265 for (i
= ARM_WCGR0_REGNUM
; i
<= ARM_WCGR3_REGNUM
; i
++)
7267 &= tdesc_numbered_register (feature
, tdesc_data
, i
,
7268 iwmmxt_names
[i
- ARM_WR0_REGNUM
]);
7272 tdesc_data_cleanup (tdesc_data
);
7277 /* If we have a VFP unit, check whether the single precision registers
7278 are present. If not, then we will synthesize them as pseudo
7280 feature
= tdesc_find_feature (tdesc
,
7281 "org.gnu.gdb.arm.vfp");
7282 if (feature
!= NULL
)
7284 static const char *const vfp_double_names
[] = {
7285 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
7286 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
7287 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
7288 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
7291 /* Require the double precision registers. There must be either
7294 for (i
= 0; i
< 32; i
++)
7296 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
7298 vfp_double_names
[i
]);
7303 if (!valid_p
&& i
!= 16)
7305 tdesc_data_cleanup (tdesc_data
);
7309 if (tdesc_unnumbered_register (feature
, "s0") == 0)
7310 have_vfp_pseudos
= 1;
7312 have_vfp_registers
= 1;
7314 /* If we have VFP, also check for NEON. The architecture allows
7315 NEON without VFP (integer vector operations only), but GDB
7316 does not support that. */
7317 feature
= tdesc_find_feature (tdesc
,
7318 "org.gnu.gdb.arm.neon");
7319 if (feature
!= NULL
)
7321 /* NEON requires 32 double-precision registers. */
7324 tdesc_data_cleanup (tdesc_data
);
7328 /* If there are quad registers defined by the stub, use
7329 their type; otherwise (normally) provide them with
7330 the default type. */
7331 if (tdesc_unnumbered_register (feature
, "q0") == 0)
7332 have_neon_pseudos
= 1;
7339 /* If there is already a candidate, use it. */
7340 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
7342 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
7344 if (arm_abi
!= ARM_ABI_AUTO
7345 && arm_abi
!= gdbarch_tdep (best_arch
->gdbarch
)->arm_abi
)
7348 if (fp_model
!= ARM_FLOAT_AUTO
7349 && fp_model
!= gdbarch_tdep (best_arch
->gdbarch
)->fp_model
)
7352 /* There are various other properties in tdep that we do not
7353 need to check here: those derived from a target description,
7354 since gdbarches with a different target description are
7355 automatically disqualified. */
7357 /* Do check is_m, though, since it might come from the binary. */
7358 if (is_m
!= gdbarch_tdep (best_arch
->gdbarch
)->is_m
)
7361 /* Found a match. */
7365 if (best_arch
!= NULL
)
7367 if (tdesc_data
!= NULL
)
7368 tdesc_data_cleanup (tdesc_data
);
7369 return best_arch
->gdbarch
;
7372 tdep
= xcalloc (1, sizeof (struct gdbarch_tdep
));
7373 gdbarch
= gdbarch_alloc (&info
, tdep
);
7375 /* Record additional information about the architecture we are defining.
7376 These are gdbarch discriminators, like the OSABI. */
7377 tdep
->arm_abi
= arm_abi
;
7378 tdep
->fp_model
= fp_model
;
7380 tdep
->have_fpa_registers
= have_fpa_registers
;
7381 tdep
->have_vfp_registers
= have_vfp_registers
;
7382 tdep
->have_vfp_pseudos
= have_vfp_pseudos
;
7383 tdep
->have_neon_pseudos
= have_neon_pseudos
;
7384 tdep
->have_neon
= have_neon
;
7387 switch (info
.byte_order_for_code
)
7389 case BFD_ENDIAN_BIG
:
7390 tdep
->arm_breakpoint
= arm_default_arm_be_breakpoint
;
7391 tdep
->arm_breakpoint_size
= sizeof (arm_default_arm_be_breakpoint
);
7392 tdep
->thumb_breakpoint
= arm_default_thumb_be_breakpoint
;
7393 tdep
->thumb_breakpoint_size
= sizeof (arm_default_thumb_be_breakpoint
);
7397 case BFD_ENDIAN_LITTLE
:
7398 tdep
->arm_breakpoint
= arm_default_arm_le_breakpoint
;
7399 tdep
->arm_breakpoint_size
= sizeof (arm_default_arm_le_breakpoint
);
7400 tdep
->thumb_breakpoint
= arm_default_thumb_le_breakpoint
;
7401 tdep
->thumb_breakpoint_size
= sizeof (arm_default_thumb_le_breakpoint
);
7406 internal_error (__FILE__
, __LINE__
,
7407 _("arm_gdbarch_init: bad byte order for float format"));
7410 /* On ARM targets char defaults to unsigned. */
7411 set_gdbarch_char_signed (gdbarch
, 0);
7413 /* Note: for displaced stepping, this includes the breakpoint, and one word
7414 of additional scratch space. This setting isn't used for anything beside
7415 displaced stepping at present. */
7416 set_gdbarch_max_insn_length (gdbarch
, 4 * DISPLACED_MODIFIED_INSNS
);
7418 /* This should be low enough for everything. */
7419 tdep
->lowest_pc
= 0x20;
7420 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
7422 /* The default, for both APCS and AAPCS, is to return small
7423 structures in registers. */
7424 tdep
->struct_return
= reg_struct_return
;
7426 set_gdbarch_push_dummy_call (gdbarch
, arm_push_dummy_call
);
7427 set_gdbarch_frame_align (gdbarch
, arm_frame_align
);
7429 set_gdbarch_write_pc (gdbarch
, arm_write_pc
);
7431 /* Frame handling. */
7432 set_gdbarch_dummy_id (gdbarch
, arm_dummy_id
);
7433 set_gdbarch_unwind_pc (gdbarch
, arm_unwind_pc
);
7434 set_gdbarch_unwind_sp (gdbarch
, arm_unwind_sp
);
7436 frame_base_set_default (gdbarch
, &arm_normal_base
);
7438 /* Address manipulation. */
7439 set_gdbarch_smash_text_address (gdbarch
, arm_smash_text_address
);
7440 set_gdbarch_addr_bits_remove (gdbarch
, arm_addr_bits_remove
);
7442 /* Advance PC across function entry code. */
7443 set_gdbarch_skip_prologue (gdbarch
, arm_skip_prologue
);
7445 /* Detect whether PC is in function epilogue. */
7446 set_gdbarch_in_function_epilogue_p (gdbarch
, arm_in_function_epilogue_p
);
7448 /* Skip trampolines. */
7449 set_gdbarch_skip_trampoline_code (gdbarch
, arm_skip_stub
);
7451 /* The stack grows downward. */
7452 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
7454 /* Breakpoint manipulation. */
7455 set_gdbarch_breakpoint_from_pc (gdbarch
, arm_breakpoint_from_pc
);
7456 set_gdbarch_remote_breakpoint_from_pc (gdbarch
,
7457 arm_remote_breakpoint_from_pc
);
7459 /* Information about registers, etc. */
7460 set_gdbarch_deprecated_fp_regnum (gdbarch
, ARM_FP_REGNUM
); /* ??? */
7461 set_gdbarch_sp_regnum (gdbarch
, ARM_SP_REGNUM
);
7462 set_gdbarch_pc_regnum (gdbarch
, ARM_PC_REGNUM
);
7463 set_gdbarch_num_regs (gdbarch
, ARM_NUM_REGS
);
7464 set_gdbarch_register_type (gdbarch
, arm_register_type
);
7466 /* This "info float" is FPA-specific. Use the generic version if we
7468 if (gdbarch_tdep (gdbarch
)->have_fpa_registers
)
7469 set_gdbarch_print_float_info (gdbarch
, arm_print_float_info
);
7471 /* Internal <-> external register number maps. */
7472 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, arm_dwarf_reg_to_regnum
);
7473 set_gdbarch_register_sim_regno (gdbarch
, arm_register_sim_regno
);
7475 set_gdbarch_register_name (gdbarch
, arm_register_name
);
7477 /* Returning results. */
7478 set_gdbarch_return_value (gdbarch
, arm_return_value
);
7481 set_gdbarch_print_insn (gdbarch
, gdb_print_insn_arm
);
7483 /* Minsymbol frobbing. */
7484 set_gdbarch_elf_make_msymbol_special (gdbarch
, arm_elf_make_msymbol_special
);
7485 set_gdbarch_coff_make_msymbol_special (gdbarch
,
7486 arm_coff_make_msymbol_special
);
7487 set_gdbarch_record_special_symbol (gdbarch
, arm_record_special_symbol
);
7489 /* Thumb-2 IT block support. */
7490 set_gdbarch_adjust_breakpoint_address (gdbarch
,
7491 arm_adjust_breakpoint_address
);
7493 /* Virtual tables. */
7494 set_gdbarch_vbit_in_delta (gdbarch
, 1);
7496 /* Hook in the ABI-specific overrides, if they have been registered. */
7497 gdbarch_init_osabi (info
, gdbarch
);
7499 dwarf2_frame_set_init_reg (gdbarch
, arm_dwarf2_frame_init_reg
);
7501 /* Add some default predicates. */
7502 frame_unwind_append_unwinder (gdbarch
, &arm_stub_unwind
);
7503 dwarf2_append_unwinders (gdbarch
);
7504 frame_unwind_append_unwinder (gdbarch
, &arm_prologue_unwind
);
7506 /* Now we have tuned the configuration, set a few final things,
7507 based on what the OS ABI has told us. */
7509 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
7510 binaries are always marked. */
7511 if (tdep
->arm_abi
== ARM_ABI_AUTO
)
7512 tdep
->arm_abi
= ARM_ABI_APCS
;
7514 /* We used to default to FPA for generic ARM, but almost nobody
7515 uses that now, and we now provide a way for the user to force
7516 the model. So default to the most useful variant. */
7517 if (tdep
->fp_model
== ARM_FLOAT_AUTO
)
7518 tdep
->fp_model
= ARM_FLOAT_SOFT_FPA
;
7520 if (tdep
->jb_pc
>= 0)
7521 set_gdbarch_get_longjmp_target (gdbarch
, arm_get_longjmp_target
);
7523 /* Floating point sizes and format. */
7524 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
7525 if (tdep
->fp_model
== ARM_FLOAT_SOFT_FPA
|| tdep
->fp_model
== ARM_FLOAT_FPA
)
7527 set_gdbarch_double_format
7528 (gdbarch
, floatformats_ieee_double_littlebyte_bigword
);
7529 set_gdbarch_long_double_format
7530 (gdbarch
, floatformats_ieee_double_littlebyte_bigword
);
7534 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
7535 set_gdbarch_long_double_format (gdbarch
, floatformats_ieee_double
);
7538 if (have_vfp_pseudos
)
7540 /* NOTE: These are the only pseudo registers used by
7541 the ARM target at the moment. If more are added, a
7542 little more care in numbering will be needed. */
7544 int num_pseudos
= 32;
7545 if (have_neon_pseudos
)
7547 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudos
);
7548 set_gdbarch_pseudo_register_read (gdbarch
, arm_pseudo_read
);
7549 set_gdbarch_pseudo_register_write (gdbarch
, arm_pseudo_write
);
7554 set_tdesc_pseudo_register_name (gdbarch
, arm_register_name
);
7556 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
7558 /* Override tdesc_register_type to adjust the types of VFP
7559 registers for NEON. */
7560 set_gdbarch_register_type (gdbarch
, arm_register_type
);
7563 /* Add standard register aliases. We add aliases even for those
7564 nanes which are used by the current architecture - it's simpler,
7565 and does no harm, since nothing ever lists user registers. */
7566 for (i
= 0; i
< ARRAY_SIZE (arm_register_aliases
); i
++)
7567 user_reg_add (gdbarch
, arm_register_aliases
[i
].name
,
7568 value_of_arm_user_reg
, &arm_register_aliases
[i
].regnum
);
7574 arm_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
7576 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
7581 fprintf_unfiltered (file
, _("arm_dump_tdep: Lowest pc = 0x%lx"),
7582 (unsigned long) tdep
->lowest_pc
);
7585 extern initialize_file_ftype _initialize_arm_tdep
; /* -Wmissing-prototypes */
7588 _initialize_arm_tdep (void)
7590 struct ui_file
*stb
;
7592 struct cmd_list_element
*new_set
, *new_show
;
7593 const char *setname
;
7594 const char *setdesc
;
7595 const char *const *regnames
;
7597 static char *helptext
;
7598 char regdesc
[1024], *rdptr
= regdesc
;
7599 size_t rest
= sizeof (regdesc
);
7601 gdbarch_register (bfd_arch_arm
, arm_gdbarch_init
, arm_dump_tdep
);
7603 arm_objfile_data_key
7604 = register_objfile_data_with_cleanup (NULL
, arm_objfile_data_free
);
7606 /* Register an ELF OS ABI sniffer for ARM binaries. */
7607 gdbarch_register_osabi_sniffer (bfd_arch_arm
,
7608 bfd_target_elf_flavour
,
7609 arm_elf_osabi_sniffer
);
7611 /* Initialize the standard target descriptions. */
7612 initialize_tdesc_arm_with_m ();
7614 /* Get the number of possible sets of register names defined in opcodes. */
7615 num_disassembly_options
= get_arm_regname_num_options ();
7617 /* Add root prefix command for all "set arm"/"show arm" commands. */
7618 add_prefix_cmd ("arm", no_class
, set_arm_command
,
7619 _("Various ARM-specific commands."),
7620 &setarmcmdlist
, "set arm ", 0, &setlist
);
7622 add_prefix_cmd ("arm", no_class
, show_arm_command
,
7623 _("Various ARM-specific commands."),
7624 &showarmcmdlist
, "show arm ", 0, &showlist
);
7626 /* Sync the opcode insn printer with our register viewer. */
7627 parse_arm_disassembler_option ("reg-names-std");
7629 /* Initialize the array that will be passed to
7630 add_setshow_enum_cmd(). */
7631 valid_disassembly_styles
7632 = xmalloc ((num_disassembly_options
+ 1) * sizeof (char *));
7633 for (i
= 0; i
< num_disassembly_options
; i
++)
7635 numregs
= get_arm_regnames (i
, &setname
, &setdesc
, ®names
);
7636 valid_disassembly_styles
[i
] = setname
;
7637 length
= snprintf (rdptr
, rest
, "%s - %s\n", setname
, setdesc
);
7640 /* When we find the default names, tell the disassembler to use
7642 if (!strcmp (setname
, "std"))
7644 disassembly_style
= setname
;
7645 set_arm_regname_option (i
);
7648 /* Mark the end of valid options. */
7649 valid_disassembly_styles
[num_disassembly_options
] = NULL
;
7651 /* Create the help text. */
7652 stb
= mem_fileopen ();
7653 fprintf_unfiltered (stb
, "%s%s%s",
7654 _("The valid values are:\n"),
7656 _("The default is \"std\"."));
7657 helptext
= ui_file_xstrdup (stb
, NULL
);
7658 ui_file_delete (stb
);
7660 add_setshow_enum_cmd("disassembler", no_class
,
7661 valid_disassembly_styles
, &disassembly_style
,
7662 _("Set the disassembly style."),
7663 _("Show the disassembly style."),
7665 set_disassembly_style_sfunc
,
7666 NULL
, /* FIXME: i18n: The disassembly style is \"%s\". */
7667 &setarmcmdlist
, &showarmcmdlist
);
7669 add_setshow_boolean_cmd ("apcs32", no_class
, &arm_apcs_32
,
7670 _("Set usage of ARM 32-bit mode."),
7671 _("Show usage of ARM 32-bit mode."),
7672 _("When off, a 26-bit PC will be used."),
7674 NULL
, /* FIXME: i18n: Usage of ARM 32-bit mode is %s. */
7675 &setarmcmdlist
, &showarmcmdlist
);
7677 /* Add a command to allow the user to force the FPU model. */
7678 add_setshow_enum_cmd ("fpu", no_class
, fp_model_strings
, ¤t_fp_model
,
7679 _("Set the floating point type."),
7680 _("Show the floating point type."),
7681 _("auto - Determine the FP typefrom the OS-ABI.\n\
7682 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
7683 fpa - FPA co-processor (GCC compiled).\n\
7684 softvfp - Software FP with pure-endian doubles.\n\
7685 vfp - VFP co-processor."),
7686 set_fp_model_sfunc
, show_fp_model
,
7687 &setarmcmdlist
, &showarmcmdlist
);
7689 /* Add a command to allow the user to force the ABI. */
7690 add_setshow_enum_cmd ("abi", class_support
, arm_abi_strings
, &arm_abi_string
,
7693 NULL
, arm_set_abi
, arm_show_abi
,
7694 &setarmcmdlist
, &showarmcmdlist
);
7696 /* Add two commands to allow the user to force the assumed
7698 add_setshow_enum_cmd ("fallback-mode", class_support
,
7699 arm_mode_strings
, &arm_fallback_mode_string
,
7700 _("Set the mode assumed when symbols are unavailable."),
7701 _("Show the mode assumed when symbols are unavailable."),
7702 NULL
, NULL
, arm_show_fallback_mode
,
7703 &setarmcmdlist
, &showarmcmdlist
);
7704 add_setshow_enum_cmd ("force-mode", class_support
,
7705 arm_mode_strings
, &arm_force_mode_string
,
7706 _("Set the mode assumed even when symbols are available."),
7707 _("Show the mode assumed even when symbols are available."),
7708 NULL
, NULL
, arm_show_force_mode
,
7709 &setarmcmdlist
, &showarmcmdlist
);
7711 /* Debugging flag. */
7712 add_setshow_boolean_cmd ("arm", class_maintenance
, &arm_debug
,
7713 _("Set ARM debugging."),
7714 _("Show ARM debugging."),
7715 _("When on, arm-specific debugging is enabled."),
7717 NULL
, /* FIXME: i18n: "ARM debugging is %s. */
7718 &setdebuglist
, &showdebuglist
);