/* Common target dependent code for GDB on ARM systems.

   Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
22 #include <ctype.h> /* XXX for isupper (). */
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
32 #include "reggroups.h"
35 #include "arch-utils.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
41 #include "dwarf2-frame.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
49 #include "gdb/sim-arm.h"
52 #include "coff/internal.h"
55 #include "gdb_assert.h"
58 #include "features/arm-with-m.c"
/* Macros for setting and testing a bit in a minimal symbol that marks
   it as a Thumb function.  The MSB of the minimal symbol's "info" field
   is used for this purpose.

   MSYMBOL_SET_SPECIAL	Actually sets the "special" bit.
   MSYMBOL_IS_SPECIAL	Tests the "special" bit in a minimal symbol.  */

/* Mark MSYM as a Thumb function.  */
#define MSYMBOL_SET_SPECIAL(msym) \
  MSYMBOL_TARGET_FLAG_1 (msym) = 1

/* Non-zero if MSYM has been marked as a Thumb function.  */
#define MSYMBOL_IS_SPECIAL(msym) \
  MSYMBOL_TARGET_FLAG_1 (msym)
/* Per-objfile data used for mapping symbols.  Registered lazily; NULL
   until the objfile data key is allocated.  */
static const struct objfile_data *arm_objfile_data_key;
78 struct arm_mapping_symbol
83 typedef struct arm_mapping_symbol arm_mapping_symbol_s
;
84 DEF_VEC_O(arm_mapping_symbol_s
);
86 struct arm_per_objfile
88 VEC(arm_mapping_symbol_s
) **section_maps
;
/* The list of available "set arm ..." and "show arm ..." commands.  */
static struct cmd_list_element *setarmcmdlist = NULL;
static struct cmd_list_element *showarmcmdlist = NULL;
95 /* The type of floating-point to use. Keep this in sync with enum
96 arm_float_model, and the help string in _initialize_arm_tdep. */
97 static const char *fp_model_strings
[] =
107 /* A variable that can be configured by the user. */
108 static enum arm_float_model arm_fp_model
= ARM_FLOAT_AUTO
;
109 static const char *current_fp_model
= "auto";
111 /* The ABI to use. Keep this in sync with arm_abi_kind. */
112 static const char *arm_abi_strings
[] =
120 /* A variable that can be configured by the user. */
121 static enum arm_abi_kind arm_abi_global
= ARM_ABI_AUTO
;
122 static const char *arm_abi_string
= "auto";
124 /* The execution mode to assume. */
125 static const char *arm_mode_strings
[] =
/* Execution mode ("arm"/"thumb"/"auto") to fall back on when no other
   information decides the mode — consulted by arm_pc_is_thumb.  */
static const char *arm_fallback_mode_string = "auto";

/* User-forced execution mode; when not "auto" it overrides the symbol
   table in arm_pc_is_thumb.  */
static const char *arm_force_mode_string = "auto";
136 /* Number of different reg name sets (options). */
137 static int num_disassembly_options
;
139 /* The standard register names, and all the valid aliases for them. Note
140 that `fp', `sp' and `pc' are not added in this alias list, because they
141 have been added as builtin user registers in
142 std-regs.c:_initialize_frame_reg. */
147 } arm_register_aliases
[] = {
148 /* Basic register numbers. */
165 /* Synonyms (argument and variable registers). */
178 /* Other platform-specific names for r9. */
184 /* Names used by GCC (not listed in the ARM EABI). */
186 /* A special name from the older ATPCS. */
/* The standard register names, indexed by GDB register number 0-25:
   the core registers r0-r12, sp, lr and pc, the floating-point
   registers f0-f7, the floating-point status register, and the
   CPSR.  */
static const char *const arm_register_names[] =
{
  "r0",  "r1",  "r2",  "r3",	/*  0  1  2  3 */
  "r4",  "r5",  "r6",  "r7",	/*  4  5  6  7 */
  "r8",  "r9",  "r10", "r11",	/*  8  9 10 11 */
  "r12", "sp",  "lr",  "pc",	/* 12 13 14 15 */
  "f0",  "f1",  "f2",  "f3",	/* 16 17 18 19 */
  "f4",  "f5",  "f6",  "f7",	/* 20 21 22 23 */
  "fps", "cpsr"			/* 24 25       */
};
199 /* Valid register name styles. */
200 static const char **valid_disassembly_styles
;
202 /* Disassembly style to use. Default to "std" register names. */
203 static const char *disassembly_style
;
/* This is used to keep the bfd arch_info in sync with the disassembly
   style.  */
207 static void set_disassembly_style_sfunc(char *, int,
208 struct cmd_list_element
*);
209 static void set_disassembly_style (void);
211 static void convert_from_extended (const struct floatformat
*, const void *,
213 static void convert_to_extended (const struct floatformat
*, void *,
216 static enum register_status
arm_neon_quad_read (struct gdbarch
*gdbarch
,
217 struct regcache
*regcache
,
218 int regnum
, gdb_byte
*buf
);
219 static void arm_neon_quad_write (struct gdbarch
*gdbarch
,
220 struct regcache
*regcache
,
221 int regnum
, const gdb_byte
*buf
);
223 struct arm_prologue_cache
225 /* The stack pointer at the time this frame was created; i.e. the
226 caller's stack pointer when this function was called. It is used
227 to identify this frame. */
230 /* The frame base for this frame is just prev_sp - frame size.
231 FRAMESIZE is the distance from the frame pointer to the
232 initial stack pointer. */
236 /* The register used to hold the frame pointer for this frame. */
239 /* Saved register offsets. */
240 struct trad_frame_saved_reg
*saved_regs
;
243 static CORE_ADDR
arm_analyze_prologue (struct gdbarch
*gdbarch
,
244 CORE_ADDR prologue_start
,
245 CORE_ADDR prologue_end
,
246 struct arm_prologue_cache
*cache
);
/* Architecture version assumed for displaced stepping.  This affects
   the behaviour of certain instructions, and really should not be
   hard-wired.  */
#define DISPLACED_STEPPING_ARCH_VERSION		5
/* Addresses for calling Thumb functions have the bit 0 set.
   These macros respectively test, set, and clear bit 0 of an
   address.  */
#define IS_THUMB_ADDR(addr)	 ((addr) & 1)
#define MAKE_THUMB_ADDR(addr)	 ((addr) | 1)
#define UNMAKE_THUMB_ADDR(addr)	 ((addr) & ~1)
259 /* Set to true if the 32-bit mode is in use. */
263 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
266 arm_psr_thumb_bit (struct gdbarch
*gdbarch
)
268 if (gdbarch_tdep (gdbarch
)->is_m
)
274 /* Determine if FRAME is executing in Thumb mode. */
277 arm_frame_is_thumb (struct frame_info
*frame
)
280 ULONGEST t_bit
= arm_psr_thumb_bit (get_frame_arch (frame
));
282 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
283 directly (from a signal frame or dummy frame) or by interpreting
284 the saved LR (from a prologue or DWARF frame). So consult it and
285 trust the unwinders. */
286 cpsr
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
288 return (cpsr
& t_bit
) != 0;
291 /* Callback for VEC_lower_bound. */
294 arm_compare_mapping_symbols (const struct arm_mapping_symbol
*lhs
,
295 const struct arm_mapping_symbol
*rhs
)
297 return lhs
->value
< rhs
->value
;
300 /* Search for the mapping symbol covering MEMADDR. If one is found,
301 return its type. Otherwise, return 0. If START is non-NULL,
302 set *START to the location of the mapping symbol. */
305 arm_find_mapping_symbol (CORE_ADDR memaddr
, CORE_ADDR
*start
)
307 struct obj_section
*sec
;
309 /* If there are mapping symbols, consult them. */
310 sec
= find_pc_section (memaddr
);
313 struct arm_per_objfile
*data
;
314 VEC(arm_mapping_symbol_s
) *map
;
315 struct arm_mapping_symbol map_key
= { memaddr
- obj_section_addr (sec
),
319 data
= objfile_data (sec
->objfile
, arm_objfile_data_key
);
322 map
= data
->section_maps
[sec
->the_bfd_section
->index
];
323 if (!VEC_empty (arm_mapping_symbol_s
, map
))
325 struct arm_mapping_symbol
*map_sym
;
327 idx
= VEC_lower_bound (arm_mapping_symbol_s
, map
, &map_key
,
328 arm_compare_mapping_symbols
);
330 /* VEC_lower_bound finds the earliest ordered insertion
331 point. If the following symbol starts at this exact
332 address, we use that; otherwise, the preceding
333 mapping symbol covers this address. */
334 if (idx
< VEC_length (arm_mapping_symbol_s
, map
))
336 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
);
337 if (map_sym
->value
== map_key
.value
)
340 *start
= map_sym
->value
+ obj_section_addr (sec
);
341 return map_sym
->type
;
347 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
- 1);
349 *start
= map_sym
->value
+ obj_section_addr (sec
);
350 return map_sym
->type
;
359 static CORE_ADDR
arm_get_next_pc_raw (struct frame_info
*frame
,
360 CORE_ADDR pc
, int insert_bkpt
);
362 /* Determine if the program counter specified in MEMADDR is in a Thumb
363 function. This function should be called for addresses unrelated to
364 any executing frame; otherwise, prefer arm_frame_is_thumb. */
367 arm_pc_is_thumb (struct gdbarch
*gdbarch
, CORE_ADDR memaddr
)
369 struct obj_section
*sec
;
370 struct minimal_symbol
*sym
;
372 struct displaced_step_closure
* dsc
373 = get_displaced_step_closure_by_addr(memaddr
);
375 /* If checking the mode of displaced instruction in copy area, the mode
376 should be determined by instruction on the original address. */
380 fprintf_unfiltered (gdb_stdlog
,
381 "displaced: check mode of %.8lx instead of %.8lx\n",
382 (unsigned long) dsc
->insn_addr
,
383 (unsigned long) memaddr
);
384 memaddr
= dsc
->insn_addr
;
387 /* If bit 0 of the address is set, assume this is a Thumb address. */
388 if (IS_THUMB_ADDR (memaddr
))
391 /* If the user wants to override the symbol table, let him. */
392 if (strcmp (arm_force_mode_string
, "arm") == 0)
394 if (strcmp (arm_force_mode_string
, "thumb") == 0)
397 /* ARM v6-M and v7-M are always in Thumb mode. */
398 if (gdbarch_tdep (gdbarch
)->is_m
)
401 /* If there are mapping symbols, consult them. */
402 type
= arm_find_mapping_symbol (memaddr
, NULL
);
406 /* Thumb functions have a "special" bit set in minimal symbols. */
407 sym
= lookup_minimal_symbol_by_pc (memaddr
);
409 return (MSYMBOL_IS_SPECIAL (sym
));
411 /* If the user wants to override the fallback mode, let them. */
412 if (strcmp (arm_fallback_mode_string
, "arm") == 0)
414 if (strcmp (arm_fallback_mode_string
, "thumb") == 0)
417 /* If we couldn't find any symbol, but we're talking to a running
418 target, then trust the current value of $cpsr. This lets
419 "display/i $pc" always show the correct mode (though if there is
420 a symbol table we will not reach here, so it still may not be
421 displayed in the mode it will be executed).
423 As a further heuristic if we detect that we are doing a single-step we
424 see what state executing the current instruction ends up with us being
426 if (target_has_registers
)
428 struct frame_info
*current_frame
= get_current_frame ();
429 CORE_ADDR current_pc
= get_frame_pc (current_frame
);
430 int is_thumb
= arm_frame_is_thumb (current_frame
);
432 if (memaddr
== current_pc
)
436 struct gdbarch
*gdbarch
= get_frame_arch (current_frame
);
437 next_pc
= arm_get_next_pc_raw (current_frame
, current_pc
, FALSE
);
438 if (memaddr
== gdbarch_addr_bits_remove (gdbarch
, next_pc
))
439 return IS_THUMB_ADDR (next_pc
);
445 /* Otherwise we're out of luck; we assume ARM. */
449 /* Remove useless bits from addresses in a running program. */
451 arm_addr_bits_remove (struct gdbarch
*gdbarch
, CORE_ADDR val
)
454 return UNMAKE_THUMB_ADDR (val
);
456 return (val
& 0x03fffffc);
459 /* When reading symbols, we need to zap the low bit of the address,
460 which may be set to 1 for Thumb functions. */
462 arm_smash_text_address (struct gdbarch
*gdbarch
, CORE_ADDR val
)
467 /* Return 1 if PC is the start of a compiler helper function which
468 can be safely ignored during prologue skipping. IS_THUMB is true
469 if the function is known to be a Thumb function due to the way it
472 skip_prologue_function (struct gdbarch
*gdbarch
, CORE_ADDR pc
, int is_thumb
)
474 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
475 struct minimal_symbol
*msym
;
477 msym
= lookup_minimal_symbol_by_pc (pc
);
479 && SYMBOL_VALUE_ADDRESS (msym
) == pc
480 && SYMBOL_LINKAGE_NAME (msym
) != NULL
)
482 const char *name
= SYMBOL_LINKAGE_NAME (msym
);
484 /* The GNU linker's Thumb call stub to foo is named
486 if (strstr (name
, "_from_thumb") != NULL
)
489 /* On soft-float targets, __truncdfsf2 is called to convert promoted
490 arguments to their argument types in non-prototyped
492 if (strncmp (name
, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
494 if (strncmp (name
, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
497 /* Internal functions related to thread-local storage. */
498 if (strncmp (name
, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
500 if (strncmp (name
, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
505 /* If we run against a stripped glibc, we may be unable to identify
506 special functions by name. Check for one important case,
507 __aeabi_read_tp, by comparing the *code* against the default
508 implementation (this is hand-written ARM assembler in glibc). */
511 && read_memory_unsigned_integer (pc
, 4, byte_order_for_code
)
512 == 0xe3e00a0f /* mov r0, #0xffff0fff */
513 && read_memory_unsigned_integer (pc
+ 4, 4, byte_order_for_code
)
514 == 0xe240f01f) /* sub pc, r0, #31 */
/* Support routines for instruction parsing.  */

/* Mask covering bits 0 through X, inclusive.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST through FN (inclusive) of OBJ, shifted down to bit 0.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Like bits, but sign-extended from bit FN.  Note: arguments are
   parenthesized before the subtraction, matching bits above, so that
   expression arguments expand correctly.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask ((fn) - (st)))))
/* Destination of an ARM-mode branch at ADDR with instruction word
   INSTR: the instruction address plus 8, plus the sign-extended
   24-bit offset in bits 0-23 scaled by 4.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
/* Extract the immediate from a movw/movt instruction of encoding T.
   INSN1 is the first 16-bit half of the instruction, and INSN2 is the
   second.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)		      \
   | (bits ((insn1), 10, 10) << 11)	      \
   | (bits ((insn2), 12, 14) << 8)	      \
   | bits ((insn2), 0, 7))
/* Extract the immediate from a movw/movt instruction of encoding A.
   INSN is the 32-bit instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12)      \
   | bits ((insn), 0, 11))
545 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
548 thumb_expand_immediate (unsigned int imm
)
550 unsigned int count
= imm
>> 7;
558 return (imm
& 0xff) | ((imm
& 0xff) << 16);
560 return ((imm
& 0xff) << 8) | ((imm
& 0xff) << 24);
562 return (imm
& 0xff) | ((imm
& 0xff) << 8)
563 | ((imm
& 0xff) << 16) | ((imm
& 0xff) << 24);
566 return (0x80 | (imm
& 0x7f)) << (32 - count
);
569 /* Return 1 if the 16-bit Thumb instruction INST might change
570 control flow, 0 otherwise. */
573 thumb_instruction_changes_pc (unsigned short inst
)
575 if ((inst
& 0xff00) == 0xbd00) /* pop {rlist, pc} */
578 if ((inst
& 0xf000) == 0xd000) /* conditional branch */
581 if ((inst
& 0xf800) == 0xe000) /* unconditional branch */
584 if ((inst
& 0xff00) == 0x4700) /* bx REG, blx REG */
587 if ((inst
& 0xff87) == 0x4687) /* mov pc, REG */
590 if ((inst
& 0xf500) == 0xb100) /* CBNZ or CBZ. */
596 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
597 might change control flow, 0 otherwise. */
600 thumb2_instruction_changes_pc (unsigned short inst1
, unsigned short inst2
)
602 if ((inst1
& 0xf800) == 0xf000 && (inst2
& 0x8000) == 0x8000)
604 /* Branches and miscellaneous control instructions. */
606 if ((inst2
& 0x1000) != 0 || (inst2
& 0xd001) == 0xc000)
611 else if (inst1
== 0xf3de && (inst2
& 0xff00) == 0x3f00)
613 /* SUBS PC, LR, #imm8. */
616 else if ((inst2
& 0xd000) == 0x8000 && (inst1
& 0x0380) != 0x0380)
618 /* Conditional branch. */
625 if ((inst1
& 0xfe50) == 0xe810)
627 /* Load multiple or RFE. */
629 if (bit (inst1
, 7) && !bit (inst1
, 8))
635 else if (!bit (inst1
, 7) && bit (inst1
, 8))
641 else if (bit (inst1
, 7) && bit (inst1
, 8))
646 else if (!bit (inst1
, 7) && !bit (inst1
, 8))
655 if ((inst1
& 0xffef) == 0xea4f && (inst2
& 0xfff0) == 0x0f00)
657 /* MOV PC or MOVS PC. */
661 if ((inst1
& 0xff70) == 0xf850 && (inst2
& 0xf000) == 0xf000)
664 if (bits (inst1
, 0, 3) == 15)
670 if ((inst2
& 0x0fc0) == 0x0000)
676 if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf000)
682 if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf010)
691 /* Analyze a Thumb prologue, looking for a recognizable stack frame
692 and frame pointer. Scan until we encounter a store that could
693 clobber the stack frame unexpectedly, or an unknown instruction.
694 Return the last address which is definitely safe to skip for an
695 initial breakpoint. */
698 thumb_analyze_prologue (struct gdbarch
*gdbarch
,
699 CORE_ADDR start
, CORE_ADDR limit
,
700 struct arm_prologue_cache
*cache
)
702 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
703 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
706 struct pv_area
*stack
;
707 struct cleanup
*back_to
;
709 CORE_ADDR unrecognized_pc
= 0;
711 for (i
= 0; i
< 16; i
++)
712 regs
[i
] = pv_register (i
, 0);
713 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
714 back_to
= make_cleanup_free_pv_area (stack
);
716 while (start
< limit
)
720 insn
= read_memory_unsigned_integer (start
, 2, byte_order_for_code
);
722 if ((insn
& 0xfe00) == 0xb400) /* push { rlist } */
727 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
730 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
731 whether to save LR (R14). */
732 mask
= (insn
& 0xff) | ((insn
& 0x100) << 6);
734 /* Calculate offsets of saved R0-R7 and LR. */
735 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
736 if (mask
& (1 << regno
))
738 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
740 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
743 else if ((insn
& 0xff00) == 0xb000) /* add sp, #simm OR
746 offset
= (insn
& 0x7f) << 2; /* get scaled offset */
747 if (insn
& 0x80) /* Check for SUB. */
748 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
751 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
754 else if ((insn
& 0xf800) == 0xa800) /* add Rd, sp, #imm */
755 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[ARM_SP_REGNUM
],
757 else if ((insn
& 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
758 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
759 regs
[bits (insn
, 0, 2)] = pv_add_constant (regs
[bits (insn
, 3, 5)],
761 else if ((insn
& 0xf800) == 0x3000 /* add Rd, #imm */
762 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
763 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[bits (insn
, 8, 10)],
765 else if ((insn
& 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
766 && pv_is_register (regs
[bits (insn
, 6, 8)], ARM_SP_REGNUM
)
767 && pv_is_constant (regs
[bits (insn
, 3, 5)]))
768 regs
[bits (insn
, 0, 2)] = pv_add (regs
[bits (insn
, 3, 5)],
769 regs
[bits (insn
, 6, 8)]);
770 else if ((insn
& 0xff00) == 0x4400 /* add Rd, Rm */
771 && pv_is_constant (regs
[bits (insn
, 3, 6)]))
773 int rd
= (bit (insn
, 7) << 3) + bits (insn
, 0, 2);
774 int rm
= bits (insn
, 3, 6);
775 regs
[rd
] = pv_add (regs
[rd
], regs
[rm
]);
777 else if ((insn
& 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
779 int dst_reg
= (insn
& 0x7) + ((insn
& 0x80) >> 4);
780 int src_reg
= (insn
& 0x78) >> 3;
781 regs
[dst_reg
] = regs
[src_reg
];
783 else if ((insn
& 0xf800) == 0x9000) /* str rd, [sp, #off] */
785 /* Handle stores to the stack. Normally pushes are used,
786 but with GCC -mtpcs-frame, there may be other stores
787 in the prologue to create the frame. */
788 int regno
= (insn
>> 8) & 0x7;
791 offset
= (insn
& 0xff) << 2;
792 addr
= pv_add_constant (regs
[ARM_SP_REGNUM
], offset
);
794 if (pv_area_store_would_trash (stack
, addr
))
797 pv_area_store (stack
, addr
, 4, regs
[regno
]);
799 else if ((insn
& 0xf800) == 0x6000) /* str rd, [rn, #off] */
801 int rd
= bits (insn
, 0, 2);
802 int rn
= bits (insn
, 3, 5);
805 offset
= bits (insn
, 6, 10) << 2;
806 addr
= pv_add_constant (regs
[rn
], offset
);
808 if (pv_area_store_would_trash (stack
, addr
))
811 pv_area_store (stack
, addr
, 4, regs
[rd
]);
813 else if (((insn
& 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
814 || (insn
& 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
815 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
816 /* Ignore stores of argument registers to the stack. */
818 else if ((insn
& 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
819 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
820 /* Ignore block loads from the stack, potentially copying
821 parameters from memory. */
823 else if ((insn
& 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
824 || ((insn
& 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
825 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
)))
826 /* Similarly ignore single loads from the stack. */
828 else if ((insn
& 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
829 || (insn
& 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
830 /* Skip register copies, i.e. saves to another register
831 instead of the stack. */
833 else if ((insn
& 0xf800) == 0x2000) /* movs Rd, #imm */
834 /* Recognize constant loads; even with small stacks these are necessary
836 regs
[bits (insn
, 8, 10)] = pv_constant (bits (insn
, 0, 7));
837 else if ((insn
& 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
839 /* Constant pool loads, for the same reason. */
840 unsigned int constant
;
843 loc
= start
+ 4 + bits (insn
, 0, 7) * 4;
844 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
845 regs
[bits (insn
, 8, 10)] = pv_constant (constant
);
847 else if ((insn
& 0xe000) == 0xe000)
849 unsigned short inst2
;
851 inst2
= read_memory_unsigned_integer (start
+ 2, 2,
852 byte_order_for_code
);
854 if ((insn
& 0xf800) == 0xf000 && (inst2
& 0xe800) == 0xe800)
856 /* BL, BLX. Allow some special function calls when
857 skipping the prologue; GCC generates these before
858 storing arguments to the stack. */
860 int j1
, j2
, imm1
, imm2
;
862 imm1
= sbits (insn
, 0, 10);
863 imm2
= bits (inst2
, 0, 10);
864 j1
= bit (inst2
, 13);
865 j2
= bit (inst2
, 11);
867 offset
= ((imm1
<< 12) + (imm2
<< 1));
868 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
870 nextpc
= start
+ 4 + offset
;
871 /* For BLX make sure to clear the low bits. */
872 if (bit (inst2
, 12) == 0)
873 nextpc
= nextpc
& 0xfffffffc;
875 if (!skip_prologue_function (gdbarch
, nextpc
,
876 bit (inst2
, 12) != 0))
880 else if ((insn
& 0xffd0) == 0xe900 /* stmdb Rn{!},
882 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
884 pv_t addr
= regs
[bits (insn
, 0, 3)];
887 if (pv_area_store_would_trash (stack
, addr
))
890 /* Calculate offsets of saved registers. */
891 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
892 if (inst2
& (1 << regno
))
894 addr
= pv_add_constant (addr
, -4);
895 pv_area_store (stack
, addr
, 4, regs
[regno
]);
899 regs
[bits (insn
, 0, 3)] = addr
;
902 else if ((insn
& 0xff50) == 0xe940 /* strd Rt, Rt2,
904 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
906 int regno1
= bits (inst2
, 12, 15);
907 int regno2
= bits (inst2
, 8, 11);
908 pv_t addr
= regs
[bits (insn
, 0, 3)];
910 offset
= inst2
& 0xff;
912 addr
= pv_add_constant (addr
, offset
);
914 addr
= pv_add_constant (addr
, -offset
);
916 if (pv_area_store_would_trash (stack
, addr
))
919 pv_area_store (stack
, addr
, 4, regs
[regno1
]);
920 pv_area_store (stack
, pv_add_constant (addr
, 4),
924 regs
[bits (insn
, 0, 3)] = addr
;
927 else if ((insn
& 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
928 && (inst2
& 0x0c00) == 0x0c00
929 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
931 int regno
= bits (inst2
, 12, 15);
932 pv_t addr
= regs
[bits (insn
, 0, 3)];
934 offset
= inst2
& 0xff;
936 addr
= pv_add_constant (addr
, offset
);
938 addr
= pv_add_constant (addr
, -offset
);
940 if (pv_area_store_would_trash (stack
, addr
))
943 pv_area_store (stack
, addr
, 4, regs
[regno
]);
946 regs
[bits (insn
, 0, 3)] = addr
;
949 else if ((insn
& 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
950 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
952 int regno
= bits (inst2
, 12, 15);
955 offset
= inst2
& 0xfff;
956 addr
= pv_add_constant (regs
[bits (insn
, 0, 3)], offset
);
958 if (pv_area_store_would_trash (stack
, addr
))
961 pv_area_store (stack
, addr
, 4, regs
[regno
]);
964 else if ((insn
& 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
965 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
966 /* Ignore stores of argument registers to the stack. */
969 else if ((insn
& 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
970 && (inst2
& 0x0d00) == 0x0c00
971 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
972 /* Ignore stores of argument registers to the stack. */
975 else if ((insn
& 0xffd0) == 0xe890 /* ldmia Rn[!],
977 && (inst2
& 0x8000) == 0x0000
978 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
979 /* Ignore block loads from the stack, potentially copying
980 parameters from memory. */
983 else if ((insn
& 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
985 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
986 /* Similarly ignore dual loads from the stack. */
989 else if ((insn
& 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
990 && (inst2
& 0x0d00) == 0x0c00
991 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
992 /* Similarly ignore single loads from the stack. */
995 else if ((insn
& 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
996 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
997 /* Similarly ignore single loads from the stack. */
1000 else if ((insn
& 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
1001 && (inst2
& 0x8000) == 0x0000)
1003 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1004 | (bits (inst2
, 12, 14) << 8)
1005 | bits (inst2
, 0, 7));
1007 regs
[bits (inst2
, 8, 11)]
1008 = pv_add_constant (regs
[bits (insn
, 0, 3)],
1009 thumb_expand_immediate (imm
));
1012 else if ((insn
& 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1013 && (inst2
& 0x8000) == 0x0000)
1015 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1016 | (bits (inst2
, 12, 14) << 8)
1017 | bits (inst2
, 0, 7));
1019 regs
[bits (inst2
, 8, 11)]
1020 = pv_add_constant (regs
[bits (insn
, 0, 3)], imm
);
1023 else if ((insn
& 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1024 && (inst2
& 0x8000) == 0x0000)
1026 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1027 | (bits (inst2
, 12, 14) << 8)
1028 | bits (inst2
, 0, 7));
1030 regs
[bits (inst2
, 8, 11)]
1031 = pv_add_constant (regs
[bits (insn
, 0, 3)],
1032 - (CORE_ADDR
) thumb_expand_immediate (imm
));
1035 else if ((insn
& 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1036 && (inst2
& 0x8000) == 0x0000)
1038 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1039 | (bits (inst2
, 12, 14) << 8)
1040 | bits (inst2
, 0, 7));
1042 regs
[bits (inst2
, 8, 11)]
1043 = pv_add_constant (regs
[bits (insn
, 0, 3)], - (CORE_ADDR
) imm
);
1046 else if ((insn
& 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1048 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1049 | (bits (inst2
, 12, 14) << 8)
1050 | bits (inst2
, 0, 7));
1052 regs
[bits (inst2
, 8, 11)]
1053 = pv_constant (thumb_expand_immediate (imm
));
1056 else if ((insn
& 0xfbf0) == 0xf240) /* movw Rd, #const */
1059 = EXTRACT_MOVW_MOVT_IMM_T (insn
, inst2
);
1061 regs
[bits (inst2
, 8, 11)] = pv_constant (imm
);
1064 else if (insn
== 0xea5f /* mov.w Rd,Rm */
1065 && (inst2
& 0xf0f0) == 0)
1067 int dst_reg
= (inst2
& 0x0f00) >> 8;
1068 int src_reg
= inst2
& 0xf;
1069 regs
[dst_reg
] = regs
[src_reg
];
1072 else if ((insn
& 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1074 /* Constant pool loads. */
1075 unsigned int constant
;
1078 offset
= bits (insn
, 0, 11);
1080 loc
= start
+ 4 + offset
;
1082 loc
= start
+ 4 - offset
;
1084 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1085 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1088 else if ((insn
& 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1090 /* Constant pool loads. */
1091 unsigned int constant
;
1094 offset
= bits (insn
, 0, 7) << 2;
1096 loc
= start
+ 4 + offset
;
1098 loc
= start
+ 4 - offset
;
1100 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1101 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1103 constant
= read_memory_unsigned_integer (loc
+ 4, 4, byte_order
);
1104 regs
[bits (inst2
, 8, 11)] = pv_constant (constant
);
1107 else if (thumb2_instruction_changes_pc (insn
, inst2
))
1109 /* Don't scan past anything that might change control flow. */
1114 /* The optimizer might shove anything into the prologue,
1115 so we just skip what we don't recognize. */
1116 unrecognized_pc
= start
;
1121 else if (thumb_instruction_changes_pc (insn
))
1123 /* Don't scan past anything that might change control flow. */
1128 /* The optimizer might shove anything into the prologue,
1129 so we just skip what we don't recognize. */
1130 unrecognized_pc
= start
;
1137 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1138 paddress (gdbarch
, start
));
1140 if (unrecognized_pc
== 0)
1141 unrecognized_pc
= start
;
1145 do_cleanups (back_to
);
1146 return unrecognized_pc
;
1149 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1151 /* Frame pointer is fp. Frame size is constant. */
1152 cache
->framereg
= ARM_FP_REGNUM
;
1153 cache
->framesize
= -regs
[ARM_FP_REGNUM
].k
;
1155 else if (pv_is_register (regs
[THUMB_FP_REGNUM
], ARM_SP_REGNUM
))
1157 /* Frame pointer is r7. Frame size is constant. */
1158 cache
->framereg
= THUMB_FP_REGNUM
;
1159 cache
->framesize
= -regs
[THUMB_FP_REGNUM
].k
;
1161 else if (pv_is_register (regs
[ARM_SP_REGNUM
], ARM_SP_REGNUM
))
1163 /* Try the stack pointer... this is a bit desperate. */
1164 cache
->framereg
= ARM_SP_REGNUM
;
1165 cache
->framesize
= -regs
[ARM_SP_REGNUM
].k
;
1169 /* We're just out of luck. We don't know where the frame is. */
1170 cache
->framereg
= -1;
1171 cache
->framesize
= 0;
1174 for (i
= 0; i
< 16; i
++)
1175 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
1176 cache
->saved_regs
[i
].addr
= offset
;
1178 do_cleanups (back_to
);
1179 return unrecognized_pc
;
/* NOTE(review): this chunk was damaged in extraction -- each original
   source line is split across several physical lines, and gaps in the
   embedded original line numbers (e.g. 1186 -> 1190, 1195 -> 1200)
   show whole lines (braces, declarations, if/else arms) are missing.
   Comments only were added here; restore the code itself from
   upstream gdb/arm-tdep.c.  */
1183 /* Try to analyze the instructions starting from PC, which load symbol
1184 __stack_chk_guard. Return the address of instruction after loading this
1185 symbol, set the dest register number to *BASEREG, and set the size of
1186 instructions for loading symbol in OFFSET. Return 0 if instructions are
1190 arm_analyze_load_stack_chk_guard(CORE_ADDR pc
, struct gdbarch
*gdbarch
,
1191 unsigned int *destreg
, int *offset
)
/* Decodes either a Thumb (16-bit) or ARM (32-bit) load of the address
   of __stack_chk_guard: a literal-pool ldr or a movw/movt pair.  The
   is_thumb branch structure appears truncated here -- TODO confirm
   against upstream.  */
1193 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1194 int is_thumb
= arm_pc_is_thumb (gdbarch
, pc
);
1195 unsigned int low
, high
, address
;
1200 unsigned short insn1
1201 = read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
1203 if ((insn1
& 0xf800) == 0x4800) /* ldr Rd, #immed */
1205 *destreg
= bits (insn1
, 8, 10);
1207 address
= bits (insn1
, 0, 7);
1209 else if ((insn1
& 0xfbf0) == 0xf240) /* movw Rd, #const */
1211 unsigned short insn2
1212 = read_memory_unsigned_integer (pc
+ 2, 2, byte_order_for_code
);
/* Assemble the 32-bit constant from the movw (low half) and, below,
   the movt (high half).  */
1214 low
= EXTRACT_MOVW_MOVT_IMM_T (insn1
, insn2
);
1217 = read_memory_unsigned_integer (pc
+ 4, 2, byte_order_for_code
);
1219 = read_memory_unsigned_integer (pc
+ 6, 2, byte_order_for_code
);
1221 /* movt Rd, #const */
1222 if ((insn1
& 0xfbc0) == 0xf2c0)
1224 high
= EXTRACT_MOVW_MOVT_IMM_T (insn1
, insn2
);
1225 *destreg
= bits (insn2
, 8, 11);
1227 address
= (high
<< 16 | low
);
/* ARM (non-Thumb) encodings follow.  */
1234 = read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
1236 if ((insn
& 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1238 address
= bits (insn
, 0, 11);
1239 *destreg
= bits (insn
, 12, 15);
1242 else if ((insn
& 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1244 low
= EXTRACT_MOVW_MOVT_IMM_A (insn
);
1247 = read_memory_unsigned_integer (pc
+ 4, 4, byte_order_for_code
);
1249 if ((insn
& 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1251 high
= EXTRACT_MOVW_MOVT_IMM_A (insn
);
1252 *destreg
= bits (insn
, 12, 15);
1254 address
= (high
<< 16 | low
);
/* NOTE(review): extraction-damaged block -- lines split and some
   missing (gaps in embedded original numbering); only comments were
   added or corrected.  Restore code from upstream gdb/arm-tdep.c.  */
1262 /* Try to skip a sequence of instructions used for stack protector. If PC
1263 points to the first instruction of this sequence, return the address of
1264 first instruction after this sequence, otherwise, return original PC.
1266 On arm, this sequence of instructions is composed of mainly three steps,
1267 Step 1: load symbol __stack_chk_guard,
1268 Step 2: load from address of __stack_chk_guard,
1269 Step 3: store it to somewhere else.
1271 Usually, instructions on step 2 and step 3 are the same on various ARM
1272 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1273 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1274 instructions in step 1 vary from different ARM architectures. On ARMv7,
1277 movw Rn, #:lower16:__stack_chk_guard
1278 movt Rn, #:upper16:__stack_chk_guard
1285 .word __stack_chk_guard
1287 Since ldr/str is a very popular instruction, we can't use them as
1288 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1289 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
1290 stripped, as the 'fingerprint' of a stack protector code sequence. */
1293 arm_skip_stack_protector(CORE_ADDR pc
, struct gdbarch
*gdbarch
)
1295 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1296 unsigned int address
, basereg
;
1297 struct minimal_symbol
*stack_chk_guard
;
1299 int is_thumb
= arm_pc_is_thumb (gdbarch
, pc
);
1302 /* Try to parse the instructions in Step 1. */
1303 addr
= arm_analyze_load_stack_chk_guard (pc
, gdbarch
,
1308 stack_chk_guard
= lookup_minimal_symbol_by_pc (addr
);
1309 /* If name of symbol doesn't start with '__stack_chk_guard', this
1310 instruction sequence is not for stack protector. If symbol is
1311 removed, we conservatively think this sequence is for stack protector. */
1313 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard
), "__stack_chk_guard",
1314 strlen ("__stack_chk_guard")) != 0)
/* Thumb encoding of steps 2 and 3.  */
1319 unsigned int destreg
;
1321 = read_memory_unsigned_integer (pc
+ offset
, 2, byte_order_for_code
);
1323 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1324 if ((insn
& 0xf800) != 0x6800)
1326 if (bits (insn
, 3, 5) != basereg
)
1328 destreg
= bits (insn
, 0, 2);
1330 insn
= read_memory_unsigned_integer (pc
+ offset
+ 2, 2,
1331 byte_order_for_code
);
1332 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1333 if ((insn
& 0xf800) != 0x6000)
1335 if (destreg
!= bits (insn
, 0, 2))
/* ARM encoding of steps 2 and 3.  */
1340 unsigned int destreg
;
1342 = read_memory_unsigned_integer (pc
+ offset
, 4, byte_order_for_code
);
1344 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1345 if ((insn
& 0x0e500000) != 0x04100000)
1347 if (bits (insn
, 16, 19) != basereg
)
1349 destreg
= bits (insn
, 12, 15);
1350 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1351 insn
= read_memory_unsigned_integer (pc
+ offset
+ 4,
1352 4, byte_order_for_code
);
1353 if ((insn
& 0x0e500000) != 0x04000000)
1355 if (bits (insn
, 12, 15) != destreg
)
1358 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1361 return pc
+ offset
+ 4;
1363 return pc
+ offset
+ 8;
/* NOTE(review): extraction-damaged block -- source lines split and
   some missing (gaps in embedded original numbering); comments only
   were added or corrected.  Restore code from upstream
   gdb/arm-tdep.c.  */
1366 /* Advance the PC across any function entry prologue instructions to
1367 reach some "real" code.
1369 The APCS (ARM Procedure Call Standard) defines the following
1373 [stmfd sp!, {a1,a2,a3,a4}]
1374 stmfd sp!, {...,fp,ip,lr,pc}
1375 [stfe f7, [sp, #-12]!]
1376 [stfe f6, [sp, #-12]!]
1377 [stfe f5, [sp, #-12]!]
1378 [stfe f4, [sp, #-12]!]
1379 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1382 arm_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
1384 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1387 CORE_ADDR func_addr
, limit_pc
;
1388 struct symtab_and_line sal
;
1390 /* See if we can determine the end of the prologue via the symbol table.
1391 If so, then return either PC, or the PC after the prologue, whichever
1393 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
1395 CORE_ADDR post_prologue_pc
1396 = skip_prologue_using_sal (gdbarch
, func_addr
);
1397 struct symtab
*s
= find_pc_symtab (func_addr
);
/* Also step over any stack-protector guard load GCC emits right
   after the prologue proper.  */
1399 if (post_prologue_pc
)
1401 = arm_skip_stack_protector (post_prologue_pc
, gdbarch
);
1404 /* GCC always emits a line note before the prologue and another
1405 one after, even if the two are at the same address or on the
1406 same line. Take advantage of this so that we do not need to
1407 know every instruction that might appear in the prologue. We
1408 will have producer information for most binaries; if it is
1409 missing (e.g. for -gstabs), assume the GNU tools. */
1410 if (post_prologue_pc
1412 || s
->producer
== NULL
1413 || strncmp (s
->producer
, "GNU ", sizeof ("GNU ") - 1) == 0))
1414 return post_prologue_pc
;
1416 if (post_prologue_pc
!= 0)
1418 CORE_ADDR analyzed_limit
;
1420 /* For non-GCC compilers, make sure the entire line is an
1421 acceptable prologue; GDB will round this function's
1422 return value up to the end of the following line so we
1423 can not skip just part of a line (and we do not want to).
1425 RealView does not treat the prologue specially, but does
1426 associate prologue code with the opening brace; so this
1427 lets us skip the first line if we think it is the opening
1429 if (arm_pc_is_thumb (gdbarch
, func_addr
))
1430 analyzed_limit
= thumb_analyze_prologue (gdbarch
, func_addr
,
1431 post_prologue_pc
, NULL
);
1433 analyzed_limit
= arm_analyze_prologue (gdbarch
, func_addr
,
1434 post_prologue_pc
, NULL
);
1436 if (analyzed_limit
!= post_prologue_pc
)
1439 return post_prologue_pc
;
1443 /* Can't determine prologue from the symbol table, need to examine
1446 /* Find an upper limit on the function prologue using the debug
1447 information. If the debug information could not be used to provide
1448 that bound, then use an arbitrary large number as the upper bound. */
1449 /* Like arm_scan_prologue, stop no later than pc + 64. */
1450 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
1452 limit_pc
= pc
+ 64; /* Magic. */
1455 /* Check if this is Thumb code. */
1456 if (arm_pc_is_thumb (gdbarch
, pc
))
1457 return thumb_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
/* Instruction-by-instruction fallback scan for ARM-mode prologues.  */
1459 for (skip_pc
= pc
; skip_pc
< limit_pc
; skip_pc
+= 4)
1461 inst
= read_memory_unsigned_integer (skip_pc
, 4, byte_order_for_code
);
1463 /* "mov ip, sp" is no longer a required part of the prologue. */
1464 if (inst
== 0xe1a0c00d) /* mov ip, sp */
1467 if ((inst
& 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1470 if ((inst
& 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1473 /* Some prologues begin with "str lr, [sp, #-4]!". */
1474 if (inst
== 0xe52de004) /* str lr, [sp, #-4]! */
1477 if ((inst
& 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1480 if ((inst
& 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1483 /* Any insns after this point may float into the code, if it makes
1484 for better instruction scheduling, so we skip them only if we
1485 find them, but still consider the function to be frame-ful. */
1487 /* We may have either one sfmfd instruction here, or several stfe
1488 insns, depending on the version of floating point code we
1490 if ((inst
& 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1493 if ((inst
& 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1496 if ((inst
& 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1499 if ((inst
& 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1502 if ((inst
& 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1503 || (inst
& 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1504 || (inst
& 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1507 if ((inst
& 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1508 || (inst
& 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1509 || (inst
& 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1512 /* Un-recognized instruction; stop scanning. */
1516 return skip_pc
; /* End of prologue. */
/* NOTE(review): extraction-damaged block -- lines split and some
   missing; comments only were added or corrected.  Restore code from
   upstream gdb/arm-tdep.c.  */
1520 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1521 This function decodes a Thumb function prologue to determine:
1522 1) the size of the stack frame
1523 2) which registers are saved on it
1524 3) the offsets of saved regs
1525 4) the offset from the stack pointer to the frame pointer
1527 A typical Thumb function prologue would create this stack frame
1528 (offsets relative to FP)
1529 old SP -> 24 stack parameters
1532 R7 -> 0 local variables (16 bytes)
1533 SP -> -12 additional stack space (12 bytes)
1534 The frame size would thus be 36 bytes, and the frame offset would be
1535 12 bytes. The frame register is R7.
1537 The comments for thumb_skip_prologue() describe the algorithm we use
1538 to detect the end of the prologue. */
1542 thumb_scan_prologue (struct gdbarch
*gdbarch
, CORE_ADDR prev_pc
,
1543 CORE_ADDR block_addr
, struct arm_prologue_cache
*cache
)
1545 CORE_ADDR prologue_start
;
1546 CORE_ADDR prologue_end
;
1547 CORE_ADDR current_pc
;
1549 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1552 /* See comment in arm_scan_prologue for an explanation of
1554 if (prologue_end
> prologue_start
+ 64)
1556 prologue_end
= prologue_start
+ 64;
1560 /* We're in the boondocks: we have no idea where the start of the
/* Never scan past the PC we are stopped at.  */
1564 prologue_end
= min (prologue_end
, prev_pc
);
1566 thumb_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
/* NOTE(review): extraction-damaged block -- lines split and some
   missing (switch braces and most case labels were dropped); comments
   only were added.  Restore code from upstream gdb/arm-tdep.c.  */
1569 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1572 arm_instruction_changes_pc (uint32_t this_instr
)
1574 if (bits (this_instr
, 28, 31) == INST_NV
)
1575 /* Unconditional instructions. */
1576 switch (bits (this_instr
, 24, 27))
1580 /* Branch with Link and change to Thumb. */
1585 /* Coprocessor register transfer. */
1586 if (bits (this_instr
, 12, 15) == 15)
1587 error (_("Invalid update to pc in instruction"));
/* Conditional instructions, dispatched on bits 25-27 of the
   encoding.  */
1593 switch (bits (this_instr
, 25, 27))
1596 if (bits (this_instr
, 23, 24) == 2 && bit (this_instr
, 20) == 0)
1598 /* Multiplies and extra load/stores. */
1599 if (bit (this_instr
, 4) == 1 && bit (this_instr
, 7) == 1)
1600 /* Neither multiplies nor extension load/stores are allowed
1604 /* Otherwise, miscellaneous instructions. */
1606 /* BX <reg>, BXJ <reg>, BLX <reg> */
1607 if (bits (this_instr
, 4, 27) == 0x12fff1
1608 || bits (this_instr
, 4, 27) == 0x12fff2
1609 || bits (this_instr
, 4, 27) == 0x12fff3)
1612 /* Other miscellaneous instructions are unpredictable if they
1616 /* Data processing instruction. Fall through. */
1619 if (bits (this_instr
, 12, 15) == 15)
1626 /* Media instructions and architecturally undefined instructions. */
1627 if (bits (this_instr
, 25, 27) == 3 && bit (this_instr
, 4) == 1)
1631 if (bit (this_instr
, 20) == 0)
1635 if (bits (this_instr
, 12, 15) == ARM_PC_REGNUM
)
1641 /* Load/store multiple. */
1642 if (bit (this_instr
, 20) == 1 && bit (this_instr
, 15) == 1)
1648 /* Branch and branch with link. */
1653 /* Coprocessor transfers or SWIs can not affect PC. */
1657 internal_error (__FILE__
, __LINE__
, _("bad value in switch"));
/* NOTE(review): extraction-damaged block -- source lines split across
   physical lines and some lines missing (gaps in the embedded
   original numbering); comments only were added.  Restore code from
   upstream gdb/arm-tdep.c.  */
1661 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1662 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1663 fill it in. Return the first address not recognized as a prologue
1666 We recognize all the instructions typically found in ARM prologues,
1667 plus harmless instructions which can be skipped (either for analysis
1668 purposes, or a more restrictive set that can be skipped when finding
1669 the end of the prologue). */
1672 arm_analyze_prologue (struct gdbarch
*gdbarch
,
1673 CORE_ADDR prologue_start
, CORE_ADDR prologue_end
,
1674 struct arm_prologue_cache
*cache
)
1676 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1677 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1679 CORE_ADDR offset
, current_pc
;
1680 pv_t regs
[ARM_FPS_REGNUM
];
1681 struct pv_area
*stack
;
1682 struct cleanup
*back_to
;
1683 int framereg
, framesize
;
1684 CORE_ADDR unrecognized_pc
= 0;
1686 /* Search the prologue looking for instructions that set up the
1687 frame pointer, adjust the stack pointer, and save registers.
1689 Be careful, however, and if it doesn't look like a prologue,
1690 don't try to scan it. If, for instance, a frameless function
1691 begins with stmfd sp!, then we will tell ourselves there is
1692 a frame, which will confuse stack traceback, as well as "finish"
1693 and other operations that rely on a knowledge of the stack
/* Model each register symbolically as "its value at function entry",
   then simulate the prologue against a pv_area tracking stack
   stores.  */
1696 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1697 regs
[regno
] = pv_register (regno
, 0);
1698 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
1699 back_to
= make_cleanup_free_pv_area (stack
);
1701 for (current_pc
= prologue_start
;
1702 current_pc
< prologue_end
;
1706 = read_memory_unsigned_integer (current_pc
, 4, byte_order_for_code
);
1708 if (insn
== 0xe1a0c00d) /* mov ip, sp */
1710 regs
[ARM_IP_REGNUM
] = regs
[ARM_SP_REGNUM
];
1713 else if ((insn
& 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1714 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1716 unsigned imm
= insn
& 0xff; /* immediate value */
1717 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1718 int rd
= bits (insn
, 12, 15);
/* Decode the ARM rotated-immediate form.  */
1719 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1720 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], imm
);
1723 else if ((insn
& 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1724 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1726 unsigned imm
= insn
& 0xff; /* immediate value */
1727 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1728 int rd
= bits (insn
, 12, 15);
1729 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1730 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], -imm
);
1733 else if ((insn
& 0xffff0fff) == 0xe52d0004) /* str Rd,
1736 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1738 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1739 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4,
1740 regs
[bits (insn
, 12, 15)]);
1743 else if ((insn
& 0xffff0000) == 0xe92d0000)
1744 /* stmfd sp!, {..., fp, ip, lr, pc}
1746 stmfd sp!, {a1, a2, a3, a4} */
1748 int mask
= insn
& 0xffff;
1750 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1753 /* Calculate offsets of saved registers. */
1754 for (regno
= ARM_PC_REGNUM
; regno
>= 0; regno
--)
1755 if (mask
& (1 << regno
))
1758 = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1759 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
1762 else if ((insn
& 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1763 || (insn
& 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1764 || (insn
& 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1766 /* No need to add this to saved_regs -- it's just an arg reg. */
1769 else if ((insn
& 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1770 || (insn
& 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1771 || (insn
& 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1773 /* No need to add this to saved_regs -- it's just an arg reg. */
1776 else if ((insn
& 0xfff00000) == 0xe8800000 /* stm Rn,
1778 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1780 /* No need to add this to saved_regs -- it's just arg regs. */
1783 else if ((insn
& 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1785 unsigned imm
= insn
& 0xff; /* immediate value */
1786 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1787 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1788 regs
[ARM_FP_REGNUM
] = pv_add_constant (regs
[ARM_IP_REGNUM
], -imm
);
1790 else if ((insn
& 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1792 unsigned imm
= insn
& 0xff; /* immediate value */
1793 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1794 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1795 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -imm
);
1797 else if ((insn
& 0xffff7fff) == 0xed6d0103 /* stfe f?,
1799 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1801 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1804 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1805 regno
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x07);
1806 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12, regs
[regno
]);
1808 else if ((insn
& 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1810 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1812 int n_saved_fp_regs
;
1813 unsigned int fp_start_reg
, fp_bound_reg
;
1815 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
/* The register count is encoded in the N0/N1 bits.  */
1818 if ((insn
& 0x800) == 0x800) /* N0 is set */
1820 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1821 n_saved_fp_regs
= 3;
1823 n_saved_fp_regs
= 1;
1827 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1828 n_saved_fp_regs
= 2;
1830 n_saved_fp_regs
= 4;
1833 fp_start_reg
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x7);
1834 fp_bound_reg
= fp_start_reg
+ n_saved_fp_regs
;
1835 for (; fp_start_reg
< fp_bound_reg
; fp_start_reg
++)
1837 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1838 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12,
1839 regs
[fp_start_reg
++]);
1842 else if ((insn
& 0xff000000) == 0xeb000000 && cache
== NULL
) /* bl */
1844 /* Allow some special function calls when skipping the
1845 prologue; GCC generates these before storing arguments to
1847 CORE_ADDR dest
= BranchDest (current_pc
, insn
);
1849 if (skip_prologue_function (gdbarch
, dest
, 0))
1854 else if ((insn
& 0xf0000000) != 0xe0000000)
1855 break; /* Condition not true, exit early. */
1856 else if (arm_instruction_changes_pc (insn
))
1857 /* Don't scan past anything that might change control flow. */
1859 else if ((insn
& 0xfe500000) == 0xe8100000 /* ldm */
1860 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1861 /* Ignore block loads from the stack, potentially copying
1862 parameters from memory. */
1864 else if ((insn
& 0xfc500000) == 0xe4100000
1865 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1866 /* Similarly ignore single loads from the stack. */
1868 else if ((insn
& 0xffff0ff0) == 0xe1a00000)
1869 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1870 register instead of the stack. */
1874 /* The optimizer might shove anything into the prologue,
1875 so we just skip what we don't recognize. */
1876 unrecognized_pc
= current_pc
;
1881 if (unrecognized_pc
== 0)
1882 unrecognized_pc
= current_pc
;
1884 /* The frame size is just the distance from the frame register
1885 to the original stack pointer. */
1886 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1888 /* Frame pointer is fp. */
1889 framereg
= ARM_FP_REGNUM
;
1890 framesize
= -regs
[ARM_FP_REGNUM
].k
;
1892 else if (pv_is_register (regs
[ARM_SP_REGNUM
], ARM_SP_REGNUM
))
1894 /* Try the stack pointer... this is a bit desperate. */
1895 framereg
= ARM_SP_REGNUM
;
1896 framesize
= -regs
[ARM_SP_REGNUM
].k
;
1900 /* We're just out of luck. We don't know where the frame is. */
/* If the caller wants the results, record frame layout and the
   stack addresses of all saved registers in the cache.  */
1907 cache
->framereg
= framereg
;
1908 cache
->framesize
= framesize
;
1910 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1911 if (pv_area_find_reg (stack
, gdbarch
, regno
, &offset
))
1912 cache
->saved_regs
[regno
].addr
= offset
;
1916 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1917 paddress (gdbarch
, unrecognized_pc
));
1919 do_cleanups (back_to
);
1920 return unrecognized_pc
;
/* NOTE(review): extraction-damaged block -- lines split and some
   missing; comments only were added.  Restore code from upstream
   gdb/arm-tdep.c.  */
/* Scan the prologue of the function containing THIS_FRAME's PC and
   fill CACHE with the frame register, frame size, and saved-register
   offsets.  Dispatches to thumb_scan_prologue for Thumb frames.  */
1924 arm_scan_prologue (struct frame_info
*this_frame
,
1925 struct arm_prologue_cache
*cache
)
1927 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1928 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1930 CORE_ADDR prologue_start
, prologue_end
, current_pc
;
1931 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
1932 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
1933 pv_t regs
[ARM_FPS_REGNUM
];
1934 struct pv_area
*stack
;
1935 struct cleanup
*back_to
;
1938 /* Assume there is no frame until proven otherwise. */
1939 cache
->framereg
= ARM_SP_REGNUM
;
1940 cache
->framesize
= 0;
1942 /* Check for Thumb prologue. */
1943 if (arm_frame_is_thumb (this_frame
))
1945 thumb_scan_prologue (gdbarch
, prev_pc
, block_addr
, cache
);
1949 /* Find the function prologue. If we can't find the function in
1950 the symbol table, peek in the stack frame to find the PC. */
1951 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1954 /* One way to find the end of the prologue (which works well
1955 for unoptimized code) is to do the following:
1957 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1960 prologue_end = prev_pc;
1961 else if (sal.end < prologue_end)
1962 prologue_end = sal.end;
1964 This mechanism is very accurate so long as the optimizer
1965 doesn't move any instructions from the function body into the
1966 prologue. If this happens, sal.end will be the last
1967 instruction in the first hunk of prologue code just before
1968 the first instruction that the scheduler has moved from
1969 the body to the prologue.
1971 In order to make sure that we scan all of the prologue
1972 instructions, we use a slightly less accurate mechanism which
1973 may scan more than necessary. To help compensate for this
1974 lack of accuracy, the prologue scanning loop below contains
1975 several clauses which'll cause the loop to terminate early if
1976 an implausible prologue instruction is encountered.
1982 is a suitable endpoint since it accounts for the largest
1983 possible prologue plus up to five instructions inserted by
1986 if (prologue_end
> prologue_start
+ 64)
1988 prologue_end
= prologue_start
+ 64; /* See above. */
1993 /* We have no symbol information. Our only option is to assume this
1994 function has a standard stack frame and the normal frame register.
1995 Then, we can find the value of our frame pointer on entrance to
1996 the callee (or at the present moment if this is the innermost frame).
1997 The value stored there should be the address of the stmfd + 8. */
1998 CORE_ADDR frame_loc
;
1999 LONGEST return_value
;
2001 frame_loc
= get_frame_register_unsigned (this_frame
, ARM_FP_REGNUM
);
2002 if (!safe_read_memory_integer (frame_loc
, 4, byte_order
, &return_value
))
/* -8 backs up from the saved return address to the stmfd that
   starts the standard APCS prologue.  */
2006 prologue_start
= gdbarch_addr_bits_remove
2007 (gdbarch
, return_value
) - 8;
2008 prologue_end
= prologue_start
+ 64; /* See above. */
/* Never scan past the PC we are stopped at.  */
2012 if (prev_pc
< prologue_end
)
2013 prologue_end
= prev_pc
;
2015 arm_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
/* NOTE(review): extraction-damaged block -- lines split and some
   missing; comments only were added.  Restore code from upstream
   gdb/arm-tdep.c.  */
/* Allocate and populate an arm_prologue_cache for THIS_FRAME by
   scanning the function prologue, then convert the saved-register
   offsets recorded by the scan into absolute addresses.  */
2018 static struct arm_prologue_cache
*
2019 arm_make_prologue_cache (struct frame_info
*this_frame
)
2022 struct arm_prologue_cache
*cache
;
2023 CORE_ADDR unwound_fp
;
2025 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2026 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2028 arm_scan_prologue (this_frame
, cache
);
2030 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
2031 if (unwound_fp
== 0)
/* The caller's SP is the frame register's value plus the frame
   size computed by the prologue scan.  */
2034 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
2036 /* Calculate actual addresses of saved registers using offsets
2037 determined by arm_scan_prologue. */
2038 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
2039 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
2040 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
/* NOTE(review): extraction-damaged block -- lines split and some
   missing; comments only were added.  Restore code from upstream
   gdb/arm-tdep.c.  */
2045 /* Our frame ID for a normal frame is the current function's starting PC
2046 and the caller's SP when we were called. */
2049 arm_prologue_this_id (struct frame_info
*this_frame
,
2051 struct frame_id
*this_id
)
2053 struct arm_prologue_cache
*cache
;
/* Build (and memoize in *THIS_CACHE) the prologue cache on first
   use.  */
2057 if (*this_cache
== NULL
)
2058 *this_cache
= arm_make_prologue_cache (this_frame
);
2059 cache
= *this_cache
;
2061 /* This is meant to halt the backtrace at "_start". */
2062 pc
= get_frame_pc (this_frame
);
2063 if (pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
2066 /* If we've hit a wall, stop. */
2067 if (cache
->prev_sp
== 0)
2070 /* Use function start address as part of the frame ID. If we cannot
2071 identify the start address (due to missing symbol information),
2072 fall back to just using the current PC. */
2073 func
= get_frame_func (this_frame
);
2077 id
= frame_id_build (cache
->prev_sp
, func
);
/* NOTE(review): extraction-damaged block -- lines split and some
   missing; comments only were added.  Restore code from upstream
   gdb/arm-tdep.c.  */
/* Unwind register PREV_REGNUM of the frame previous to THIS_FRAME,
   with special handling for PC (use LR), SP (use reconstructed
   PREV_SP), and CPSR (reconstruct the T bit from LR).  */
2081 static struct value
*
2082 arm_prologue_prev_register (struct frame_info
*this_frame
,
2086 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2087 struct arm_prologue_cache
*cache
;
2089 if (*this_cache
== NULL
)
2090 *this_cache
= arm_make_prologue_cache (this_frame
);
2091 cache
= *this_cache
;
2093 /* If we are asked to unwind the PC, then we need to return the LR
2094 instead. The prologue may save PC, but it will point into this
2095 frame's prologue, not the next frame's resume location. Also
2096 strip the saved T bit. A valid LR may have the low bit set, but
2097 a valid PC never does. */
2098 if (prev_regnum
== ARM_PC_REGNUM
)
2102 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2103 return frame_unwind_got_constant (this_frame
, prev_regnum
,
2104 arm_addr_bits_remove (gdbarch
, lr
));
2107 /* SP is generally not saved to the stack, but this frame is
2108 identified by the next frame's stack pointer at the time of the call.
2109 The value was already reconstructed into PREV_SP. */
2110 if (prev_regnum
== ARM_SP_REGNUM
)
2111 return frame_unwind_got_constant (this_frame
, prev_regnum
, cache
->prev_sp
);
2113 /* The CPSR may have been changed by the call instruction and by the
2114 called function. The only bit we can reconstruct is the T bit,
2115 by checking the low bit of LR as of the call. This is a reliable
2116 indicator of Thumb-ness except for some ARM v4T pre-interworking
2117 Thumb code, which could get away with a clear low bit as long as
2118 the called function did not use bx. Guess that all other
2119 bits are unchanged; the condition flags are presumably lost,
2120 but the processor status is likely valid. */
2121 if (prev_regnum
== ARM_PS_REGNUM
)
2124 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
2126 cpsr
= get_frame_register_unsigned (this_frame
, prev_regnum
);
2127 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2128 if (IS_THUMB_ADDR (lr
))
2132 return frame_unwind_got_constant (this_frame
, prev_regnum
, cpsr
);
/* Anything else: fall back to the saved-register table built by the
   prologue scan.  */
2135 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
/* NOTE(review): extraction-damaged block -- several initializer lines
   of this frame_unwind instance are missing; comments only added.
   Restore from upstream gdb/arm-tdep.c.  */
/* Frame unwinder driven by the prologue analyzer above.  */
2139 struct frame_unwind arm_prologue_unwind
= {
2141 default_frame_unwind_stop_reason
,
2142 arm_prologue_this_id
,
2143 arm_prologue_prev_register
,
2145 default_frame_sniffer
/* NOTE(review): extraction-damaged block -- struct member lines are
   missing; comments only added.  Restore from upstream
   gdb/arm-tdep.c.  */
2148 /* Maintain a list of ARM exception table entries per objfile, similar to the
2149 list of mapping symbols. We only cache entries for standard ARM-defined
2150 personality routines; the cache will contain only the frame unwinding
2151 instructions associated with the entry (not the descriptors). */
/* Per-objfile key for the cached exception-index data.  */
2153 static const struct objfile_data
*arm_exidx_data_key
;
2155 struct arm_exidx_entry
2160 typedef struct arm_exidx_entry arm_exidx_entry_s
;
2161 DEF_VEC_O(arm_exidx_entry_s
);
/* One vector of entries per BFD section of the objfile.  */
2163 struct arm_exidx_data
2165 VEC(arm_exidx_entry_s
) **section_maps
;
/* NOTE(review): extraction-damaged block; comments only added.
   Restore from upstream gdb/arm-tdep.c.  */
/* objfile_data cleanup: free the per-section entry vectors cached
   for OBJFILE.  */
2169 arm_exidx_data_free (struct objfile
*objfile
, void *arg
)
2171 struct arm_exidx_data
*data
= arg
;
2174 for (i
= 0; i
< objfile
->obfd
->section_count
; i
++)
2175 VEC_free (arm_exidx_entry_s
, data
->section_maps
[i
]);
/* NOTE(review): extraction-damaged block; comments only added.
   Restore from upstream gdb/arm-tdep.c.  */
/* Less-than ordering predicate on entry start addresses, for use
   with the sorted-vector search machinery.  */
2179 arm_compare_exidx_entries (const struct arm_exidx_entry
*lhs
,
2180 const struct arm_exidx_entry
*rhs
)
2182 return lhs
->addr
< rhs
->addr
;
/* NOTE(review): extraction-damaged block -- lines split and some
   missing; comments only added.  Restore from upstream
   gdb/arm-tdep.c.  */
/* Return the allocated (SEC_ALLOC) section of OBJFILE whose VMA range
   contains VMA, or fall through if none does.  */
2185 static struct obj_section
*
2186 arm_obj_section_from_vma (struct objfile
*objfile
, bfd_vma vma
)
2188 struct obj_section
*osect
;
2190 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
2191 if (bfd_get_section_flags (objfile
->obfd
,
2192 osect
->the_bfd_section
) & SEC_ALLOC
)
2194 bfd_vma start
, size
;
2195 start
= bfd_get_section_vma (objfile
->obfd
, osect
->the_bfd_section
);
2196 size
= bfd_get_section_size (osect
->the_bfd_section
);
/* Half-open interval test: [start, start + size).  */
2198 if (start
<= vma
&& vma
< start
+ size
)
/* NOTE(review): extraction-damaged block -- source lines split across
   physical lines and many missing (gaps in embedded original
   numbering); comments only added.  Restore code from upstream
   gdb/arm-tdep.c.  */
2205 /* Parse contents of exception table and exception index sections
2206 of OBJFILE, and fill in the exception table entry cache.
2208 For each entry that refers to a standard ARM-defined personality
2209 routine, extract the frame unwinding instructions (from either
2210 the index or the table section). The unwinding instructions
2212 - extracting them from the rest of the table data
2213 - converting to host endianness
2214 - appending the implicit 0xb0 ("Finish") code
2216 The extracted and normalized instructions are stored for later
2217 retrieval by the arm_find_exidx_entry routine. */
2220 arm_exidx_new_objfile (struct objfile
*objfile
)
2222 struct cleanup
*cleanups
= make_cleanup (null_cleanup
, NULL
);
2223 struct arm_exidx_data
*data
;
2224 asection
*exidx
, *extab
;
2225 bfd_vma exidx_vma
= 0, extab_vma
= 0;
2226 bfd_size_type exidx_size
= 0, extab_size
= 0;
2227 gdb_byte
*exidx_data
= NULL
, *extab_data
= NULL
;
2230 /* If we've already touched this file, do nothing. */
2231 if (!objfile
|| objfile_data (objfile
, arm_exidx_data_key
) != NULL
)
2234 /* Read contents of exception table and index. */
2235 exidx
= bfd_get_section_by_name (objfile
->obfd
, ".ARM.exidx");
2238 exidx_vma
= bfd_section_vma (objfile
->obfd
, exidx
);
2239 exidx_size
= bfd_get_section_size (exidx
);
2240 exidx_data
= xmalloc (exidx_size
);
2241 make_cleanup (xfree
, exidx_data
);
2243 if (!bfd_get_section_contents (objfile
->obfd
, exidx
,
2244 exidx_data
, 0, exidx_size
))
2246 do_cleanups (cleanups
);
2251 extab
= bfd_get_section_by_name (objfile
->obfd
, ".ARM.extab");
2254 extab_vma
= bfd_section_vma (objfile
->obfd
, extab
);
2255 extab_size
= bfd_get_section_size (extab
);
2256 extab_data
= xmalloc (extab_size
);
2257 make_cleanup (xfree
, extab_data
);
2259 if (!bfd_get_section_contents (objfile
->obfd
, extab
,
2260 extab_data
, 0, extab_size
))
2262 do_cleanups (cleanups
);
2267 /* Allocate exception table data structure. */
2268 data
= OBSTACK_ZALLOC (&objfile
->objfile_obstack
, struct arm_exidx_data
);
2269 set_objfile_data (objfile
, arm_exidx_data_key
, data
);
2270 data
->section_maps
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
2271 objfile
->obfd
->section_count
,
2272 VEC(arm_exidx_entry_s
) *);
2274 /* Fill in exception table. */
/* Each .ARM.exidx entry is two 32-bit words: a prel31 function
   address and either inline unwind data or a table pointer.  */
2275 for (i
= 0; i
< exidx_size
/ 8; i
++)
2277 struct arm_exidx_entry new_exidx_entry
;
2278 bfd_vma idx
= bfd_h_get_32 (objfile
->obfd
, exidx_data
+ i
* 8);
2279 bfd_vma val
= bfd_h_get_32 (objfile
->obfd
, exidx_data
+ i
* 8 + 4);
2280 bfd_vma addr
= 0, word
= 0;
2281 int n_bytes
= 0, n_words
= 0;
2282 struct obj_section
*sec
;
2283 gdb_byte
*entry
= NULL
;
2285 /* Extract address of start of function. */
/* Sign-extend the 31-bit self-relative offset.  */
2286 idx
= ((idx
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2287 idx
+= exidx_vma
+ i
* 8;
2289 /* Find section containing function and compute section offset. */
2290 sec
= arm_obj_section_from_vma (objfile
, idx
);
2293 idx
-= bfd_get_section_vma (objfile
->obfd
, sec
->the_bfd_section
);
2295 /* Determine address of exception table entry. */
2298 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2300 else if ((val
& 0xff000000) == 0x80000000)
2302 /* Exception table entry embedded in .ARM.exidx
2303 -- must be short form. */
2307 else if (!(val
& 0x80000000))
2309 /* Exception table entry in .ARM.extab. */
2310 addr
= ((val
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2311 addr
+= exidx_vma
+ i
* 8 + 4;
2313 if (addr
>= extab_vma
&& addr
+ 4 <= extab_vma
+ extab_size
)
2315 word
= bfd_h_get_32 (objfile
->obfd
,
2316 extab_data
+ addr
- extab_vma
);
/* Dispatch on the personality-routine encoding of the first
   table word.  */
2319 if ((word
& 0xff000000) == 0x80000000)
2324 else if ((word
& 0xff000000) == 0x81000000
2325 || (word
& 0xff000000) == 0x82000000)
2329 n_words
= ((word
>> 16) & 0xff);
2331 else if (!(word
& 0x80000000))
2334 struct obj_section
*pers_sec
;
2335 int gnu_personality
= 0;
2337 /* Custom personality routine. */
2338 pers
= ((word
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2339 pers
= UNMAKE_THUMB_ADDR (pers
+ addr
- 4);
2341 /* Check whether we've got one of the variants of the
2342 GNU personality routines. */
2343 pers_sec
= arm_obj_section_from_vma (objfile
, pers
);
2346 static const char *personality
[] =
2348 "__gcc_personality_v0",
2349 "__gxx_personality_v0",
2350 "__gcj_personality_v0",
2351 "__gnu_objc_personality_v0",
2355 CORE_ADDR pc
= pers
+ obj_section_offset (pers_sec
);
2358 for (k
= 0; personality
[k
]; k
++)
2359 if (lookup_minimal_symbol_by_pc_name
2360 (pc
, personality
[k
], objfile
))
2362 gnu_personality
= 1;
2367 /* If so, the next word contains a word count in the high
2368 byte, followed by the same unwind instructions as the
2369 pre-defined forms. */
2371 && addr
+ 4 <= extab_vma
+ extab_size
)
2373 word
= bfd_h_get_32 (objfile
->obfd
,
2374 extab_data
+ addr
- extab_vma
);
2377 n_words
= ((word
>> 24) & 0xff);
2383 /* Sanity check address. */
2385 if (addr
< extab_vma
|| addr
+ 4 * n_words
> extab_vma
+ extab_size
)
2386 n_words
= n_bytes
= 0;
2388 /* The unwind instructions reside in WORD (only the N_BYTES least
2389 significant bytes are valid), followed by N_WORDS words in the
2390 extab section starting at ADDR. */
2391 if (n_bytes
|| n_words
)
2393 gdb_byte
*p
= entry
= obstack_alloc (&objfile
->objfile_obstack
,
2394 n_bytes
+ n_words
* 4 + 1);
2397 *p
++ = (gdb_byte
) ((word
>> (8 * n_bytes
)) & 0xff);
2401 word
= bfd_h_get_32 (objfile
->obfd
,
2402 extab_data
+ addr
- extab_vma
);
/* Unpack each word big-endian-first into the byte stream.  */
2405 *p
++ = (gdb_byte
) ((word
>> 24) & 0xff);
2406 *p
++ = (gdb_byte
) ((word
>> 16) & 0xff);
2407 *p
++ = (gdb_byte
) ((word
>> 8) & 0xff);
2408 *p
++ = (gdb_byte
) (word
& 0xff);
2411 /* Implied "Finish" to terminate the list. */
2415 /* Push entry onto vector. They are guaranteed to always
2416 appear in order of increasing addresses. */
2417 new_exidx_entry
.addr
= idx
;
2418 new_exidx_entry
.entry
= entry
;
2419 VEC_safe_push (arm_exidx_entry_s
,
2420 data
->section_maps
[sec
->the_bfd_section
->index
],
2424 do_cleanups (cleanups
);
2427 /* Search for the exception table entry covering MEMADDR. If one is found,
2428 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2429 set *START to the start of the region covered by this entry. */
2432 arm_find_exidx_entry (CORE_ADDR memaddr
, CORE_ADDR
*start
)
2434 struct obj_section
*sec
;
2436 sec
= find_pc_section (memaddr
);
2439 struct arm_exidx_data
*data
;
2440 VEC(arm_exidx_entry_s
) *map
;
2441 struct arm_exidx_entry map_key
= { memaddr
- obj_section_addr (sec
), 0 };
2444 data
= objfile_data (sec
->objfile
, arm_exidx_data_key
);
2447 map
= data
->section_maps
[sec
->the_bfd_section
->index
];
2448 if (!VEC_empty (arm_exidx_entry_s
, map
))
2450 struct arm_exidx_entry
*map_sym
;
2452 idx
= VEC_lower_bound (arm_exidx_entry_s
, map
, &map_key
,
2453 arm_compare_exidx_entries
);
2455 /* VEC_lower_bound finds the earliest ordered insertion
2456 point. If the following symbol starts at this exact
2457 address, we use that; otherwise, the preceding
2458 exception table entry covers this address. */
2459 if (idx
< VEC_length (arm_exidx_entry_s
, map
))
2461 map_sym
= VEC_index (arm_exidx_entry_s
, map
, idx
);
2462 if (map_sym
->addr
== map_key
.addr
)
2465 *start
= map_sym
->addr
+ obj_section_addr (sec
);
2466 return map_sym
->entry
;
2472 map_sym
= VEC_index (arm_exidx_entry_s
, map
, idx
- 1);
2474 *start
= map_sym
->addr
+ obj_section_addr (sec
);
2475 return map_sym
->entry
;
2484 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2485 instruction list from the ARM exception table entry ENTRY, allocate and
2486 return a prologue cache structure describing how to unwind this frame.
2488 Return NULL if the unwinding instruction list contains a "spare",
2489 "reserved" or "refuse to unwind" instruction as defined in section
2490 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2491 for the ARM Architecture" document. */
2493 static struct arm_prologue_cache
*
2494 arm_exidx_fill_cache (struct frame_info
*this_frame
, gdb_byte
*entry
)
2499 struct arm_prologue_cache
*cache
;
2500 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2501 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2507 /* Whenever we reload SP, we actually have to retrieve its
2508 actual value in the current frame. */
2511 if (trad_frame_realreg_p (cache
->saved_regs
, ARM_SP_REGNUM
))
2513 int reg
= cache
->saved_regs
[ARM_SP_REGNUM
].realreg
;
2514 vsp
= get_frame_register_unsigned (this_frame
, reg
);
2518 CORE_ADDR addr
= cache
->saved_regs
[ARM_SP_REGNUM
].addr
;
2519 vsp
= get_frame_memory_unsigned (this_frame
, addr
, 4);
2525 /* Decode next unwind instruction. */
2528 if ((insn
& 0xc0) == 0)
2530 int offset
= insn
& 0x3f;
2531 vsp
+= (offset
<< 2) + 4;
2533 else if ((insn
& 0xc0) == 0x40)
2535 int offset
= insn
& 0x3f;
2536 vsp
-= (offset
<< 2) + 4;
2538 else if ((insn
& 0xf0) == 0x80)
2540 int mask
= ((insn
& 0xf) << 8) | *entry
++;
2543 /* The special case of an all-zero mask identifies
2544 "Refuse to unwind". We return NULL to fall back
2545 to the prologue analyzer. */
2549 /* Pop registers r4..r15 under mask. */
2550 for (i
= 0; i
< 12; i
++)
2551 if (mask
& (1 << i
))
2553 cache
->saved_regs
[4 + i
].addr
= vsp
;
2557 /* Special-case popping SP -- we need to reload vsp. */
2558 if (mask
& (1 << (ARM_SP_REGNUM
- 4)))
2561 else if ((insn
& 0xf0) == 0x90)
2563 int reg
= insn
& 0xf;
2565 /* Reserved cases. */
2566 if (reg
== ARM_SP_REGNUM
|| reg
== ARM_PC_REGNUM
)
2569 /* Set SP from another register and mark VSP for reload. */
2570 cache
->saved_regs
[ARM_SP_REGNUM
] = cache
->saved_regs
[reg
];
2573 else if ((insn
& 0xf0) == 0xa0)
2575 int count
= insn
& 0x7;
2576 int pop_lr
= (insn
& 0x8) != 0;
2579 /* Pop r4..r[4+count]. */
2580 for (i
= 0; i
<= count
; i
++)
2582 cache
->saved_regs
[4 + i
].addr
= vsp
;
2586 /* If indicated by flag, pop LR as well. */
2589 cache
->saved_regs
[ARM_LR_REGNUM
].addr
= vsp
;
2593 else if (insn
== 0xb0)
2595 /* We could only have updated PC by popping into it; if so, it
2596 will show up as address. Otherwise, copy LR into PC. */
2597 if (!trad_frame_addr_p (cache
->saved_regs
, ARM_PC_REGNUM
))
2598 cache
->saved_regs
[ARM_PC_REGNUM
]
2599 = cache
->saved_regs
[ARM_LR_REGNUM
];
2604 else if (insn
== 0xb1)
2606 int mask
= *entry
++;
2609 /* All-zero mask and mask >= 16 is "spare". */
2610 if (mask
== 0 || mask
>= 16)
2613 /* Pop r0..r3 under mask. */
2614 for (i
= 0; i
< 4; i
++)
2615 if (mask
& (1 << i
))
2617 cache
->saved_regs
[i
].addr
= vsp
;
2621 else if (insn
== 0xb2)
2623 ULONGEST offset
= 0;
2628 offset
|= (*entry
& 0x7f) << shift
;
2631 while (*entry
++ & 0x80);
2633 vsp
+= 0x204 + (offset
<< 2);
2635 else if (insn
== 0xb3)
2637 int start
= *entry
>> 4;
2638 int count
= (*entry
++) & 0xf;
2641 /* Only registers D0..D15 are valid here. */
2642 if (start
+ count
>= 16)
2645 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2646 for (i
= 0; i
<= count
; i
++)
2648 cache
->saved_regs
[ARM_D0_REGNUM
+ start
+ i
].addr
= vsp
;
2652 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2655 else if ((insn
& 0xf8) == 0xb8)
2657 int count
= insn
& 0x7;
2660 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2661 for (i
= 0; i
<= count
; i
++)
2663 cache
->saved_regs
[ARM_D0_REGNUM
+ 8 + i
].addr
= vsp
;
2667 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2670 else if (insn
== 0xc6)
2672 int start
= *entry
>> 4;
2673 int count
= (*entry
++) & 0xf;
2676 /* Only registers WR0..WR15 are valid. */
2677 if (start
+ count
>= 16)
2680 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2681 for (i
= 0; i
<= count
; i
++)
2683 cache
->saved_regs
[ARM_WR0_REGNUM
+ start
+ i
].addr
= vsp
;
2687 else if (insn
== 0xc7)
2689 int mask
= *entry
++;
2692 /* All-zero mask and mask >= 16 is "spare". */
2693 if (mask
== 0 || mask
>= 16)
2696 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2697 for (i
= 0; i
< 4; i
++)
2698 if (mask
& (1 << i
))
2700 cache
->saved_regs
[ARM_WCGR0_REGNUM
+ i
].addr
= vsp
;
2704 else if ((insn
& 0xf8) == 0xc0)
2706 int count
= insn
& 0x7;
2709 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2710 for (i
= 0; i
<= count
; i
++)
2712 cache
->saved_regs
[ARM_WR0_REGNUM
+ 10 + i
].addr
= vsp
;
2716 else if (insn
== 0xc8)
2718 int start
= *entry
>> 4;
2719 int count
= (*entry
++) & 0xf;
2722 /* Only registers D0..D31 are valid. */
2723 if (start
+ count
>= 16)
2726 /* Pop VFP double-precision registers
2727 D[16+start]..D[16+start+count]. */
2728 for (i
= 0; i
<= count
; i
++)
2730 cache
->saved_regs
[ARM_D0_REGNUM
+ 16 + start
+ i
].addr
= vsp
;
2734 else if (insn
== 0xc9)
2736 int start
= *entry
>> 4;
2737 int count
= (*entry
++) & 0xf;
2740 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2741 for (i
= 0; i
<= count
; i
++)
2743 cache
->saved_regs
[ARM_D0_REGNUM
+ start
+ i
].addr
= vsp
;
2747 else if ((insn
& 0xf8) == 0xd0)
2749 int count
= insn
& 0x7;
2752 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2753 for (i
= 0; i
<= count
; i
++)
2755 cache
->saved_regs
[ARM_D0_REGNUM
+ 8 + i
].addr
= vsp
;
2761 /* Everything else is "spare". */
2766 /* If we restore SP from a register, assume this was the frame register.
2767 Otherwise just fall back to SP as frame register. */
2768 if (trad_frame_realreg_p (cache
->saved_regs
, ARM_SP_REGNUM
))
2769 cache
->framereg
= cache
->saved_regs
[ARM_SP_REGNUM
].realreg
;
2771 cache
->framereg
= ARM_SP_REGNUM
;
2773 /* Determine offset to previous frame. */
2775 = vsp
- get_frame_register_unsigned (this_frame
, cache
->framereg
);
2777 /* We already got the previous SP. */
2778 cache
->prev_sp
= vsp
;
2783 /* Unwinding via ARM exception table entries. Note that the sniffer
2784 already computes a filled-in prologue cache, which is then used
2785 with the same arm_prologue_this_id and arm_prologue_prev_register
2786 routines also used for prologue-parsing based unwinding. */
2789 arm_exidx_unwind_sniffer (const struct frame_unwind
*self
,
2790 struct frame_info
*this_frame
,
2791 void **this_prologue_cache
)
2793 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2794 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2795 CORE_ADDR addr_in_block
, exidx_region
, func_start
;
2796 struct arm_prologue_cache
*cache
;
2799 /* See if we have an ARM exception table entry covering this address. */
2800 addr_in_block
= get_frame_address_in_block (this_frame
);
2801 entry
= arm_find_exidx_entry (addr_in_block
, &exidx_region
);
2805 /* The ARM exception table does not describe unwind information
2806 for arbitrary PC values, but is guaranteed to be correct only
2807 at call sites. We have to decide here whether we want to use
2808 ARM exception table information for this frame, or fall back
2809 to using prologue parsing. (Note that if we have DWARF CFI,
2810 this sniffer isn't even called -- CFI is always preferred.)
2812 Before we make this decision, however, we check whether we
2813 actually have *symbol* information for the current frame.
2814 If not, prologue parsing would not work anyway, so we might
2815 as well use the exception table and hope for the best. */
2816 if (find_pc_partial_function (addr_in_block
, NULL
, &func_start
, NULL
))
2820 /* If the next frame is "normal", we are at a call site in this
2821 frame, so exception information is guaranteed to be valid. */
2822 if (get_next_frame (this_frame
)
2823 && get_frame_type (get_next_frame (this_frame
)) == NORMAL_FRAME
)
2826 /* We also assume exception information is valid if we're currently
2827 blocked in a system call. The system library is supposed to
2828 ensure this, so that e.g. pthread cancellation works. */
2829 if (arm_frame_is_thumb (this_frame
))
2833 if (safe_read_memory_integer (get_frame_pc (this_frame
) - 2, 2,
2834 byte_order_for_code
, &insn
)
2835 && (insn
& 0xff00) == 0xdf00 /* svc */)
2842 if (safe_read_memory_integer (get_frame_pc (this_frame
) - 4, 4,
2843 byte_order_for_code
, &insn
)
2844 && (insn
& 0x0f000000) == 0x0f000000 /* svc */)
2848 /* Bail out if we don't know that exception information is valid. */
2852 /* The ARM exception index does not mark the *end* of the region
2853 covered by the entry, and some functions will not have any entry.
2854 To correctly recognize the end of the covered region, the linker
2855 should have inserted dummy records with a CANTUNWIND marker.
2857 Unfortunately, current versions of GNU ld do not reliably do
2858 this, and thus we may have found an incorrect entry above.
2859 As a (temporary) sanity check, we only use the entry if it
2860 lies *within* the bounds of the function. Note that this check
2861 might reject perfectly valid entries that just happen to cover
2862 multiple functions; therefore this check ought to be removed
2863 once the linker is fixed. */
2864 if (func_start
> exidx_region
)
2868 /* Decode the list of unwinding instructions into a prologue cache.
2869 Note that this may fail due to e.g. a "refuse to unwind" code. */
2870 cache
= arm_exidx_fill_cache (this_frame
, entry
);
2874 *this_prologue_cache
= cache
;
2878 struct frame_unwind arm_exidx_unwind
= {
2880 default_frame_unwind_stop_reason
,
2881 arm_prologue_this_id
,
2882 arm_prologue_prev_register
,
2884 arm_exidx_unwind_sniffer
2887 static struct arm_prologue_cache
*
2888 arm_make_stub_cache (struct frame_info
*this_frame
)
2890 struct arm_prologue_cache
*cache
;
2892 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2893 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2895 cache
->prev_sp
= get_frame_register_unsigned (this_frame
, ARM_SP_REGNUM
);
2900 /* Our frame ID for a stub frame is the current SP and LR. */
2903 arm_stub_this_id (struct frame_info
*this_frame
,
2905 struct frame_id
*this_id
)
2907 struct arm_prologue_cache
*cache
;
2909 if (*this_cache
== NULL
)
2910 *this_cache
= arm_make_stub_cache (this_frame
);
2911 cache
= *this_cache
;
2913 *this_id
= frame_id_build (cache
->prev_sp
, get_frame_pc (this_frame
));
2917 arm_stub_unwind_sniffer (const struct frame_unwind
*self
,
2918 struct frame_info
*this_frame
,
2919 void **this_prologue_cache
)
2921 CORE_ADDR addr_in_block
;
2924 addr_in_block
= get_frame_address_in_block (this_frame
);
2925 if (in_plt_section (addr_in_block
, NULL
)
2926 /* We also use the stub winder if the target memory is unreadable
2927 to avoid having the prologue unwinder trying to read it. */
2928 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
2934 struct frame_unwind arm_stub_unwind
= {
2936 default_frame_unwind_stop_reason
,
2938 arm_prologue_prev_register
,
2940 arm_stub_unwind_sniffer
2944 arm_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
2946 struct arm_prologue_cache
*cache
;
2948 if (*this_cache
== NULL
)
2949 *this_cache
= arm_make_prologue_cache (this_frame
);
2950 cache
= *this_cache
;
2952 return cache
->prev_sp
- cache
->framesize
;
2955 struct frame_base arm_normal_base
= {
2956 &arm_prologue_unwind
,
2957 arm_normal_frame_base
,
2958 arm_normal_frame_base
,
2959 arm_normal_frame_base
2962 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2963 dummy frame. The frame ID's base needs to match the TOS value
2964 saved by save_dummy_frame_tos() and returned from
2965 arm_push_dummy_call, and the PC needs to match the dummy frame's
2968 static struct frame_id
2969 arm_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2971 return frame_id_build (get_frame_register_unsigned (this_frame
,
2973 get_frame_pc (this_frame
));
2976 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2977 be used to construct the previous frame's ID, after looking up the
2978 containing function). */
2981 arm_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2984 pc
= frame_unwind_register_unsigned (this_frame
, ARM_PC_REGNUM
);
2985 return arm_addr_bits_remove (gdbarch
, pc
);
2989 arm_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2991 return frame_unwind_register_unsigned (this_frame
, ARM_SP_REGNUM
);
2994 static struct value
*
2995 arm_dwarf2_prev_register (struct frame_info
*this_frame
, void **this_cache
,
2998 struct gdbarch
* gdbarch
= get_frame_arch (this_frame
);
3000 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
3005 /* The PC is normally copied from the return column, which
3006 describes saves of LR. However, that version may have an
3007 extra bit set to indicate Thumb state. The bit is not
3009 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
3010 return frame_unwind_got_constant (this_frame
, regnum
,
3011 arm_addr_bits_remove (gdbarch
, lr
));
3014 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3015 cpsr
= get_frame_register_unsigned (this_frame
, regnum
);
3016 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
3017 if (IS_THUMB_ADDR (lr
))
3021 return frame_unwind_got_constant (this_frame
, regnum
, cpsr
);
3024 internal_error (__FILE__
, __LINE__
,
3025 _("Unexpected register %d"), regnum
);
3030 arm_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
3031 struct dwarf2_frame_state_reg
*reg
,
3032 struct frame_info
*this_frame
)
3038 reg
->how
= DWARF2_FRAME_REG_FN
;
3039 reg
->loc
.fn
= arm_dwarf2_prev_register
;
3042 reg
->how
= DWARF2_FRAME_REG_CFA
;
3047 /* Return true if we are in the function's epilogue, i.e. after the
3048 instruction that destroyed the function's stack frame. */
3051 thumb_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3053 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3054 unsigned int insn
, insn2
;
3055 int found_return
= 0, found_stack_adjust
= 0;
3056 CORE_ADDR func_start
, func_end
;
3060 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
3063 /* The epilogue is a sequence of instructions along the following lines:
3065 - add stack frame size to SP or FP
3066 - [if frame pointer used] restore SP from FP
3067 - restore registers from SP [may include PC]
3068 - a return-type instruction [if PC wasn't already restored]
3070 In a first pass, we scan forward from the current PC and verify the
3071 instructions we find as compatible with this sequence, ending in a
3074 However, this is not sufficient to distinguish indirect function calls
3075 within a function from indirect tail calls in the epilogue in some cases.
3076 Therefore, if we didn't already find any SP-changing instruction during
3077 forward scan, we add a backward scanning heuristic to ensure we actually
3078 are in the epilogue. */
3081 while (scan_pc
< func_end
&& !found_return
)
3083 if (target_read_memory (scan_pc
, buf
, 2))
3087 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3089 if ((insn
& 0xff80) == 0x4700) /* bx <Rm> */
3091 else if (insn
== 0x46f7) /* mov pc, lr */
3093 else if (insn
== 0x46bd) /* mov sp, r7 */
3094 found_stack_adjust
= 1;
3095 else if ((insn
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3096 found_stack_adjust
= 1;
3097 else if ((insn
& 0xfe00) == 0xbc00) /* pop <registers> */
3099 found_stack_adjust
= 1;
3100 if (insn
& 0x0100) /* <registers> include PC. */
3103 else if ((insn
& 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
3105 if (target_read_memory (scan_pc
, buf
, 2))
3109 insn2
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3111 if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
3113 found_stack_adjust
= 1;
3114 if (insn2
& 0x8000) /* <registers> include PC. */
3117 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
3118 && (insn2
& 0x0fff) == 0x0b04)
3120 found_stack_adjust
= 1;
3121 if ((insn2
& 0xf000) == 0xf000) /* <Rt> is PC. */
3124 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
3125 && (insn2
& 0x0e00) == 0x0a00)
3126 found_stack_adjust
= 1;
3137 /* Since any instruction in the epilogue sequence, with the possible
3138 exception of return itself, updates the stack pointer, we need to
3139 scan backwards for at most one instruction. Try either a 16-bit or
3140 a 32-bit instruction. This is just a heuristic, so we do not worry
3141 too much about false positives. */
3143 if (!found_stack_adjust
)
3145 if (pc
- 4 < func_start
)
3147 if (target_read_memory (pc
- 4, buf
, 4))
3150 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3151 insn2
= extract_unsigned_integer (buf
+ 2, 2, byte_order_for_code
);
3153 if (insn2
== 0x46bd) /* mov sp, r7 */
3154 found_stack_adjust
= 1;
3155 else if ((insn2
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3156 found_stack_adjust
= 1;
3157 else if ((insn2
& 0xff00) == 0xbc00) /* pop <registers> without PC */
3158 found_stack_adjust
= 1;
3159 else if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
3160 found_stack_adjust
= 1;
3161 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
3162 && (insn2
& 0x0fff) == 0x0b04)
3163 found_stack_adjust
= 1;
3164 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
3165 && (insn2
& 0x0e00) == 0x0a00)
3166 found_stack_adjust
= 1;
3169 return found_stack_adjust
;
3172 /* Return true if we are in the function's epilogue, i.e. after the
3173 instruction that destroyed the function's stack frame. */
3176 arm_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3178 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3180 int found_return
, found_stack_adjust
;
3181 CORE_ADDR func_start
, func_end
;
3183 if (arm_pc_is_thumb (gdbarch
, pc
))
3184 return thumb_in_function_epilogue_p (gdbarch
, pc
);
3186 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
3189 /* We are in the epilogue if the previous instruction was a stack
3190 adjustment and the next instruction is a possible return (bx, mov
3191 pc, or pop). We could have to scan backwards to find the stack
3192 adjustment, or forwards to find the return, but this is a decent
3193 approximation. First scan forwards. */
3196 insn
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
3197 if (bits (insn
, 28, 31) != INST_NV
)
3199 if ((insn
& 0x0ffffff0) == 0x012fff10)
3202 else if ((insn
& 0x0ffffff0) == 0x01a0f000)
3205 else if ((insn
& 0x0fff0000) == 0x08bd0000
3206 && (insn
& 0x0000c000) != 0)
3207 /* POP (LDMIA), including PC or LR. */
3214 /* Scan backwards. This is just a heuristic, so do not worry about
3215 false positives from mode changes. */
3217 if (pc
< func_start
+ 4)
3220 found_stack_adjust
= 0;
3221 insn
= read_memory_unsigned_integer (pc
- 4, 4, byte_order_for_code
);
3222 if (bits (insn
, 28, 31) != INST_NV
)
3224 if ((insn
& 0x0df0f000) == 0x0080d000)
3225 /* ADD SP (register or immediate). */
3226 found_stack_adjust
= 1;
3227 else if ((insn
& 0x0df0f000) == 0x0040d000)
3228 /* SUB SP (register or immediate). */
3229 found_stack_adjust
= 1;
3230 else if ((insn
& 0x0ffffff0) == 0x01a0d000)
3232 found_stack_adjust
= 1;
3233 else if ((insn
& 0x0fff0000) == 0x08bd0000)
3235 found_stack_adjust
= 1;
3238 if (found_stack_adjust
)
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  int len;			/* Size of DATA in bytes.  */
  struct stack_item *prev;	/* Next item down the stack (or NULL).  */
  void *data;			/* Heap-allocated copy of the argument.  */
};
3255 static struct stack_item
*
3256 push_stack_item (struct stack_item
*prev
, const void *contents
, int len
)
3258 struct stack_item
*si
;
3259 si
= xmalloc (sizeof (struct stack_item
));
3260 si
->data
= xmalloc (len
);
3263 memcpy (si
->data
, contents
, len
);
3267 static struct stack_item
*
3268 pop_stack_item (struct stack_item
*si
)
3270 struct stack_item
*dead
= si
;
3278 /* Return the alignment (in bytes) of the given type. */
3281 arm_type_align (struct type
*t
)
3287 t
= check_typedef (t
);
3288 switch (TYPE_CODE (t
))
3291 /* Should never happen. */
3292 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
3296 case TYPE_CODE_ENUM
:
3300 case TYPE_CODE_RANGE
:
3301 case TYPE_CODE_BITSTRING
:
3303 case TYPE_CODE_CHAR
:
3304 case TYPE_CODE_BOOL
:
3305 return TYPE_LENGTH (t
);
3307 case TYPE_CODE_ARRAY
:
3308 case TYPE_CODE_COMPLEX
:
3309 /* TODO: What about vector types? */
3310 return arm_type_align (TYPE_TARGET_TYPE (t
));
3312 case TYPE_CODE_STRUCT
:
3313 case TYPE_CODE_UNION
:
3315 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
3317 falign
= arm_type_align (TYPE_FIELD_TYPE (t
, n
));
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,
  VFP_CPRC_SINGLE,
  VFP_CPRC_DOUBLE,
  VFP_CPRC_VEC64,
  VFP_CPRC_VEC128
};
3337 /* The length of one element of base type B. */
3340 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b
)
3344 case VFP_CPRC_SINGLE
:
3346 case VFP_CPRC_DOUBLE
:
3348 case VFP_CPRC_VEC64
:
3350 case VFP_CPRC_VEC128
:
3353 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
3358 /* The character ('s', 'd' or 'q') for the type of VFP register used
3359 for passing base type B. */
3362 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b
)
3366 case VFP_CPRC_SINGLE
:
3368 case VFP_CPRC_DOUBLE
:
3370 case VFP_CPRC_VEC64
:
3372 case VFP_CPRC_VEC128
:
3375 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
3380 /* Determine whether T may be part of a candidate for passing and
3381 returning in VFP registers, ignoring the limit on the total number
3382 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3383 classification of the first valid component found; if it is not
3384 VFP_CPRC_UNKNOWN, all components must have the same classification
3385 as *BASE_TYPE. If it is found that T contains a type not permitted
3386 for passing and returning in VFP registers, a type differently
3387 classified from *BASE_TYPE, or two types differently classified
3388 from each other, return -1, otherwise return the total number of
3389 base-type elements found (possibly 0 in an empty structure or
3390 array). Vectors and complex types are not currently supported,
3391 matching the generic AAPCS support. */
3394 arm_vfp_cprc_sub_candidate (struct type
*t
,
3395 enum arm_vfp_cprc_base_type
*base_type
)
3397 t
= check_typedef (t
);
3398 switch (TYPE_CODE (t
))
3401 switch (TYPE_LENGTH (t
))
3404 if (*base_type
== VFP_CPRC_UNKNOWN
)
3405 *base_type
= VFP_CPRC_SINGLE
;
3406 else if (*base_type
!= VFP_CPRC_SINGLE
)
3411 if (*base_type
== VFP_CPRC_UNKNOWN
)
3412 *base_type
= VFP_CPRC_DOUBLE
;
3413 else if (*base_type
!= VFP_CPRC_DOUBLE
)
3422 case TYPE_CODE_ARRAY
:
3426 count
= arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t
), base_type
);
3429 if (TYPE_LENGTH (t
) == 0)
3431 gdb_assert (count
== 0);
3434 else if (count
== 0)
3436 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3437 gdb_assert ((TYPE_LENGTH (t
) % unitlen
) == 0);
3438 return TYPE_LENGTH (t
) / unitlen
;
3442 case TYPE_CODE_STRUCT
:
3447 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
3449 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
3451 if (sub_count
== -1)
3455 if (TYPE_LENGTH (t
) == 0)
3457 gdb_assert (count
== 0);
3460 else if (count
== 0)
3462 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3463 if (TYPE_LENGTH (t
) != unitlen
* count
)
3468 case TYPE_CODE_UNION
:
3473 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
3475 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
3477 if (sub_count
== -1)
3479 count
= (count
> sub_count
? count
: sub_count
);
3481 if (TYPE_LENGTH (t
) == 0)
3483 gdb_assert (count
== 0);
3486 else if (count
== 0)
3488 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3489 if (TYPE_LENGTH (t
) != unitlen
* count
)
3501 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3502 if passed to or returned from a non-variadic function with the VFP
3503 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3504 *BASE_TYPE to the base type for T and *COUNT to the number of
3505 elements of that base type before returning. */
3508 arm_vfp_call_candidate (struct type
*t
, enum arm_vfp_cprc_base_type
*base_type
,
3511 enum arm_vfp_cprc_base_type b
= VFP_CPRC_UNKNOWN
;
3512 int c
= arm_vfp_cprc_sub_candidate (t
, &b
);
3513 if (c
<= 0 || c
> 4)
3520 /* Return 1 if the VFP ABI should be used for passing arguments to and
3521 returning values from a function of type FUNC_TYPE, 0
3525 arm_vfp_abi_for_function (struct gdbarch
*gdbarch
, struct type
*func_type
)
3527 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3528 /* Variadic functions always use the base ABI. Assume that functions
3529 without debug info are not variadic. */
3530 if (func_type
&& TYPE_VARARGS (check_typedef (func_type
)))
3532 /* The VFP ABI is only supported as a variant of AAPCS. */
3533 if (tdep
->arm_abi
!= ARM_ABI_AAPCS
)
3535 return gdbarch_tdep (gdbarch
)->fp_model
== ARM_FLOAT_VFP
;
3538 /* We currently only support passing parameters in integer registers, which
3539 conforms with GCC's default model, and VFP argument passing following
3540 the VFP variant of AAPCS. Several other variants exist and
3541 we should probably support some of them based on the selected ABI. */
3544 arm_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
3545 struct regcache
*regcache
, CORE_ADDR bp_addr
, int nargs
,
3546 struct value
**args
, CORE_ADDR sp
, int struct_return
,
3547 CORE_ADDR struct_addr
)
3549 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3553 struct stack_item
*si
= NULL
;
3556 unsigned vfp_regs_free
= (1 << 16) - 1;
3558 /* Determine the type of this function and whether the VFP ABI
3560 ftype
= check_typedef (value_type (function
));
3561 if (TYPE_CODE (ftype
) == TYPE_CODE_PTR
)
3562 ftype
= check_typedef (TYPE_TARGET_TYPE (ftype
));
3563 use_vfp_abi
= arm_vfp_abi_for_function (gdbarch
, ftype
);
3565 /* Set the return address. For the ARM, the return breakpoint is
3566 always at BP_ADDR. */
3567 if (arm_pc_is_thumb (gdbarch
, bp_addr
))
3569 regcache_cooked_write_unsigned (regcache
, ARM_LR_REGNUM
, bp_addr
);
3571 /* Walk through the list of args and determine how large a temporary
3572 stack is required. Need to take care here as structs may be
3573 passed on the stack, and we have to to push them. */
3576 argreg
= ARM_A1_REGNUM
;
3579 /* The struct_return pointer occupies the first parameter
3580 passing register. */
3584 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = %s\n",
3585 gdbarch_register_name (gdbarch
, argreg
),
3586 paddress (gdbarch
, struct_addr
));
3587 regcache_cooked_write_unsigned (regcache
, argreg
, struct_addr
);
3591 for (argnum
= 0; argnum
< nargs
; argnum
++)
3594 struct type
*arg_type
;
3595 struct type
*target_type
;
3596 enum type_code typecode
;
3597 const bfd_byte
*val
;
3599 enum arm_vfp_cprc_base_type vfp_base_type
;
3601 int may_use_core_reg
= 1;
3603 arg_type
= check_typedef (value_type (args
[argnum
]));
3604 len
= TYPE_LENGTH (arg_type
);
3605 target_type
= TYPE_TARGET_TYPE (arg_type
);
3606 typecode
= TYPE_CODE (arg_type
);
3607 val
= value_contents (args
[argnum
]);
3609 align
= arm_type_align (arg_type
);
3610 /* Round alignment up to a whole number of words. */
3611 align
= (align
+ INT_REGISTER_SIZE
- 1) & ~(INT_REGISTER_SIZE
- 1);
3612 /* Different ABIs have different maximum alignments. */
3613 if (gdbarch_tdep (gdbarch
)->arm_abi
== ARM_ABI_APCS
)
3615 /* The APCS ABI only requires word alignment. */
3616 align
= INT_REGISTER_SIZE
;
3620 /* The AAPCS requires at most doubleword alignment. */
3621 if (align
> INT_REGISTER_SIZE
* 2)
3622 align
= INT_REGISTER_SIZE
* 2;
3626 && arm_vfp_call_candidate (arg_type
, &vfp_base_type
,
3634 /* Because this is a CPRC it cannot go in a core register or
3635 cause a core register to be skipped for alignment.
3636 Either it goes in VFP registers and the rest of this loop
3637 iteration is skipped for this argument, or it goes on the
3638 stack (and the stack alignment code is correct for this
3640 may_use_core_reg
= 0;
3642 unit_length
= arm_vfp_cprc_unit_length (vfp_base_type
);
3643 shift
= unit_length
/ 4;
3644 mask
= (1 << (shift
* vfp_base_count
)) - 1;
3645 for (regno
= 0; regno
< 16; regno
+= shift
)
3646 if (((vfp_regs_free
>> regno
) & mask
) == mask
)
3655 vfp_regs_free
&= ~(mask
<< regno
);
3656 reg_scaled
= regno
/ shift
;
3657 reg_char
= arm_vfp_cprc_reg_char (vfp_base_type
);
3658 for (i
= 0; i
< vfp_base_count
; i
++)
3662 if (reg_char
== 'q')
3663 arm_neon_quad_write (gdbarch
, regcache
, reg_scaled
+ i
,
3664 val
+ i
* unit_length
);
3667 sprintf (name_buf
, "%c%d", reg_char
, reg_scaled
+ i
);
3668 regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3670 regcache_cooked_write (regcache
, regnum
,
3671 val
+ i
* unit_length
);
3678 /* This CPRC could not go in VFP registers, so all VFP
3679 registers are now marked as used. */
3684 /* Push stack padding for doubleword alignment. */
3685 if (nstack
& (align
- 1))
3687 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
3688 nstack
+= INT_REGISTER_SIZE
;
3691 /* Doubleword aligned quantities must go in even register pairs. */
3692 if (may_use_core_reg
3693 && argreg
<= ARM_LAST_ARG_REGNUM
3694 && align
> INT_REGISTER_SIZE
3698 /* If the argument is a pointer to a function, and it is a
3699 Thumb function, create a LOCAL copy of the value and set
3700 the THUMB bit in it. */
3701 if (TYPE_CODE_PTR
== typecode
3702 && target_type
!= NULL
3703 && TYPE_CODE_FUNC
== TYPE_CODE (check_typedef (target_type
)))
3705 CORE_ADDR regval
= extract_unsigned_integer (val
, len
, byte_order
);
3706 if (arm_pc_is_thumb (gdbarch
, regval
))
3708 bfd_byte
*copy
= alloca (len
);
3709 store_unsigned_integer (copy
, len
, byte_order
,
3710 MAKE_THUMB_ADDR (regval
));
3715 /* Copy the argument to general registers or the stack in
3716 register-sized pieces. Large arguments are split between
3717 registers and stack. */
3720 int partial_len
= len
< INT_REGISTER_SIZE
? len
: INT_REGISTER_SIZE
;
3722 if (may_use_core_reg
&& argreg
<= ARM_LAST_ARG_REGNUM
)
3724 /* The argument is being passed in a general purpose
3727 = extract_unsigned_integer (val
, partial_len
, byte_order
);
3728 if (byte_order
== BFD_ENDIAN_BIG
)
3729 regval
<<= (INT_REGISTER_SIZE
- partial_len
) * 8;
3731 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
3733 gdbarch_register_name
3735 phex (regval
, INT_REGISTER_SIZE
));
3736 regcache_cooked_write_unsigned (regcache
, argreg
, regval
);
3741 /* Push the arguments onto the stack. */
3743 fprintf_unfiltered (gdb_stdlog
, "arg %d @ sp + %d\n",
3745 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
3746 nstack
+= INT_REGISTER_SIZE
;
3753 /* If we have an odd number of words to push, then decrement the stack
3754 by one word now, so first stack argument will be dword aligned. */
3761 write_memory (sp
, si
->data
, si
->len
);
3762 si
= pop_stack_item (si
);
3765 /* Finally, update the SP register. */
3766 regcache_cooked_write_unsigned (regcache
, ARM_SP_REGNUM
, sp
);
3772 /* Always align the frame to an 8-byte boundary. This is required on
3773 some platforms and harmless on the rest. */
3776 arm_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
3778 /* Align the stack to eight bytes. */
3779 return sp
& ~ (CORE_ADDR
) 7;
3783 print_fpu_flags (int flags
)
3785 if (flags
& (1 << 0))
3786 fputs ("IVO ", stdout
);
3787 if (flags
& (1 << 1))
3788 fputs ("DVZ ", stdout
);
3789 if (flags
& (1 << 2))
3790 fputs ("OFL ", stdout
);
3791 if (flags
& (1 << 3))
3792 fputs ("UFL ", stdout
);
3793 if (flags
& (1 << 4))
3794 fputs ("INX ", stdout
);
3798 /* Print interesting information about the floating point processor
3799 (if present) or emulator. */
3801 arm_print_float_info (struct gdbarch
*gdbarch
, struct ui_file
*file
,
3802 struct frame_info
*frame
, const char *args
)
3804 unsigned long status
= get_frame_register_unsigned (frame
, ARM_FPS_REGNUM
);
3807 type
= (status
>> 24) & 127;
3808 if (status
& (1 << 31))
3809 printf (_("Hardware FPU type %d\n"), type
);
3811 printf (_("Software FPU type %d\n"), type
);
3812 /* i18n: [floating point unit] mask */
3813 fputs (_("mask: "), stdout
);
3814 print_fpu_flags (status
>> 16);
3815 /* i18n: [floating point unit] flags */
3816 fputs (_("flags: "), stdout
);
3817 print_fpu_flags (status
);
3820 /* Construct the ARM extended floating point type. */
3821 static struct type
*
3822 arm_ext_type (struct gdbarch
*gdbarch
)
3824 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3826 if (!tdep
->arm_ext_type
)
3828 = arch_float_type (gdbarch
, -1, "builtin_type_arm_ext",
3829 floatformats_arm_ext
);
3831 return tdep
->arm_ext_type
;
3834 static struct type
*
3835 arm_neon_double_type (struct gdbarch
*gdbarch
)
3837 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3839 if (tdep
->neon_double_type
== NULL
)
3841 struct type
*t
, *elem
;
3843 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_d",
3845 elem
= builtin_type (gdbarch
)->builtin_uint8
;
3846 append_composite_type_field (t
, "u8", init_vector_type (elem
, 8));
3847 elem
= builtin_type (gdbarch
)->builtin_uint16
;
3848 append_composite_type_field (t
, "u16", init_vector_type (elem
, 4));
3849 elem
= builtin_type (gdbarch
)->builtin_uint32
;
3850 append_composite_type_field (t
, "u32", init_vector_type (elem
, 2));
3851 elem
= builtin_type (gdbarch
)->builtin_uint64
;
3852 append_composite_type_field (t
, "u64", elem
);
3853 elem
= builtin_type (gdbarch
)->builtin_float
;
3854 append_composite_type_field (t
, "f32", init_vector_type (elem
, 2));
3855 elem
= builtin_type (gdbarch
)->builtin_double
;
3856 append_composite_type_field (t
, "f64", elem
);
3858 TYPE_VECTOR (t
) = 1;
3859 TYPE_NAME (t
) = "neon_d";
3860 tdep
->neon_double_type
= t
;
3863 return tdep
->neon_double_type
;
3866 /* FIXME: The vector types are not correctly ordered on big-endian
3867 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3868 bits of d0 - regardless of what unit size is being held in d0. So
3869 the offset of the first uint8 in d0 is 7, but the offset of the
3870 first float is 4. This code works as-is for little-endian
3873 static struct type
*
3874 arm_neon_quad_type (struct gdbarch
*gdbarch
)
3876 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3878 if (tdep
->neon_quad_type
== NULL
)
3880 struct type
*t
, *elem
;
3882 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_q",
3884 elem
= builtin_type (gdbarch
)->builtin_uint8
;
3885 append_composite_type_field (t
, "u8", init_vector_type (elem
, 16));
3886 elem
= builtin_type (gdbarch
)->builtin_uint16
;
3887 append_composite_type_field (t
, "u16", init_vector_type (elem
, 8));
3888 elem
= builtin_type (gdbarch
)->builtin_uint32
;
3889 append_composite_type_field (t
, "u32", init_vector_type (elem
, 4));
3890 elem
= builtin_type (gdbarch
)->builtin_uint64
;
3891 append_composite_type_field (t
, "u64", init_vector_type (elem
, 2));
3892 elem
= builtin_type (gdbarch
)->builtin_float
;
3893 append_composite_type_field (t
, "f32", init_vector_type (elem
, 4));
3894 elem
= builtin_type (gdbarch
)->builtin_double
;
3895 append_composite_type_field (t
, "f64", init_vector_type (elem
, 2));
3897 TYPE_VECTOR (t
) = 1;
3898 TYPE_NAME (t
) = "neon_q";
3899 tdep
->neon_quad_type
= t
;
3902 return tdep
->neon_quad_type
;
3905 /* Return the GDB type object for the "standard" data type of data in
3908 static struct type
*
3909 arm_register_type (struct gdbarch
*gdbarch
, int regnum
)
3911 int num_regs
= gdbarch_num_regs (gdbarch
);
3913 if (gdbarch_tdep (gdbarch
)->have_vfp_pseudos
3914 && regnum
>= num_regs
&& regnum
< num_regs
+ 32)
3915 return builtin_type (gdbarch
)->builtin_float
;
3917 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
3918 && regnum
>= num_regs
+ 32 && regnum
< num_regs
+ 32 + 16)
3919 return arm_neon_quad_type (gdbarch
);
3921 /* If the target description has register information, we are only
3922 in this function so that we can override the types of
3923 double-precision registers for NEON. */
3924 if (tdesc_has_registers (gdbarch_target_desc (gdbarch
)))
3926 struct type
*t
= tdesc_register_type (gdbarch
, regnum
);
3928 if (regnum
>= ARM_D0_REGNUM
&& regnum
< ARM_D0_REGNUM
+ 32
3929 && TYPE_CODE (t
) == TYPE_CODE_FLT
3930 && gdbarch_tdep (gdbarch
)->have_neon
)
3931 return arm_neon_double_type (gdbarch
);
3936 if (regnum
>= ARM_F0_REGNUM
&& regnum
< ARM_F0_REGNUM
+ NUM_FREGS
)
3938 if (!gdbarch_tdep (gdbarch
)->have_fpa_registers
)
3939 return builtin_type (gdbarch
)->builtin_void
;
3941 return arm_ext_type (gdbarch
);
3943 else if (regnum
== ARM_SP_REGNUM
)
3944 return builtin_type (gdbarch
)->builtin_data_ptr
;
3945 else if (regnum
== ARM_PC_REGNUM
)
3946 return builtin_type (gdbarch
)->builtin_func_ptr
;
3947 else if (regnum
>= ARRAY_SIZE (arm_register_names
))
3948 /* These registers are only supported on targets which supply
3949 an XML description. */
3950 return builtin_type (gdbarch
)->builtin_int0
;
3952 return builtin_type (gdbarch
)->builtin_uint32
;
3955 /* Map a DWARF register REGNUM onto the appropriate GDB register
3959 arm_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
3961 /* Core integer regs. */
3962 if (reg
>= 0 && reg
<= 15)
3965 /* Legacy FPA encoding. These were once used in a way which
3966 overlapped with VFP register numbering, so their use is
3967 discouraged, but GDB doesn't support the ARM toolchain
3968 which used them for VFP. */
3969 if (reg
>= 16 && reg
<= 23)
3970 return ARM_F0_REGNUM
+ reg
- 16;
3972 /* New assignments for the FPA registers. */
3973 if (reg
>= 96 && reg
<= 103)
3974 return ARM_F0_REGNUM
+ reg
- 96;
3976 /* WMMX register assignments. */
3977 if (reg
>= 104 && reg
<= 111)
3978 return ARM_WCGR0_REGNUM
+ reg
- 104;
3980 if (reg
>= 112 && reg
<= 127)
3981 return ARM_WR0_REGNUM
+ reg
- 112;
3983 if (reg
>= 192 && reg
<= 199)
3984 return ARM_WC0_REGNUM
+ reg
- 192;
3986 /* VFP v2 registers. A double precision value is actually
3987 in d1 rather than s2, but the ABI only defines numbering
3988 for the single precision registers. This will "just work"
3989 in GDB for little endian targets (we'll read eight bytes,
3990 starting in s0 and then progressing to s1), but will be
3991 reversed on big endian targets with VFP. This won't
3992 be a problem for the new Neon quad registers; you're supposed
3993 to use DW_OP_piece for those. */
3994 if (reg
>= 64 && reg
<= 95)
3998 sprintf (name_buf
, "s%d", reg
- 64);
3999 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
4003 /* VFP v3 / Neon registers. This range is also used for VFP v2
4004 registers, except that it now describes d0 instead of s0. */
4005 if (reg
>= 256 && reg
<= 287)
4009 sprintf (name_buf
, "d%d", reg
- 256);
4010 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
4017 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4019 arm_register_sim_regno (struct gdbarch
*gdbarch
, int regnum
)
4022 gdb_assert (reg
>= 0 && reg
< gdbarch_num_regs (gdbarch
));
4024 if (regnum
>= ARM_WR0_REGNUM
&& regnum
<= ARM_WR15_REGNUM
)
4025 return regnum
- ARM_WR0_REGNUM
+ SIM_ARM_IWMMXT_COP0R0_REGNUM
;
4027 if (regnum
>= ARM_WC0_REGNUM
&& regnum
<= ARM_WC7_REGNUM
)
4028 return regnum
- ARM_WC0_REGNUM
+ SIM_ARM_IWMMXT_COP1R0_REGNUM
;
4030 if (regnum
>= ARM_WCGR0_REGNUM
&& regnum
<= ARM_WCGR7_REGNUM
)
4031 return regnum
- ARM_WCGR0_REGNUM
+ SIM_ARM_IWMMXT_COP1R8_REGNUM
;
4033 if (reg
< NUM_GREGS
)
4034 return SIM_ARM_R0_REGNUM
+ reg
;
4037 if (reg
< NUM_FREGS
)
4038 return SIM_ARM_FP0_REGNUM
+ reg
;
4041 if (reg
< NUM_SREGS
)
4042 return SIM_ARM_FPS_REGNUM
+ reg
;
4045 internal_error (__FILE__
, __LINE__
, _("Bad REGNUM %d"), regnum
);
4048 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4049 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4050 It is thought that this is the floating-point register format on
4051 little-endian systems. */
4054 convert_from_extended (const struct floatformat
*fmt
, const void *ptr
,
4055 void *dbl
, int endianess
)
4059 if (endianess
== BFD_ENDIAN_BIG
)
4060 floatformat_to_doublest (&floatformat_arm_ext_big
, ptr
, &d
);
4062 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword
,
4064 floatformat_from_doublest (fmt
, &d
, dbl
);
4068 convert_to_extended (const struct floatformat
*fmt
, void *dbl
, const void *ptr
,
4073 floatformat_to_doublest (fmt
, ptr
, &d
);
4074 if (endianess
== BFD_ENDIAN_BIG
)
4075 floatformat_from_doublest (&floatformat_arm_ext_big
, &d
, dbl
);
4077 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword
,
4082 condition_true (unsigned long cond
, unsigned long status_reg
)
4084 if (cond
== INST_AL
|| cond
== INST_NV
)
4090 return ((status_reg
& FLAG_Z
) != 0);
4092 return ((status_reg
& FLAG_Z
) == 0);
4094 return ((status_reg
& FLAG_C
) != 0);
4096 return ((status_reg
& FLAG_C
) == 0);
4098 return ((status_reg
& FLAG_N
) != 0);
4100 return ((status_reg
& FLAG_N
) == 0);
4102 return ((status_reg
& FLAG_V
) != 0);
4104 return ((status_reg
& FLAG_V
) == 0);
4106 return ((status_reg
& (FLAG_C
| FLAG_Z
)) == FLAG_C
);
4108 return ((status_reg
& (FLAG_C
| FLAG_Z
)) != FLAG_C
);
4110 return (((status_reg
& FLAG_N
) == 0) == ((status_reg
& FLAG_V
) == 0));
4112 return (((status_reg
& FLAG_N
) == 0) != ((status_reg
& FLAG_V
) == 0));
4114 return (((status_reg
& FLAG_Z
) == 0)
4115 && (((status_reg
& FLAG_N
) == 0)
4116 == ((status_reg
& FLAG_V
) == 0)));
4118 return (((status_reg
& FLAG_Z
) != 0)
4119 || (((status_reg
& FLAG_N
) == 0)
4120 != ((status_reg
& FLAG_V
) == 0)));
4125 static unsigned long
4126 shifted_reg_val (struct frame_info
*frame
, unsigned long inst
, int carry
,
4127 unsigned long pc_val
, unsigned long status_reg
)
4129 unsigned long res
, shift
;
4130 int rm
= bits (inst
, 0, 3);
4131 unsigned long shifttype
= bits (inst
, 5, 6);
4135 int rs
= bits (inst
, 8, 11);
4136 shift
= (rs
== 15 ? pc_val
+ 8
4137 : get_frame_register_unsigned (frame
, rs
)) & 0xFF;
4140 shift
= bits (inst
, 7, 11);
4142 res
= (rm
== ARM_PC_REGNUM
4143 ? (pc_val
+ (bit (inst
, 4) ? 12 : 8))
4144 : get_frame_register_unsigned (frame
, rm
));
4149 res
= shift
>= 32 ? 0 : res
<< shift
;
4153 res
= shift
>= 32 ? 0 : res
>> shift
;
4159 res
= ((res
& 0x80000000L
)
4160 ? ~((~res
) >> shift
) : res
>> shift
);
4163 case 3: /* ROR/RRX */
4166 res
= (res
>> 1) | (carry
? 0x80000000L
: 0);
4168 res
= (res
>> shift
) | (res
<< (32 - shift
));
4172 return res
& 0xffffffff;
4175 /* Return number of 1-bits in VAL. */
4178 bitcount (unsigned long val
)
4181 for (nbits
= 0; val
!= 0; nbits
++)
4182 val
&= val
- 1; /* Delete rightmost 1-bit in val. */
4186 /* Return the size in bytes of the complete Thumb instruction whose
4187 first halfword is INST1. */
4190 thumb_insn_size (unsigned short inst1
)
4192 if ((inst1
& 0xe000) == 0xe000 && (inst1
& 0x1800) != 0)
4199 thumb_advance_itstate (unsigned int itstate
)
4201 /* Preserve IT[7:5], the first three bits of the condition. Shift
4202 the upcoming condition flags left by one bit. */
4203 itstate
= (itstate
& 0xe0) | ((itstate
<< 1) & 0x1f);
4205 /* If we have finished the IT block, clear the state. */
4206 if ((itstate
& 0x0f) == 0)
4212 /* Find the next PC after the current instruction executes. In some
4213 cases we can not statically determine the answer (see the IT state
4214 handling in this function); in that case, a breakpoint may be
4215 inserted in addition to the returned PC, which will be used to set
4216 another breakpoint by our caller. */
4219 thumb_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
, int insert_bkpt
)
4221 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4222 struct address_space
*aspace
= get_frame_address_space (frame
);
4223 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4224 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4225 unsigned long pc_val
= ((unsigned long) pc
) + 4; /* PC after prefetch */
4226 unsigned short inst1
;
4227 CORE_ADDR nextpc
= pc
+ 2; /* Default is next instruction. */
4228 unsigned long offset
;
4229 ULONGEST status
, itstate
;
4231 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4232 pc_val
= MAKE_THUMB_ADDR (pc_val
);
4234 inst1
= read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
4236 /* Thumb-2 conditional execution support. There are eight bits in
4237 the CPSR which describe conditional execution state. Once
4238 reconstructed (they're in a funny order), the low five bits
4239 describe the low bit of the condition for each instruction and
4240 how many instructions remain. The high three bits describe the
4241 base condition. One of the low four bits will be set if an IT
4242 block is active. These bits read as zero on earlier
4244 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4245 itstate
= ((status
>> 8) & 0xfc) | ((status
>> 25) & 0x3);
4247 /* If-Then handling. On GNU/Linux, where this routine is used, we
4248 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4249 can disable execution of the undefined instruction. So we might
4250 miss the breakpoint if we set it on a skipped conditional
4251 instruction. Because conditional instructions can change the
4252 flags, affecting the execution of further instructions, we may
4253 need to set two breakpoints. */
4255 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
!= NULL
)
4257 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
4259 /* An IT instruction. Because this instruction does not
4260 modify the flags, we can accurately predict the next
4261 executed instruction. */
4262 itstate
= inst1
& 0x00ff;
4263 pc
+= thumb_insn_size (inst1
);
4265 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
4267 inst1
= read_memory_unsigned_integer (pc
, 2,
4268 byte_order_for_code
);
4269 pc
+= thumb_insn_size (inst1
);
4270 itstate
= thumb_advance_itstate (itstate
);
4273 return MAKE_THUMB_ADDR (pc
);
4275 else if (itstate
!= 0)
4277 /* We are in a conditional block. Check the condition. */
4278 if (! condition_true (itstate
>> 4, status
))
4280 /* Advance to the next executed instruction. */
4281 pc
+= thumb_insn_size (inst1
);
4282 itstate
= thumb_advance_itstate (itstate
);
4284 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
4286 inst1
= read_memory_unsigned_integer (pc
, 2,
4287 byte_order_for_code
);
4288 pc
+= thumb_insn_size (inst1
);
4289 itstate
= thumb_advance_itstate (itstate
);
4292 return MAKE_THUMB_ADDR (pc
);
4294 else if ((itstate
& 0x0f) == 0x08)
4296 /* This is the last instruction of the conditional
4297 block, and it is executed. We can handle it normally
4298 because the following instruction is not conditional,
4299 and we must handle it normally because it is
4300 permitted to branch. Fall through. */
4306 /* There are conditional instructions after this one.
4307 If this instruction modifies the flags, then we can
4308 not predict what the next executed instruction will
4309 be. Fortunately, this instruction is architecturally
4310 forbidden to branch; we know it will fall through.
4311 Start by skipping past it. */
4312 pc
+= thumb_insn_size (inst1
);
4313 itstate
= thumb_advance_itstate (itstate
);
4315 /* Set a breakpoint on the following instruction. */
4316 gdb_assert ((itstate
& 0x0f) != 0);
4318 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
4319 cond_negated
= (itstate
>> 4) & 1;
4321 /* Skip all following instructions with the same
4322 condition. If there is a later instruction in the IT
4323 block with the opposite condition, set the other
4324 breakpoint there. If not, then set a breakpoint on
4325 the instruction after the IT block. */
4328 inst1
= read_memory_unsigned_integer (pc
, 2,
4329 byte_order_for_code
);
4330 pc
+= thumb_insn_size (inst1
);
4331 itstate
= thumb_advance_itstate (itstate
);
4333 while (itstate
!= 0 && ((itstate
>> 4) & 1) == cond_negated
);
4335 return MAKE_THUMB_ADDR (pc
);
4339 else if (itstate
& 0x0f)
4341 /* We are in a conditional block. Check the condition. */
4342 int cond
= itstate
>> 4;
4344 if (! condition_true (cond
, status
))
4346 /* Advance to the next instruction. All the 32-bit
4347 instructions share a common prefix. */
4348 if ((inst1
& 0xe000) == 0xe000 && (inst1
& 0x1800) != 0)
4349 return MAKE_THUMB_ADDR (pc
+ 4);
4351 return MAKE_THUMB_ADDR (pc
+ 2);
4354 /* Otherwise, handle the instruction normally. */
4357 if ((inst1
& 0xff00) == 0xbd00) /* pop {rlist, pc} */
4361 /* Fetch the saved PC from the stack. It's stored above
4362 all of the other registers. */
4363 offset
= bitcount (bits (inst1
, 0, 7)) * INT_REGISTER_SIZE
;
4364 sp
= get_frame_register_unsigned (frame
, ARM_SP_REGNUM
);
4365 nextpc
= read_memory_unsigned_integer (sp
+ offset
, 4, byte_order
);
4367 else if ((inst1
& 0xf000) == 0xd000) /* conditional branch */
4369 unsigned long cond
= bits (inst1
, 8, 11);
4370 if (cond
== 0x0f) /* 0x0f = SWI */
4372 struct gdbarch_tdep
*tdep
;
4373 tdep
= gdbarch_tdep (gdbarch
);
4375 if (tdep
->syscall_next_pc
!= NULL
)
4376 nextpc
= tdep
->syscall_next_pc (frame
);
4379 else if (cond
!= 0x0f && condition_true (cond
, status
))
4380 nextpc
= pc_val
+ (sbits (inst1
, 0, 7) << 1);
4382 else if ((inst1
& 0xf800) == 0xe000) /* unconditional branch */
4384 nextpc
= pc_val
+ (sbits (inst1
, 0, 10) << 1);
4386 else if ((inst1
& 0xe000) == 0xe000) /* 32-bit instruction */
4388 unsigned short inst2
;
4389 inst2
= read_memory_unsigned_integer (pc
+ 2, 2, byte_order_for_code
);
4391 /* Default to the next instruction. */
4393 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4395 if ((inst1
& 0xf800) == 0xf000 && (inst2
& 0x8000) == 0x8000)
4397 /* Branches and miscellaneous control instructions. */
4399 if ((inst2
& 0x1000) != 0 || (inst2
& 0xd001) == 0xc000)
4402 int j1
, j2
, imm1
, imm2
;
4404 imm1
= sbits (inst1
, 0, 10);
4405 imm2
= bits (inst2
, 0, 10);
4406 j1
= bit (inst2
, 13);
4407 j2
= bit (inst2
, 11);
4409 offset
= ((imm1
<< 12) + (imm2
<< 1));
4410 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
4412 nextpc
= pc_val
+ offset
;
4413 /* For BLX make sure to clear the low bits. */
4414 if (bit (inst2
, 12) == 0)
4415 nextpc
= nextpc
& 0xfffffffc;
4417 else if (inst1
== 0xf3de && (inst2
& 0xff00) == 0x3f00)
4419 /* SUBS PC, LR, #imm8. */
4420 nextpc
= get_frame_register_unsigned (frame
, ARM_LR_REGNUM
);
4421 nextpc
-= inst2
& 0x00ff;
4423 else if ((inst2
& 0xd000) == 0x8000 && (inst1
& 0x0380) != 0x0380)
4425 /* Conditional branch. */
4426 if (condition_true (bits (inst1
, 6, 9), status
))
4428 int sign
, j1
, j2
, imm1
, imm2
;
4430 sign
= sbits (inst1
, 10, 10);
4431 imm1
= bits (inst1
, 0, 5);
4432 imm2
= bits (inst2
, 0, 10);
4433 j1
= bit (inst2
, 13);
4434 j2
= bit (inst2
, 11);
4436 offset
= (sign
<< 20) + (j2
<< 19) + (j1
<< 18);
4437 offset
+= (imm1
<< 12) + (imm2
<< 1);
4439 nextpc
= pc_val
+ offset
;
4443 else if ((inst1
& 0xfe50) == 0xe810)
4445 /* Load multiple or RFE. */
4446 int rn
, offset
, load_pc
= 1;
4448 rn
= bits (inst1
, 0, 3);
4449 if (bit (inst1
, 7) && !bit (inst1
, 8))
4452 if (!bit (inst2
, 15))
4454 offset
= bitcount (inst2
) * 4 - 4;
4456 else if (!bit (inst1
, 7) && bit (inst1
, 8))
4459 if (!bit (inst2
, 15))
4463 else if (bit (inst1
, 7) && bit (inst1
, 8))
4468 else if (!bit (inst1
, 7) && !bit (inst1
, 8))
4478 CORE_ADDR addr
= get_frame_register_unsigned (frame
, rn
);
4479 nextpc
= get_frame_memory_unsigned (frame
, addr
+ offset
, 4);
4482 else if ((inst1
& 0xffef) == 0xea4f && (inst2
& 0xfff0) == 0x0f00)
4484 /* MOV PC or MOVS PC. */
4485 nextpc
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4486 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4488 else if ((inst1
& 0xff70) == 0xf850 && (inst2
& 0xf000) == 0xf000)
4492 int rn
, load_pc
= 1;
4494 rn
= bits (inst1
, 0, 3);
4495 base
= get_frame_register_unsigned (frame
, rn
);
4496 if (rn
== ARM_PC_REGNUM
)
4498 base
= (base
+ 4) & ~(CORE_ADDR
) 0x3;
4500 base
+= bits (inst2
, 0, 11);
4502 base
-= bits (inst2
, 0, 11);
4504 else if (bit (inst1
, 7))
4505 base
+= bits (inst2
, 0, 11);
4506 else if (bit (inst2
, 11))
4508 if (bit (inst2
, 10))
4511 base
+= bits (inst2
, 0, 7);
4513 base
-= bits (inst2
, 0, 7);
4516 else if ((inst2
& 0x0fc0) == 0x0000)
4518 int shift
= bits (inst2
, 4, 5), rm
= bits (inst2
, 0, 3);
4519 base
+= get_frame_register_unsigned (frame
, rm
) << shift
;
4526 nextpc
= get_frame_memory_unsigned (frame
, base
, 4);
4528 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf000)
4531 CORE_ADDR tbl_reg
, table
, offset
, length
;
4533 tbl_reg
= bits (inst1
, 0, 3);
4534 if (tbl_reg
== 0x0f)
4535 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
4537 table
= get_frame_register_unsigned (frame
, tbl_reg
);
4539 offset
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4540 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 1);
4541 nextpc
= pc_val
+ length
;
4543 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf010)
4546 CORE_ADDR tbl_reg
, table
, offset
, length
;
4548 tbl_reg
= bits (inst1
, 0, 3);
4549 if (tbl_reg
== 0x0f)
4550 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
4552 table
= get_frame_register_unsigned (frame
, tbl_reg
);
4554 offset
= 2 * get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4555 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 2);
4556 nextpc
= pc_val
+ length
;
4559 else if ((inst1
& 0xff00) == 0x4700) /* bx REG, blx REG */
4561 if (bits (inst1
, 3, 6) == 0x0f)
4564 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
4566 else if ((inst1
& 0xff87) == 0x4687) /* mov pc, REG */
4568 if (bits (inst1
, 3, 6) == 0x0f)
4571 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
4573 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4575 else if ((inst1
& 0xf500) == 0xb100)
4578 int imm
= (bit (inst1
, 9) << 6) + (bits (inst1
, 3, 7) << 1);
4579 ULONGEST reg
= get_frame_register_unsigned (frame
, bits (inst1
, 0, 2));
4581 if (bit (inst1
, 11) && reg
!= 0)
4582 nextpc
= pc_val
+ imm
;
4583 else if (!bit (inst1
, 11) && reg
== 0)
4584 nextpc
= pc_val
+ imm
;
4589 /* Get the raw next address. PC is the current program counter, in
4590 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
4591 the alternative next instruction if there are two options.
4593 The value returned has the execution state of the next instruction
4594 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4595 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4599 arm_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
, int insert_bkpt
)
4601 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4602 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4603 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4604 unsigned long pc_val
;
4605 unsigned long this_instr
;
4606 unsigned long status
;
4609 if (arm_frame_is_thumb (frame
))
4610 return thumb_get_next_pc_raw (frame
, pc
, insert_bkpt
);
4612 pc_val
= (unsigned long) pc
;
4613 this_instr
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
4615 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4616 nextpc
= (CORE_ADDR
) (pc_val
+ 4); /* Default case */
4618 if (bits (this_instr
, 28, 31) == INST_NV
)
4619 switch (bits (this_instr
, 24, 27))
4624 /* Branch with Link and change to Thumb. */
4625 nextpc
= BranchDest (pc
, this_instr
);
4626 nextpc
|= bit (this_instr
, 24) << 1;
4627 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4633 /* Coprocessor register transfer. */
4634 if (bits (this_instr
, 12, 15) == 15)
4635 error (_("Invalid update to pc in instruction"));
4638 else if (condition_true (bits (this_instr
, 28, 31), status
))
4640 switch (bits (this_instr
, 24, 27))
4643 case 0x1: /* data processing */
4647 unsigned long operand1
, operand2
, result
= 0;
4651 if (bits (this_instr
, 12, 15) != 15)
4654 if (bits (this_instr
, 22, 25) == 0
4655 && bits (this_instr
, 4, 7) == 9) /* multiply */
4656 error (_("Invalid update to pc in instruction"));
4658 /* BX <reg>, BLX <reg> */
4659 if (bits (this_instr
, 4, 27) == 0x12fff1
4660 || bits (this_instr
, 4, 27) == 0x12fff3)
4662 rn
= bits (this_instr
, 0, 3);
4663 nextpc
= ((rn
== ARM_PC_REGNUM
)
4665 : get_frame_register_unsigned (frame
, rn
));
4670 /* Multiply into PC. */
4671 c
= (status
& FLAG_C
) ? 1 : 0;
4672 rn
= bits (this_instr
, 16, 19);
4673 operand1
= ((rn
== ARM_PC_REGNUM
)
4675 : get_frame_register_unsigned (frame
, rn
));
4677 if (bit (this_instr
, 25))
4679 unsigned long immval
= bits (this_instr
, 0, 7);
4680 unsigned long rotate
= 2 * bits (this_instr
, 8, 11);
4681 operand2
= ((immval
>> rotate
) | (immval
<< (32 - rotate
)))
4684 else /* operand 2 is a shifted register. */
4685 operand2
= shifted_reg_val (frame
, this_instr
, c
,
4688 switch (bits (this_instr
, 21, 24))
4691 result
= operand1
& operand2
;
4695 result
= operand1
^ operand2
;
4699 result
= operand1
- operand2
;
4703 result
= operand2
- operand1
;
4707 result
= operand1
+ operand2
;
4711 result
= operand1
+ operand2
+ c
;
4715 result
= operand1
- operand2
+ c
;
4719 result
= operand2
- operand1
+ c
;
4725 case 0xb: /* tst, teq, cmp, cmn */
4726 result
= (unsigned long) nextpc
;
4730 result
= operand1
| operand2
;
4734 /* Always step into a function. */
4739 result
= operand1
& ~operand2
;
4747 /* In 26-bit APCS the bottom two bits of the result are
4748 ignored, and we always end up in ARM state. */
4750 nextpc
= arm_addr_bits_remove (gdbarch
, result
);
4758 case 0x5: /* data transfer */
4761 if (bit (this_instr
, 20))
4764 if (bits (this_instr
, 12, 15) == 15)
4770 if (bit (this_instr
, 22))
4771 error (_("Invalid update to pc in instruction"));
4773 /* byte write to PC */
4774 rn
= bits (this_instr
, 16, 19);
4775 base
= ((rn
== ARM_PC_REGNUM
)
4777 : get_frame_register_unsigned (frame
, rn
));
4779 if (bit (this_instr
, 24))
4782 int c
= (status
& FLAG_C
) ? 1 : 0;
4783 unsigned long offset
=
4784 (bit (this_instr
, 25)
4785 ? shifted_reg_val (frame
, this_instr
, c
, pc_val
, status
)
4786 : bits (this_instr
, 0, 11));
4788 if (bit (this_instr
, 23))
4793 nextpc
= (CORE_ADDR
) read_memory_integer ((CORE_ADDR
) base
,
4800 case 0x9: /* block transfer */
4801 if (bit (this_instr
, 20))
4804 if (bit (this_instr
, 15))
4809 if (bit (this_instr
, 23))
4812 unsigned long reglist
= bits (this_instr
, 0, 14);
4813 offset
= bitcount (reglist
) * 4;
4814 if (bit (this_instr
, 24)) /* pre */
4817 else if (bit (this_instr
, 24))
4821 unsigned long rn_val
=
4822 get_frame_register_unsigned (frame
,
4823 bits (this_instr
, 16, 19));
4825 (CORE_ADDR
) read_memory_integer ((CORE_ADDR
) (rn_val
4833 case 0xb: /* branch & link */
4834 case 0xa: /* branch */
4836 nextpc
= BranchDest (pc
, this_instr
);
4842 case 0xe: /* coproc ops */
4846 struct gdbarch_tdep
*tdep
;
4847 tdep
= gdbarch_tdep (gdbarch
);
4849 if (tdep
->syscall_next_pc
!= NULL
)
4850 nextpc
= tdep
->syscall_next_pc (frame
);
4856 fprintf_filtered (gdb_stderr
, _("Bad bit-field extraction\n"));
4865 arm_get_next_pc (struct frame_info
*frame
, CORE_ADDR pc
)
4867 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4869 gdbarch_addr_bits_remove (gdbarch
,
4870 arm_get_next_pc_raw (frame
, pc
, TRUE
));
4872 error (_("Infinite loop detected"));
4876 /* single_step() is called just before we want to resume the inferior,
4877 if we want to single-step it but there is no hardware or kernel
4878 single-step support. We find the target of the coming instruction
4879 and breakpoint it. */
4882 arm_software_single_step (struct frame_info
*frame
)
4884 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4885 struct address_space
*aspace
= get_frame_address_space (frame
);
4887 /* NOTE: This may insert the wrong breakpoint instruction when
4888 single-stepping over a mode-changing instruction, if the
4889 CPSR heuristics are used. */
4891 CORE_ADDR next_pc
= arm_get_next_pc (frame
, get_frame_pc (frame
));
4892 insert_single_step_breakpoint (gdbarch
, aspace
, next_pc
);
4897 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4898 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4899 NULL if an error occurs. BUF is freed. */
/* Implementation: allocate NEW_LEN bytes, copy the OLD_LEN existing bytes
   into the tail, and read the extra (NEW_LEN - OLD_LEN) earlier bytes from
   target memory into the head.  The error/cleanup path after the
   target_read_memory check is elided in this copy.  NOTE(review): MIDDLE
   is declared but no use of it is visible here — confirm whether it is
   dead in the full source.  */
4902 extend_buffer_earlier (gdb_byte
*buf
, CORE_ADDR endaddr
,
4903 int old_len
, int new_len
)
4905 gdb_byte
*new_buf
, *middle
;
4906 int bytes_to_read
= new_len
- old_len
;
4908 new_buf
= xmalloc (new_len
);
4909 memcpy (new_buf
+ bytes_to_read
, buf
, old_len
);
4911 if (target_read_memory (endaddr
- new_len
, new_buf
, bytes_to_read
) != 0)
/* Limits used by arm_adjust_breakpoint_address when hunting for Thumb-2
   IT blocks before a breakpoint address.  NOTE(review): the comment above
   IT_SCAN_THRESHOLD is truncated in this copy (presumably "...bytes of
   code before the breakpoint").  */
4919 /* An IT block is at most the 2-byte IT instruction followed by
4920 four 4-byte instructions. The furthest back we must search to
4921 find an IT block that affects the current instruction is thus
4922 2 + 3 * 4 == 14 bytes. */
4923 #define MAX_IT_BLOCK_PREFIX 14
4925 /* Use a quick scan if there are more than this many bytes of
4927 #define IT_SCAN_THRESHOLD 32
/* Move a requested breakpoint address BPADDR out of a Thumb-2 IT block:
   a breakpoint inside an IT block may never be hit.  Strategy visible
   below: (1) bail out early when BKPT-style Thumb-2 breakpoints are in
   use or the address is ARM code; (2) bound the backwards search using
   mapping symbols and the enclosing function start; (3) cheaply scan for
   halfwords that look like IT instructions; (4) disambiguate real IT
   instructions from second halves of 32-bit instructions by scanning
   forward from a known boundary; (5) return the address of the last IT
   instruction found, or (paths elided here) the original BPADDR.  Several
   declarations, branches, and frees are elided in this copy — do not
   infer control flow beyond what is shown.  */
4929 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4930 A breakpoint in an IT block may not be hit, depending on the
4933 arm_adjust_breakpoint_address (struct gdbarch
*gdbarch
, CORE_ADDR bpaddr
)
4937 CORE_ADDR boundary
, func_start
;
4938 int buf_len
, buf2_len
;
4939 enum bfd_endian order
= gdbarch_byte_order_for_code (gdbarch
);
4940 int i
, any
, last_it
, last_it_count
;
4942 /* If we are using BKPT breakpoints, none of this is necessary. */
4943 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
== NULL
)
4946 /* ARM mode does not have this problem. */
4947 if (!arm_pc_is_thumb (gdbarch
, bpaddr
))
4950 /* We are setting a breakpoint in Thumb code that could potentially
4951 contain an IT block. The first step is to find how much Thumb
4952 code there is; we do not need to read outside of known Thumb
4954 map_type
= arm_find_mapping_symbol (bpaddr
, &boundary
);
4956 /* Thumb-2 code must have mapping symbols to have a chance. */
4959 bpaddr
= gdbarch_addr_bits_remove (gdbarch
, bpaddr
);
4961 if (find_pc_partial_function (bpaddr
, NULL
, &func_start
, NULL
)
4962 && func_start
> boundary
)
4963 boundary
= func_start
;
4965 /* Search for a candidate IT instruction. We have to do some fancy
4966 footwork to distinguish a real IT instruction from the second
4967 half of a 32-bit instruction, but there is no need for that if
4968 there's no candidate. */
4969 buf_len
= min (bpaddr
- boundary
, MAX_IT_BLOCK_PREFIX
);
4971 /* No room for an IT instruction. */
4974 buf
= xmalloc (buf_len
);
4975 if (target_read_memory (bpaddr
- buf_len
, buf
, buf_len
) != 0)
4978 for (i
= 0; i
< buf_len
; i
+= 2)
4980 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
4981 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
4993 /* OK, the code bytes before this instruction contain at least one
4994 halfword which resembles an IT instruction. We know that it's
4995 Thumb code, but there are still two possibilities. Either the
4996 halfword really is an IT instruction, or it is the second half of
4997 a 32-bit Thumb instruction. The only way we can tell is to
4998 scan forwards from a known instruction boundary. */
4999 if (bpaddr
- boundary
> IT_SCAN_THRESHOLD
)
5003 /* There's a lot of code before this instruction. Start with an
5004 optimistic search; it's easy to recognize halfwords that can
5005 not be the start of a 32-bit instruction, and use that to
5006 lock on to the instruction boundaries. */
5007 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, IT_SCAN_THRESHOLD
);
5010 buf_len
= IT_SCAN_THRESHOLD
;
5013 for (i
= 0; i
< buf_len
- sizeof (buf
) && ! definite
; i
+= 2)
5015 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5016 if (thumb_insn_size (inst1
) == 2)
5023 /* At this point, if DEFINITE, BUF[I] is the first place we
5024 are sure that we know the instruction boundaries, and it is far
5025 enough from BPADDR that we could not miss an IT instruction
5026 affecting BPADDR. If ! DEFINITE, give up - start from a
5030 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
,
5034 buf_len
= bpaddr
- boundary
;
5040 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, bpaddr
- boundary
);
5043 buf_len
= bpaddr
- boundary
;
5047 /* Scan forwards. Find the last IT instruction before BPADDR. */
5052 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5054 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
5059 else if (inst1
& 0x0002)
5061 else if (inst1
& 0x0004)
5066 i
+= thumb_insn_size (inst1
);
5072 /* There wasn't really an IT instruction after all. */
5075 if (last_it_count
< 1)
5076 /* It was too far away. */
5079 /* This really is a trouble spot. Move the breakpoint to the IT
5081 return bpaddr
- buf_len
+ last_it
;
5084 /* ARM displaced stepping support.
5086 Generally ARM displaced stepping works as follows:
5088 1. When an instruction is to be single-stepped, it is first decoded by
5089 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5090 Depending on the type of instruction, it is then copied to a scratch
5091 location, possibly in a modified form. The copy_* set of functions
5092 performs such modification, as necessary. A breakpoint is placed after
5093 the modified instruction in the scratch space to return control to GDB.
5094 Note in particular that instructions which modify the PC will no longer
5095 do so after modification.
5097 2. The instruction is single-stepped, by setting the PC to the scratch
5098 location address, and resuming. Control returns to GDB when the
5101 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5102 function used for the current instruction. This function's job is to
5103 put the CPU/memory state back to what it would have been if the
5104 instruction had been executed unmodified in its original location. */
5106 /* NOP instruction (mov r0, r0). */
/* Used as the scratch-space instruction for branches, whose entire effect
   is deferred to cleanup_branch.  */
5107 #define ARM_NOP 0xe1a00000
5109 /* Helper for register reads for displaced stepping. In particular, this
5110 returns the PC as it would be seen by the instruction at its original
/* ...location (FROM = dsc->insn_addr plus the pipeline offset), rather
   than the PC of the scratch copy.  Non-PC registers are read straight
   from the regcache.  The pipeline-offset computation (ARM: +8, Thumb:
   +4) is partly elided in this copy.  */
5114 displaced_read_reg (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5118 CORE_ADDR from
= dsc
->insn_addr
;
5120 if (regno
== ARM_PC_REGNUM
)
5122 /* Compute pipeline offset:
5123 - When executing an ARM instruction, PC reads as the address of the
5124 current instruction plus 8.
5125 - When executing a Thumb instruction, PC reads as the address of the
5126 current instruction plus 4. */
5133 if (debug_displaced
)
5134 fprintf_unfiltered (gdb_stdlog
, "displaced: read pc value %.8lx\n",
5135 (unsigned long) from
);
5136 return (ULONGEST
) from
;
5140 regcache_cooked_read_unsigned (regs
, regno
, &ret
);
5141 if (debug_displaced
)
5142 fprintf_unfiltered (gdb_stdlog
, "displaced: read r%d value %.8lx\n",
5143 regno
, (unsigned long) ret
);
/* Return non-zero if the inferior is currently executing in ARM (not
   Thumb) mode, judged by the Thumb bit of the saved PSR being clear.  */
5149 displaced_in_arm_mode (struct regcache
*regs
)
5152 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
5154 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
5156 return (ps
& t_bit
) == 0;
5159 /* Write to the PC as from a branch instruction. */
/* A plain branch does not switch instruction sets: in ARM state the two
   low bits of VAL are cleared, in Thumb state only bit 0 (the selecting
   condition between the two writes is elided in this copy).  */
5162 branch_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5166 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5167 architecture versions < 6. */
5168 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
5169 val
& ~(ULONGEST
) 0x3);
5171 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
5172 val
& ~(ULONGEST
) 0x1);
5175 /* Write to the PC as from a branch-exchange instruction. */
/* BX semantics: bit 0 of VAL set -> switch to Thumb (set the PSR T bit,
   write VAL with bit 0 cleared); bit 0 clear and bit 1 clear -> switch to
   ARM (clear T, write VAL); otherwise the architecture leaves behaviour
   unpredictable, so warn and fall back to ARM mode with VAL word-aligned.
   The branch selecting the first case is elided in this copy.  */
5178 bx_write_pc (struct regcache
*regs
, ULONGEST val
)
5181 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
5183 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
5187 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
| t_bit
);
5188 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffe);
5190 else if ((val
& 2) == 0)
5192 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
5193 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
);
5197 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5198 mode, align dest to 4 bytes). */
5199 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5200 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
5201 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffc);
5205 /* Write to the PC as if from a load instruction. */
/* From ARMv5 onwards a load into PC behaves like BX (may switch to
   Thumb); on earlier architectures it behaves like a plain branch.  */
5208 load_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5211 if (DISPLACED_STEPPING_ARCH_VERSION
>= 5)
5212 bx_write_pc (regs
, val
);
5214 branch_write_pc (regs
, dsc
, val
);
5217 /* Write to the PC as if from an ALU instruction. */
/* On ARMv7+, an ALU result written to the PC from ARM state interworks
   like BX; otherwise it behaves like a plain branch.  */
5220 alu_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5223 if (DISPLACED_STEPPING_ARCH_VERSION
>= 7 && !dsc
->is_thumb
)
5224 bx_write_pc (regs
, val
);
5226 branch_write_pc (regs
, dsc
, val
);
5229 /* Helper for writing to registers for displaced stepping. Writing to the PC
5230 has a varying effects depending on the instruction which does the write:
5231 this is controlled by the WRITE_PC argument. */
/* PC writes dispatch (via a switch whose header is elided here) to
   branch_write_pc / bx_write_pc / load_write_pc / alu_write_pc; a
   CANNOT_WRITE_PC write only warns; any other style is an internal
   error.  A successful PC write records dsc->wrote_to_pc = 1 so the
   fixup phase knows not to advance the PC itself.  Non-PC registers go
   straight to the regcache.  */
5234 displaced_write_reg (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5235 int regno
, ULONGEST val
, enum pc_write_style write_pc
)
5237 if (regno
== ARM_PC_REGNUM
)
5239 if (debug_displaced
)
5240 fprintf_unfiltered (gdb_stdlog
, "displaced: writing pc %.8lx\n",
5241 (unsigned long) val
);
5244 case BRANCH_WRITE_PC
:
5245 branch_write_pc (regs
, dsc
, val
);
5249 bx_write_pc (regs
, val
);
5253 load_write_pc (regs
, dsc
, val
);
5257 alu_write_pc (regs
, dsc
, val
);
5260 case CANNOT_WRITE_PC
:
5261 warning (_("Instruction wrote to PC in an unexpected way when "
5262 "single-stepping"));
5266 internal_error (__FILE__
, __LINE__
,
5267 _("Invalid argument to displaced_write_reg"));
5270 dsc
->wrote_to_pc
= 1;
5274 if (debug_displaced
)
5275 fprintf_unfiltered (gdb_stdlog
, "displaced: writing r%d value %.8lx\n",
5276 regno
, (unsigned long) val
);
5277 regcache_cooked_write_unsigned (regs
, regno
, val
);
5281 /* This function is used to concisely determine if an instruction INSN
5282 references PC. Register fields of interest in INSN should have the
5283 corresponding fields of BITMASK set to 0b1111. The function
5284 returns 1 if any of these fields in INSN reference the PC
5285 (also 0b1111, r15), else it returns 0. */
/* Implementation: walk BITMASK nibble-group by nibble-group (LOWBIT marks
   the least significant bit of the current 4-bit field) and test whether
   INSN has all four bits set (i.e. r15) in that field.  Some loop-body
   lines are elided in this copy.  */
5288 insn_references_pc (uint32_t insn
, uint32_t bitmask
)
5290 uint32_t lowbit
= 1;
5292 while (bitmask
!= 0)
5296 for (; lowbit
&& (bitmask
& lowbit
) == 0; lowbit
<<= 1)
5302 mask
= lowbit
* 0xf;
5304 if ((insn
& mask
) == mask
)
5313 /* The simplest copy function. Many instructions have the same effect no
5314 matter what address they are executed at: in those cases, use this. */
/* Copies INSN verbatim into the scratch slot; INAME is only used for the
   debug trace.  No cleanup function is registered here.  */
5317 copy_unmodified (struct gdbarch
*gdbarch
, uint32_t insn
,
5318 const char *iname
, struct displaced_step_closure
*dsc
)
5320 if (debug_displaced
)
5321 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.8lx, "
5322 "opcode/class '%s' unmodified\n", (unsigned long) insn
,
5325 dsc
->modinsn
[0] = insn
;
5330 /* Preload instructions with immediate offset. */
/* Cleanup for copy_preload / copy_preload_reg: restore the r0 (and, for
   the register-offset form, r1) values that the copy phase saved in
   dsc->tmp before substituting them into the instruction.  */
5333 cleanup_preload (struct gdbarch
*gdbarch
,
5334 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5336 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5337 if (!dsc
->u
.preload
.immed
)
5338 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
/* Copy a PLD/PLI with immediate offset for displaced stepping.  If the
   base register is not PC the instruction is copied unmodified; otherwise
   r0 is saved to dsc->tmp[0], loaded with the original Rn (PC) value, and
   the instruction is rewritten to use r0 as base (Rn field cleared by the
   0xfff0ffff mask).  cleanup_preload undoes the r0 substitution.  */
5342 copy_preload (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
5343 struct displaced_step_closure
*dsc
)
5345 unsigned int rn
= bits (insn
, 16, 19);
5348 if (!insn_references_pc (insn
, 0x000f0000ul
))
5349 return copy_unmodified (gdbarch
, insn
, "preload", dsc
);
5351 if (debug_displaced
)
5352 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
5353 (unsigned long) insn
);
5355 /* Preload instructions:
5357 {pli/pld} [rn, #+/-imm]
5359 {pli/pld} [r0, #+/-imm]. */
5361 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5362 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5363 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5365 dsc
->u
.preload
.immed
= 1;
5367 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
5369 dsc
->cleanup
= &cleanup_preload
;
5374 /* Preload instructions with register offset. */
/* Like copy_preload, but for the register-offset form: both the base Rn
   and the offset register Rm may be PC, so r0 and r1 are saved, loaded
   with the Rn/Rm values, and substituted into the instruction (Rn field
   cleared by the mask, Rm field forced to r1 via "| 0x1").  immed = 0
   tells cleanup_preload to restore r1 as well.  */
5377 copy_preload_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
5378 struct regcache
*regs
,
5379 struct displaced_step_closure
*dsc
)
5381 unsigned int rn
= bits (insn
, 16, 19);
5382 unsigned int rm
= bits (insn
, 0, 3);
5383 ULONGEST rn_val
, rm_val
;
5385 if (!insn_references_pc (insn
, 0x000f000ful
))
5386 return copy_unmodified (gdbarch
, insn
, "preload reg", dsc
);
5388 if (debug_displaced
)
5389 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
5390 (unsigned long) insn
);
5392 /* Preload register-offset instructions:
5394 {pli/pld} [rn, rm {, shift}]
5396 {pli/pld} [r0, r1 {, shift}]. */
5398 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5399 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5400 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5401 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
5402 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5403 displaced_write_reg (regs
, dsc
, 1, rm_val
, CANNOT_WRITE_PC
);
5405 dsc
->u
.preload
.immed
= 0;
5407 dsc
->modinsn
[0] = (insn
& 0xfff0fff0) | 0x1;
5409 dsc
->cleanup
= &cleanup_preload
;
5414 /* Copy/cleanup coprocessor load and store instructions. */
/* Cleanup for copy_copro_load_store: r0 held the base address during the
   scratch execution, so its post-execution value is the written-back base.
   Restore the saved r0, then, if the instruction had writeback, store the
   updated base into the real Rn.  */
5417 cleanup_copro_load_store (struct gdbarch
*gdbarch
,
5418 struct regcache
*regs
,
5419 struct displaced_step_closure
*dsc
)
5421 ULONGEST rn_val
= displaced_read_reg (regs
, dsc
, 0);
5423 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5425 if (dsc
->u
.ldst
.writeback
)
5426 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, LOAD_WRITE_PC
);
/* Copy an LDC/STC (coprocessor load/store) whose base register is PC:
   save r0, load it with the PC value, and rewrite the instruction to use
   r0 as base.  The writeback flag is taken from bit 25 — NOTE(review):
   for LDC/STC writeback is normally the W bit (bit 21); confirm bit 25
   against the full source and the architecture manual.  */
5430 copy_copro_load_store (struct gdbarch
*gdbarch
, uint32_t insn
,
5431 struct regcache
*regs
,
5432 struct displaced_step_closure
*dsc
)
5434 unsigned int rn
= bits (insn
, 16, 19);
5437 if (!insn_references_pc (insn
, 0x000f0000ul
))
5438 return copy_unmodified (gdbarch
, insn
, "copro load/store", dsc
);
5440 if (debug_displaced
)
5441 fprintf_unfiltered (gdb_stdlog
, "displaced: copying coprocessor "
5442 "load/store insn %.8lx\n", (unsigned long) insn
);
5444 /* Coprocessor load/store instructions:
5446 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5448 {stc/stc2} [r0, #+/-imm].
5450 ldc/ldc2 are handled identically. */
5452 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5453 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5454 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5456 dsc
->u
.ldst
.writeback
= bit (insn
, 25);
5457 dsc
->u
.ldst
.rn
= rn
;
5459 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
5461 dsc
->cleanup
= &cleanup_copro_load_store
;
5466 /* Clean up branch instructions (actually perform the branch, by setting
/* The scratch copy of a branch is a NOP (see copy_b_bl_blx and
   copy_bx_blx_reg); the real branch happens here.  Evaluate the condition
   against the saved PSR; if taken (guard elided in this copy), set LR for
   link branches (PC read yields insn_addr + 8, so -4 gives the return
   address) and write the destination with BX or plain-branch semantics
   according to u.branch.exchange.  */
5470 cleanup_branch (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5471 struct displaced_step_closure
*dsc
)
5473 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
5474 int branch_taken
= condition_true (dsc
->u
.branch
.cond
, status
);
5475 enum pc_write_style write_pc
= dsc
->u
.branch
.exchange
5476 ? BX_WRITE_PC
: BRANCH_WRITE_PC
;
5481 if (dsc
->u
.branch
.link
)
5483 ULONGEST pc
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
5484 displaced_write_reg (regs
, dsc
, ARM_LR_REGNUM
, pc
- 4, CANNOT_WRITE_PC
);
5487 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, dsc
->u
.branch
.dest
, write_pc
);
5490 /* Copy B/BL/BLX instructions with immediate destinations. */
/* Decode B/BL/BLX<imm>: cond == 0xf marks unconditional BLX (always a
   link + exchange); the 24-bit signed offset is shifted left 2 (BLX also
   folds in the H bit and sets bit 0 of the destination so cleanup_branch
   switches to Thumb).  bit 25 of the assembled offset is the sign bit,
   extended with ~0x3ffffff.  The scratch instruction is a NOP; the branch
   itself is performed by cleanup_branch.  Destination = insn_addr + 8 +
   offset (ARM pipeline offset).  */
5493 copy_b_bl_blx (struct gdbarch
*gdbarch
, uint32_t insn
,
5494 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5496 unsigned int cond
= bits (insn
, 28, 31);
5497 int exchange
= (cond
== 0xf);
5498 int link
= exchange
|| bit (insn
, 24);
5499 CORE_ADDR from
= dsc
->insn_addr
;
5502 if (debug_displaced
)
5503 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s immediate insn "
5504 "%.8lx\n", (exchange
) ? "blx" : (link
) ? "bl" : "b",
5505 (unsigned long) insn
);
5507 /* Implement "BL<cond> <label>" as:
5509 Preparation: cond <- instruction condition
5510 Insn: mov r0, r0 (nop)
5511 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5513 B<cond> similar, but don't set r14 in cleanup. */
5516 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5517 then arrange the switch into Thumb mode. */
5518 offset
= (bits (insn
, 0, 23) << 2) | (bit (insn
, 24) << 1) | 1;
5520 offset
= bits (insn
, 0, 23) << 2;
5522 if (bit (offset
, 25))
5523 offset
= offset
| ~0x3ffffff;
5525 dsc
->u
.branch
.cond
= cond
;
5526 dsc
->u
.branch
.link
= link
;
5527 dsc
->u
.branch
.exchange
= exchange
;
5528 dsc
->u
.branch
.dest
= from
+ 8 + offset
;
5530 dsc
->modinsn
[0] = ARM_NOP
;
5532 dsc
->cleanup
= &cleanup_branch
;
5537 /* Copy BX/BLX with register-specified destinations. */
/* Decode BX/BLX<reg>: the destination is read from Rm immediately (so
   the value at the original PC is captured), exchange is always 1 (these
   are interworking branches), and link distinguishes BLX (bit 5) from BX.
   As for immediate branches, the scratch instruction is a NOP and
   cleanup_branch performs the branch.  */
5540 copy_bx_blx_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
5541 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5543 unsigned int cond
= bits (insn
, 28, 31);
5546 int link
= bit (insn
, 5);
5547 unsigned int rm
= bits (insn
, 0, 3);
5549 if (debug_displaced
)
5550 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s register insn "
5551 "%.8lx\n", (link
) ? "blx" : "bx",
5552 (unsigned long) insn
);
5554 /* Implement {BX,BLX}<cond> <reg>" as:
5556 Preparation: cond <- instruction condition
5557 Insn: mov r0, r0 (nop)
5558 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5560 Don't set r14 in cleanup for BX. */
5562 dsc
->u
.branch
.dest
= displaced_read_reg (regs
, dsc
, rm
);
5564 dsc
->u
.branch
.cond
= cond
;
5565 dsc
->u
.branch
.link
= link
;
5566 dsc
->u
.branch
.exchange
= 1;
5568 dsc
->modinsn
[0] = ARM_NOP
;
5570 dsc
->cleanup
= &cleanup_branch
;
5575 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
/* Cleanup for copy_alu_imm: r0 holds the ALU result after the scratch
   execution.  Restore the saved r0/r1, then deliver the result to the
   real destination register with ALU-write-to-PC semantics.  */
5578 cleanup_alu_imm (struct gdbarch
*gdbarch
,
5579 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5581 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
, 0);
5582 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5583 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
5584 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
/* Copy a data-processing instruction with immediate operand that reads or
   writes PC.  r0/r1 are saved, loaded with the original Rd/Rn values, and
   substituted into the instruction: for MOV (no Rn operand) the Rd/Rn
   fields are simply cleared to r0/r1; otherwise Rn is forced to r1 via
   "| 0x10000".  cleanup_alu_imm moves the r0 result to the real Rd.  */
5588 copy_alu_imm (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
5589 struct displaced_step_closure
*dsc
)
5591 unsigned int rn
= bits (insn
, 16, 19);
5592 unsigned int rd
= bits (insn
, 12, 15);
5593 unsigned int op
= bits (insn
, 21, 24);
5594 int is_mov
= (op
== 0xd);
5595 ULONGEST rd_val
, rn_val
;
5597 if (!insn_references_pc (insn
, 0x000ff000ul
))
5598 return copy_unmodified (gdbarch
, insn
, "ALU immediate", dsc
);
5600 if (debug_displaced
)
5601 fprintf_unfiltered (gdb_stdlog
, "displaced: copying immediate %s insn "
5602 "%.8lx\n", is_mov
? "move" : "ALU",
5603 (unsigned long) insn
);
5605 /* Instruction is of form:
5607 <op><cond> rd, [rn,] #imm
5611 Preparation: tmp1, tmp2 <- r0, r1;
5613 Insn: <op><cond> r0, r1, #imm
5614 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5617 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5618 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5619 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5620 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
5621 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
5622 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
5626 dsc
->modinsn
[0] = insn
& 0xfff00fff;
5628 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x10000;
5630 dsc
->cleanup
= &cleanup_alu_imm
;
5635 /* Copy/cleanup arithmetic/logic insns with register RHS. */
/* Cleanup for copy_alu_reg: capture the r0 result, restore the three
   saved scratch registers r0-r2, then deliver the result to the real Rd
   with ALU-write-to-PC semantics.  */
5638 cleanup_alu_reg (struct gdbarch
*gdbarch
,
5639 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5644 rd_val
= displaced_read_reg (regs
, dsc
, 0);
5646 for (i
= 0; i
< 3; i
++)
5647 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
5649 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
/* Copy a data-processing instruction with register operand that references
   PC.  Save r0-r2, load them with the Rd/Rn/Rm values, and rewrite the
   instruction to operate on r0, r1, r2 ("| 0x2" sets Rm = r2; the MOV form
   has no Rn, the non-MOV form adds "| 0x10000" for Rn = r1).
   cleanup_alu_reg restores the scratch registers and delivers the r0
   result to the real Rd.  */
5653 copy_alu_reg (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
5654 struct displaced_step_closure
*dsc
)
5656 unsigned int rn
= bits (insn
, 16, 19);
5657 unsigned int rm
= bits (insn
, 0, 3);
5658 unsigned int rd
= bits (insn
, 12, 15);
5659 unsigned int op
= bits (insn
, 21, 24);
5660 int is_mov
= (op
== 0xd);
5661 ULONGEST rd_val
, rn_val
, rm_val
;
5663 if (!insn_references_pc (insn
, 0x000ff00ful
))
5664 return copy_unmodified (gdbarch
, insn
, "ALU reg", dsc
);
5666 if (debug_displaced
)
5667 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.8lx\n",
5668 is_mov
? "move" : "ALU", (unsigned long) insn
);
5670 /* Instruction is of form:
5672 <op><cond> rd, [rn,] rm [, <shift>]
5676 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5677 r0, r1, r2 <- rd, rn, rm
5678 Insn: <op><cond> r0, r1, r2 [, <shift>]
5679 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5682 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5683 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5684 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
5685 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
5686 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5687 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
5688 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
5689 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
5690 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
5694 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x2;
5696 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x10002;
5698 dsc
->cleanup
= &cleanup_alu_reg
;
5703 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
/* Cleanup for copy_alu_shifted_reg: capture the r0 result, restore the
   four saved scratch registers r0-r3, then deliver the result to the real
   Rd with ALU-write-to-PC semantics.  */
5706 cleanup_alu_shifted_reg (struct gdbarch
*gdbarch
,
5707 struct regcache
*regs
,
5708 struct displaced_step_closure
*dsc
)
5710 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
, 0);
5713 for (i
= 0; i
< 4; i
++)
5714 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
5716 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
/* Copy a data-processing instruction with register-shifted-register
   operand that references PC.  Four registers (Rd, Rn, Rm, Rs) may be
   involved, so r0-r3 are saved and substituted: "| 0x302" sets Rs = r3
   and Rm = r2; the non-MOV form adds "| 0x10000" for Rn = r1.
   cleanup_alu_shifted_reg restores the scratch registers and delivers the
   r0 result to the real Rd.  */
5720 copy_alu_shifted_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
5721 struct regcache
*regs
,
5722 struct displaced_step_closure
*dsc
)
5724 unsigned int rn
= bits (insn
, 16, 19);
5725 unsigned int rm
= bits (insn
, 0, 3);
5726 unsigned int rd
= bits (insn
, 12, 15);
5727 unsigned int rs
= bits (insn
, 8, 11);
5728 unsigned int op
= bits (insn
, 21, 24);
5729 int is_mov
= (op
== 0xd), i
;
5730 ULONGEST rd_val
, rn_val
, rm_val
, rs_val
;
5732 if (!insn_references_pc (insn
, 0x000fff0ful
))
5733 return copy_unmodified (gdbarch
, insn
, "ALU shifted reg", dsc
);
5735 if (debug_displaced
)
5736 fprintf_unfiltered (gdb_stdlog
, "displaced: copying shifted reg %s insn "
5737 "%.8lx\n", is_mov
? "move" : "ALU",
5738 (unsigned long) insn
);
5740 /* Instruction is of form:
5742 <op><cond> rd, [rn,] rm, <shift> rs
5746 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
5747 r0, r1, r2, r3 <- rd, rn, rm, rs
5748 Insn: <op><cond> r0, r1, r2, <shift> r3
5750 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
5754 for (i
= 0; i
< 4; i
++)
5755 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
5757 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
5758 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5759 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
5760 rs_val
= displaced_read_reg (regs
, dsc
, rs
);
5761 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
5762 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
5763 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
5764 displaced_write_reg (regs
, dsc
, 3, rs_val
, CANNOT_WRITE_PC
);
5768 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x302;
5770 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x10302;
5772 dsc
->cleanup
= &cleanup_alu_shifted_reg
;
5777 /* Clean up load instructions. */
/* After the scratch execution of a rewritten load: r0 holds the loaded
   value (r1 the second word for 8-byte transfers), and r2 holds the
   possibly-written-back base address.  Restore all scratch registers
   (r3 only for register-offset forms), write the base back to the real
   Rn if the instruction had writeback, and finally deliver the loaded
   value(s) to the real destination with load-write-to-PC semantics.  */
5780 cleanup_load (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5781 struct displaced_step_closure
*dsc
)
5783 ULONGEST rt_val
, rt_val2
= 0, rn_val
;
5785 rt_val
= displaced_read_reg (regs
, dsc
, 0);
5786 if (dsc
->u
.ldst
.xfersize
== 8)
5787 rt_val2
= displaced_read_reg (regs
, dsc
, 1);
5788 rn_val
= displaced_read_reg (regs
, dsc
, 2);
5790 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5791 if (dsc
->u
.ldst
.xfersize
> 4)
5792 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
5793 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
5794 if (!dsc
->u
.ldst
.immed
)
5795 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
5797 /* Handle register writeback. */
5798 if (dsc
->u
.ldst
.writeback
)
5799 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
5800 /* Put result in right place. */
5801 displaced_write_reg (regs
, dsc
, dsc
->rd
, rt_val
, LOAD_WRITE_PC
);
5802 if (dsc
->u
.ldst
.xfersize
== 8)
5803 displaced_write_reg (regs
, dsc
, dsc
->rd
+ 1, rt_val2
, LOAD_WRITE_PC
);
5806 /* Clean up store instructions. */
5809 cleanup_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5810 struct displaced_step_closure
*dsc
)
5812 ULONGEST rn_val
= displaced_read_reg (regs
, dsc
, 2);
5814 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5815 if (dsc
->u
.ldst
.xfersize
> 4)
5816 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
5817 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
5818 if (!dsc
->u
.ldst
.immed
)
5819 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
5820 if (!dsc
->u
.ldst
.restore_r4
)
5821 displaced_write_reg (regs
, dsc
, 4, dsc
->tmp
[4], CANNOT_WRITE_PC
);
5824 if (dsc
->u
.ldst
.writeback
)
5825 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
5828 /* Copy "extra" load/store instructions. These are halfword/doubleword
5829 transfers, which have a different encoding to byte/word transfers. */
/* Rewrite LDRH/STRH/LDRD/STRD/LDRSB/LDRSH (and unprivileged variants)
   that reference PC: Rt -> r0 (and Rt+1 -> r1 for doublewords), Rn -> r2,
   Rm -> r3 for register-offset forms.  OPCODE indexes the LOAD/BYTESIZE
   tables, derived from op1/op2; the validity check guarding the
   internal_error is elided in this copy.  The saved registers are undone
   by cleanup_load or cleanup_store depending on LOAD[opcode].
   NOTE(review): "unpriveleged" is a historical spelling kept for ABI/diff
   stability — it is the parameter name, not a comment typo.  */
5832 copy_extra_ld_st (struct gdbarch
*gdbarch
, uint32_t insn
, int unpriveleged
,
5833 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5835 unsigned int op1
= bits (insn
, 20, 24);
5836 unsigned int op2
= bits (insn
, 5, 6);
5837 unsigned int rt
= bits (insn
, 12, 15);
5838 unsigned int rn
= bits (insn
, 16, 19);
5839 unsigned int rm
= bits (insn
, 0, 3);
5840 char load
[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5841 char bytesize
[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
5842 int immed
= (op1
& 0x4) != 0;
5844 ULONGEST rt_val
, rt_val2
= 0, rn_val
, rm_val
= 0;
5846 if (!insn_references_pc (insn
, 0x000ff00ful
))
5847 return copy_unmodified (gdbarch
, insn
, "extra load/store", dsc
);
5849 if (debug_displaced
)
5850 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %sextra load/store "
5851 "insn %.8lx\n", unpriveleged
? "unpriveleged " : "",
5852 (unsigned long) insn
);
5854 opcode
= ((op2
<< 2) | (op1
& 0x1) | ((op1
& 0x4) >> 1)) - 4;
5857 internal_error (__FILE__
, __LINE__
,
5858 _("copy_extra_ld_st: instruction decode error"));
5860 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5861 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5862 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
5864 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
5866 rt_val
= displaced_read_reg (regs
, dsc
, rt
);
5867 if (bytesize
[opcode
] == 8)
5868 rt_val2
= displaced_read_reg (regs
, dsc
, rt
+ 1);
5869 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5871 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
5873 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
5874 if (bytesize
[opcode
] == 8)
5875 displaced_write_reg (regs
, dsc
, 1, rt_val2
, CANNOT_WRITE_PC
);
5876 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
5878 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
5881 dsc
->u
.ldst
.xfersize
= bytesize
[opcode
];
5882 dsc
->u
.ldst
.rn
= rn
;
5883 dsc
->u
.ldst
.immed
= immed
;
5884 dsc
->u
.ldst
.writeback
= bit (insn
, 24) == 0 || bit (insn
, 21) != 0;
5885 dsc
->u
.ldst
.restore_r4
= 0;
5888 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5890 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5891 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
5893 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5895 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5896 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
5898 dsc
->cleanup
= load
[opcode
] ? &cleanup_load
: &cleanup_store
;
5903 /* Copy byte/word loads and stores. */
/* Rewrite LDR/STR/LDRB/STRB (and the user-mode T variants) that reference
   PC: Rt -> r0, Rn -> r2, Rm -> r3 for register-offset forms.  The
   ordinary case is a single rewritten instruction; storing the PC needs a
   five-instruction prologue (detailed in the long comment below) to
   materialise the architecture-dependent stored-PC value in r0, using r4
   as scratch (restore_r4 = 1 tells cleanup_store to put r4 back).
   Several guard conditions around the tmp[3]/tmp[4] saves are elided in
   this copy.  */
5906 copy_ldr_str_ldrb_strb (struct gdbarch
*gdbarch
, uint32_t insn
,
5907 struct regcache
*regs
,
5908 struct displaced_step_closure
*dsc
, int load
, int byte
,
5911 int immed
= !bit (insn
, 25);
5912 unsigned int rt
= bits (insn
, 12, 15);
5913 unsigned int rn
= bits (insn
, 16, 19);
5914 unsigned int rm
= bits (insn
, 0, 3); /* Only valid if !immed. */
5915 ULONGEST rt_val
, rn_val
, rm_val
= 0;
5917 if (!insn_references_pc (insn
, 0x000ff00ful
))
5918 return copy_unmodified (gdbarch
, insn
, "load/store", dsc
);
5920 if (debug_displaced
)
5921 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s%s insn %.8lx\n",
5922 load
? (byte
? "ldrb" : "ldr")
5923 : (byte
? "strb" : "str"), usermode
? "t" : "",
5924 (unsigned long) insn
);
5926 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5927 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
5929 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
5931 dsc
->tmp
[4] = displaced_read_reg (regs
, dsc
, 4);
5933 rt_val
= displaced_read_reg (regs
, dsc
, rt
);
5934 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5936 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
5938 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
5939 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
5941 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
5944 dsc
->u
.ldst
.xfersize
= byte
? 1 : 4;
5945 dsc
->u
.ldst
.rn
= rn
;
5946 dsc
->u
.ldst
.immed
= immed
;
5947 dsc
->u
.ldst
.writeback
= bit (insn
, 24) == 0 || bit (insn
, 21) != 0;
5949 /* To write PC we can do:
5951 Before this sequence of instructions:
5952 r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
5953 r2 is the Rn value got from displaced_read_reg.
5955 Insn1: push {pc} Write address of STR instruction + offset on stack
5956 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
5957 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
5958 = addr(Insn1) + offset - addr(Insn3) - 8
5960 Insn4: add r4, r4, #8 r4 = offset - 8
5961 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
5963 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
5965 Otherwise we don't know what value to write for PC, since the offset is
5966 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
5967 of this can be found in Section "Saving from r15" in
5968 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
5970 if (load
|| rt
!= ARM_PC_REGNUM
)
5972 dsc
->u
.ldst
.restore_r4
= 0;
5975 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
5977 {ldr,str}[b]<cond> r0, [r2, #imm]. */
5978 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
5980 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
5982 {ldr,str}[b]<cond> r0, [r2, r3]. */
5983 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
5987 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
5988 dsc
->u
.ldst
.restore_r4
= 1;
5989 dsc
->modinsn
[0] = 0xe92d8000; /* push {pc} */
5990 dsc
->modinsn
[1] = 0xe8bd0010; /* pop {r4} */
5991 dsc
->modinsn
[2] = 0xe044400f; /* sub r4, r4, pc. */
5992 dsc
->modinsn
[3] = 0xe2844008; /* add r4, r4, #8. */
5993 dsc
->modinsn
[4] = 0xe0800004; /* add r0, r0, r4. */
5997 dsc
->modinsn
[5] = (insn
& 0xfff00fff) | 0x20000;
5999 dsc
->modinsn
[5] = (insn
& 0xfff00ff0) | 0x20003;
6004 dsc
->cleanup
= load
? &cleanup_load
: &cleanup_store
;
6009 /* Cleanup LDM instructions with fully-populated register list. This is an
6010 unfortunate corner case: it's impossible to implement correctly by modifying
6011 the instruction. The issue is as follows: we have an instruction,
6015 which we must rewrite to avoid loading PC. A possible solution would be to
6016 do the load in two halves, something like (with suitable cleanup
6020 ldm[id][ab] r8!, {r0-r7}
6022 ldm[id][ab] r8, {r7-r14}
6025 but at present there's no suitable place for <temp>, since the scratch space
6026 is overwritten before the cleanup routine is called. For now, we simply
6027 emulate the instruction. */
/* Emulation: refuse exception-return LDM ({...pc}^), assert this is a
   load, then walk the register mask in transfer order (ascending for
   increment, descending for decrement), applying the before/after
   address bump and reading each word from memory straight into the
   destination register (LOAD_WRITE_PC handles a PC in the list).
   Finally propagate writeback to Rn.  The loop header, the guard on
   do_transfer, and the tail of the writeback call are elided in this
   copy — the trailing argument list continues past this excerpt.  */
6030 cleanup_block_load_all (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6031 struct displaced_step_closure
*dsc
)
6033 int inc
= dsc
->u
.block
.increment
;
6034 int bump_before
= dsc
->u
.block
.before
? (inc
? 4 : -4) : 0;
6035 int bump_after
= dsc
->u
.block
.before
? 0 : (inc
? 4 : -4);
6036 uint32_t regmask
= dsc
->u
.block
.regmask
;
6037 int regno
= inc
? 0 : 15;
6038 CORE_ADDR xfer_addr
= dsc
->u
.block
.xfer_addr
;
6039 int exception_return
= dsc
->u
.block
.load
&& dsc
->u
.block
.user
6040 && (regmask
& 0x8000) != 0;
6041 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6042 int do_transfer
= condition_true (dsc
->u
.block
.cond
, status
);
6043 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6048 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6049 sensible we can do here. Complain loudly. */
6050 if (exception_return
)
6051 error (_("Cannot single-step exception return"));
6053 /* We don't handle any stores here for now. */
6054 gdb_assert (dsc
->u
.block
.load
!= 0);
6056 if (debug_displaced
)
6057 fprintf_unfiltered (gdb_stdlog
, "displaced: emulating block transfer: "
6058 "%s %s %s\n", dsc
->u
.block
.load
? "ldm" : "stm",
6059 dsc
->u
.block
.increment
? "inc" : "dec",
6060 dsc
->u
.block
.before
? "before" : "after");
6067 while (regno
<= ARM_PC_REGNUM
&& (regmask
& (1 << regno
)) == 0)
6070 while (regno
>= 0 && (regmask
& (1 << regno
)) == 0)
6073 xfer_addr
+= bump_before
;
6075 memword
= read_memory_unsigned_integer (xfer_addr
, 4, byte_order
);
6076 displaced_write_reg (regs
, dsc
, regno
, memword
, LOAD_WRITE_PC
);
6078 xfer_addr
+= bump_after
;
6080 regmask
&= ~(1 << regno
);
6083 if (dsc
->u
.block
.writeback
)
6084 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, xfer_addr
,
6088 /* Clean up an STM which included the PC in the register list. */
6091 cleanup_block_store_pc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6092 struct displaced_step_closure
*dsc
)
6094 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6095 int store_executed
= condition_true (dsc
->u
.block
.cond
, status
);
6096 CORE_ADDR pc_stored_at
, transferred_regs
= bitcount (dsc
->u
.block
.regmask
);
6097 CORE_ADDR stm_insn_addr
;
6100 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6102 /* If condition code fails, there's nothing else to do. */
6103 if (!store_executed
)
6106 if (dsc
->u
.block
.increment
)
6108 pc_stored_at
= dsc
->u
.block
.xfer_addr
+ 4 * transferred_regs
;
6110 if (dsc
->u
.block
.before
)
6115 pc_stored_at
= dsc
->u
.block
.xfer_addr
;
6117 if (dsc
->u
.block
.before
)
6121 pc_val
= read_memory_unsigned_integer (pc_stored_at
, 4, byte_order
);
6122 stm_insn_addr
= dsc
->scratch_base
;
6123 offset
= pc_val
- stm_insn_addr
;
6125 if (debug_displaced
)
6126 fprintf_unfiltered (gdb_stdlog
, "displaced: detected PC offset %.8lx for "
6127 "STM instruction\n", offset
);
6129 /* Rewrite the stored PC to the proper value for the non-displaced original
6131 write_memory_unsigned_integer (pc_stored_at
, 4, byte_order
,
6132 dsc
->insn_addr
+ offset
);
6135 /* Clean up an LDM which includes the PC in the register list. We clumped all
6136 the registers in the transferred list into a contiguous range r0...rX (to
6137 avoid loading PC directly and losing control of the debugged program), so we
6138 must undo that here. */
6141 cleanup_block_load_pc (struct gdbarch
*gdbarch
,
6142 struct regcache
*regs
,
6143 struct displaced_step_closure
*dsc
)
6145 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6146 int load_executed
= condition_true (dsc
->u
.block
.cond
, status
), i
;
6147 unsigned int mask
= dsc
->u
.block
.regmask
, write_reg
= ARM_PC_REGNUM
;
6148 unsigned int regs_loaded
= bitcount (mask
);
6149 unsigned int num_to_shuffle
= regs_loaded
, clobbered
;
6151 /* The method employed here will fail if the register list is fully populated
6152 (we need to avoid loading PC directly). */
6153 gdb_assert (num_to_shuffle
< 16);
6158 clobbered
= (1 << num_to_shuffle
) - 1;
6160 while (num_to_shuffle
> 0)
6162 if ((mask
& (1 << write_reg
)) != 0)
6164 unsigned int read_reg
= num_to_shuffle
- 1;
6166 if (read_reg
!= write_reg
)
6168 ULONGEST rval
= displaced_read_reg (regs
, dsc
, read_reg
);
6169 displaced_write_reg (regs
, dsc
, write_reg
, rval
, LOAD_WRITE_PC
);
6170 if (debug_displaced
)
6171 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: move "
6172 "loaded register r%d to r%d\n"), read_reg
,
6175 else if (debug_displaced
)
6176 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: register "
6177 "r%d already in the right place\n"),
6180 clobbered
&= ~(1 << write_reg
);
6188 /* Restore any registers we scribbled over. */
6189 for (write_reg
= 0; clobbered
!= 0; write_reg
++)
6191 if ((clobbered
& (1 << write_reg
)) != 0)
6193 displaced_write_reg (regs
, dsc
, write_reg
, dsc
->tmp
[write_reg
],
6195 if (debug_displaced
)
6196 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: restored "
6197 "clobbered register r%d\n"), write_reg
);
6198 clobbered
&= ~(1 << write_reg
);
6202 /* Perform register writeback manually. */
6203 if (dsc
->u
.block
.writeback
)
6205 ULONGEST new_rn_val
= dsc
->u
.block
.xfer_addr
;
6207 if (dsc
->u
.block
.increment
)
6208 new_rn_val
+= regs_loaded
* 4;
6210 new_rn_val
-= regs_loaded
* 4;
6212 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, new_rn_val
,
6217 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6218 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6221 copy_block_xfer (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
6222 struct displaced_step_closure
*dsc
)
6224 int load
= bit (insn
, 20);
6225 int user
= bit (insn
, 22);
6226 int increment
= bit (insn
, 23);
6227 int before
= bit (insn
, 24);
6228 int writeback
= bit (insn
, 21);
6229 int rn
= bits (insn
, 16, 19);
6231 /* Block transfers which don't mention PC can be run directly
6233 if (rn
!= ARM_PC_REGNUM
&& (insn
& 0x8000) == 0)
6234 return copy_unmodified (gdbarch
, insn
, "ldm/stm", dsc
);
6236 if (rn
== ARM_PC_REGNUM
)
6238 warning (_("displaced: Unpredictable LDM or STM with "
6239 "base register r15"));
6240 return copy_unmodified (gdbarch
, insn
, "unpredictable ldm/stm", dsc
);
6243 if (debug_displaced
)
6244 fprintf_unfiltered (gdb_stdlog
, "displaced: copying block transfer insn "
6245 "%.8lx\n", (unsigned long) insn
);
6247 dsc
->u
.block
.xfer_addr
= displaced_read_reg (regs
, dsc
, rn
);
6248 dsc
->u
.block
.rn
= rn
;
6250 dsc
->u
.block
.load
= load
;
6251 dsc
->u
.block
.user
= user
;
6252 dsc
->u
.block
.increment
= increment
;
6253 dsc
->u
.block
.before
= before
;
6254 dsc
->u
.block
.writeback
= writeback
;
6255 dsc
->u
.block
.cond
= bits (insn
, 28, 31);
6257 dsc
->u
.block
.regmask
= insn
& 0xffff;
6261 if ((insn
& 0xffff) == 0xffff)
6263 /* LDM with a fully-populated register list. This case is
6264 particularly tricky. Implement for now by fully emulating the
6265 instruction (which might not behave perfectly in all cases, but
6266 these instructions should be rare enough for that not to matter
6268 dsc
->modinsn
[0] = ARM_NOP
;
6270 dsc
->cleanup
= &cleanup_block_load_all
;
6274 /* LDM of a list of registers which includes PC. Implement by
6275 rewriting the list of registers to be transferred into a
6276 contiguous chunk r0...rX before doing the transfer, then shuffling
6277 registers into the correct places in the cleanup routine. */
6278 unsigned int regmask
= insn
& 0xffff;
6279 unsigned int num_in_list
= bitcount (regmask
), new_regmask
, bit
= 1;
6280 unsigned int to
= 0, from
= 0, i
, new_rn
;
6282 for (i
= 0; i
< num_in_list
; i
++)
6283 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
6285 /* Writeback makes things complicated. We need to avoid clobbering
6286 the base register with one of the registers in our modified
6287 register list, but just using a different register can't work in
6290 ldm r14!, {r0-r13,pc}
6292 which would need to be rewritten as:
6296 but that can't work, because there's no free register for N.
6298 Solve this by turning off the writeback bit, and emulating
6299 writeback manually in the cleanup routine. */
6304 new_regmask
= (1 << num_in_list
) - 1;
6306 if (debug_displaced
)
6307 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM r%d%s, "
6308 "{..., pc}: original reg list %.4x, modified "
6309 "list %.4x\n"), rn
, writeback
? "!" : "",
6310 (int) insn
& 0xffff, new_regmask
);
6312 dsc
->modinsn
[0] = (insn
& ~0xffff) | (new_regmask
& 0xffff);
6314 dsc
->cleanup
= &cleanup_block_load_pc
;
6319 /* STM of a list of registers which includes PC. Run the instruction
6320 as-is, but out of line: this will store the wrong value for the PC,
6321 so we must manually fix up the memory in the cleanup routine.
6322 Doing things this way has the advantage that we can auto-detect
6323 the offset of the PC write (which is architecture-dependent) in
6324 the cleanup routine. */
6325 dsc
->modinsn
[0] = insn
;
6327 dsc
->cleanup
= &cleanup_block_store_pc
;
6333 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6334 for Linux, where some SVC instructions must be treated specially. */
6337 cleanup_svc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6338 struct displaced_step_closure
*dsc
)
6340 CORE_ADDR resume_addr
= dsc
->insn_addr
+ 4;
6342 if (debug_displaced
)
6343 fprintf_unfiltered (gdb_stdlog
, "displaced: cleanup for svc, resume at "
6344 "%.8lx\n", (unsigned long) resume_addr
);
6346 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, resume_addr
, BRANCH_WRITE_PC
);
6350 copy_svc (struct gdbarch
*gdbarch
, uint32_t insn
, CORE_ADDR to
,
6351 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6353 /* Allow OS-specific code to override SVC handling. */
6354 if (dsc
->u
.svc
.copy_svc_os
)
6355 return dsc
->u
.svc
.copy_svc_os (gdbarch
, insn
, to
, regs
, dsc
);
6357 if (debug_displaced
)
6358 fprintf_unfiltered (gdb_stdlog
, "displaced: copying svc insn %.8lx\n",
6359 (unsigned long) insn
);
6361 /* Preparation: none.
6362 Insn: unmodified svc.
6363 Cleanup: pc <- insn_addr + 4. */
6365 dsc
->modinsn
[0] = insn
;
6367 dsc
->cleanup
= &cleanup_svc
;
6368 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6370 dsc
->wrote_to_pc
= 1;
6375 /* Copy undefined instructions. */
6378 copy_undef (struct gdbarch
*gdbarch
, uint32_t insn
,
6379 struct displaced_step_closure
*dsc
)
6381 if (debug_displaced
)
6382 fprintf_unfiltered (gdb_stdlog
,
6383 "displaced: copying undefined insn %.8lx\n",
6384 (unsigned long) insn
);
6386 dsc
->modinsn
[0] = insn
;
6391 /* Copy unpredictable instructions. */
6394 copy_unpred (struct gdbarch
*gdbarch
, uint32_t insn
,
6395 struct displaced_step_closure
*dsc
)
6397 if (debug_displaced
)
6398 fprintf_unfiltered (gdb_stdlog
, "displaced: copying unpredictable insn "
6399 "%.8lx\n", (unsigned long) insn
);
6401 dsc
->modinsn
[0] = insn
;
/* The decode_* functions are instruction decoding helpers.  They mostly follow
   the presentation in the ARM ARM.  */

static int
decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
  else if ((op1 & 0x77) == 0x41)
    return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      if (rn != 0xf)
	return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return copy_unpred (gdbarch, insn, dsc);
      default:
	return copy_undef (gdbarch, insn, dsc);
      }
  else
    return copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
/* Decode instructions in the "unconditional" (0xf-prefixed) encoding
   space.  */

static int
decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? copy_undef (gdbarch, insn, dsc)
			: copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
			: copy_undef (gdbarch, insn, dsc);

	  default:
	    return copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return copy_undef (gdbarch, insn, dsc);
    }
}
/* Decode miscellaneous instructions in dp/misc encoding space.  */

static int
decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);

  switch (op2)
    {
    case 0x0:
      return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return copy_bx_blx_reg (gdbarch, insn,
				regs, dsc);  /* blx register.  */
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return copy_unmodified (gdbarch, insn, "smc", dsc);
      /* fallthrough */

    default:
      return copy_undef (gdbarch, insn, dsc);
    }
}
/* Decode the data-processing / miscellaneous encoding space.  */

static int
decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				 dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Decode load/store of words and unsigned bytes.  The three trailing
   arguments to copy_ldr_str_ldrb_strb select load vs. store, byte vs. word,
   and register vs. immediate offset, per the ARM ARM A5.3 table.  */

static int
decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			 struct regcache *regs,
			 struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
/* Decode the media-instruction encoding space.  */

static int
decode_media (struct gdbarch *gdbarch, uint32_t insn,
	      struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return copy_unmodified (gdbarch, insn,
			      "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  if (bits (insn, 12, 15) == 0xf)
	    return copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  if (bits (insn, 0, 3) == 0xf)
	    return copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Decode branch (b/bl/blx) vs. block transfer (ldm/stm) instructions;
   bit 25 distinguishes the two halves of this encoding space.  */

static int
decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
		    struct regcache *regs, struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    return copy_b_bl_blx (gdbarch, insn, regs, dsc);
  else
    return copy_block_xfer (gdbarch, insn, regs, dsc);
}
/* Decode VFP/Neon extension-register load/store instructions.  */

static int
decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6785 decode_svc_copro (struct gdbarch
*gdbarch
, uint32_t insn
, CORE_ADDR to
,
6786 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6788 unsigned int op1
= bits (insn
, 20, 25);
6789 int op
= bit (insn
, 4);
6790 unsigned int coproc
= bits (insn
, 8, 11);
6791 unsigned int rn
= bits (insn
, 16, 19);
6793 if ((op1
& 0x20) == 0x00 && (op1
& 0x3a) != 0x00 && (coproc
& 0xe) == 0xa)
6794 return decode_ext_reg_ld_st (gdbarch
, insn
, regs
, dsc
);
6795 else if ((op1
& 0x21) == 0x00 && (op1
& 0x3a) != 0x00
6796 && (coproc
& 0xe) != 0xa)
6798 return copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
6799 else if ((op1
& 0x21) == 0x01 && (op1
& 0x3a) != 0x00
6800 && (coproc
& 0xe) != 0xa)
6801 /* ldc/ldc2 imm/lit. */
6802 return copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
6803 else if ((op1
& 0x3e) == 0x00)
6804 return copy_undef (gdbarch
, insn
, dsc
);
6805 else if ((op1
& 0x3e) == 0x04 && (coproc
& 0xe) == 0xa)
6806 return copy_unmodified (gdbarch
, insn
, "neon 64bit xfer", dsc
);
6807 else if (op1
== 0x04 && (coproc
& 0xe) != 0xa)
6808 return copy_unmodified (gdbarch
, insn
, "mcrr/mcrr2", dsc
);
6809 else if (op1
== 0x05 && (coproc
& 0xe) != 0xa)
6810 return copy_unmodified (gdbarch
, insn
, "mrrc/mrrc2", dsc
);
6811 else if ((op1
& 0x30) == 0x20 && !op
)
6813 if ((coproc
& 0xe) == 0xa)
6814 return copy_unmodified (gdbarch
, insn
, "vfp dataproc", dsc
);
6816 return copy_unmodified (gdbarch
, insn
, "cdp/cdp2", dsc
);
6818 else if ((op1
& 0x30) == 0x20 && op
)
6819 return copy_unmodified (gdbarch
, insn
, "neon 8/16/32 bit xfer", dsc
);
6820 else if ((op1
& 0x31) == 0x20 && op
&& (coproc
& 0xe) != 0xa)
6821 return copy_unmodified (gdbarch
, insn
, "mcr/mcr2", dsc
);
6822 else if ((op1
& 0x31) == 0x21 && op
&& (coproc
& 0xe) != 0xa)
6823 return copy_unmodified (gdbarch
, insn
, "mrc/mrc2", dsc
);
6824 else if ((op1
& 0x30) == 0x30)
6825 return copy_svc (gdbarch
, insn
, to
, regs
, dsc
);
6827 return copy_undef (gdbarch
, insn
, dsc
); /* Possibly unreachable. */
6831 thumb_process_displaced_insn (struct gdbarch
*gdbarch
, CORE_ADDR from
,
6832 CORE_ADDR to
, struct regcache
*regs
,
6833 struct displaced_step_closure
*dsc
)
6835 error (_("Displaced stepping is only supported in ARM mode"));
6839 arm_process_displaced_insn (struct gdbarch
*gdbarch
, CORE_ADDR from
,
6840 CORE_ADDR to
, struct regcache
*regs
,
6841 struct displaced_step_closure
*dsc
)
6844 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
6847 /* Most displaced instructions use a 1-instruction scratch space, so set this
6848 here and override below if/when necessary. */
6850 dsc
->insn_addr
= from
;
6851 dsc
->scratch_base
= to
;
6852 dsc
->cleanup
= NULL
;
6853 dsc
->wrote_to_pc
= 0;
6855 if (!displaced_in_arm_mode (regs
))
6856 return thumb_process_displaced_insn (gdbarch
, from
, to
, regs
, dsc
);
6860 insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
6861 if (debug_displaced
)
6862 fprintf_unfiltered (gdb_stdlog
, "displaced: stepping insn %.8lx "
6863 "at %.8lx\n", (unsigned long) insn
,
6864 (unsigned long) from
);
6866 if ((insn
& 0xf0000000) == 0xf0000000)
6867 err
= decode_unconditional (gdbarch
, insn
, regs
, dsc
);
6868 else switch (((insn
& 0x10) >> 4) | ((insn
& 0xe000000) >> 24))
6870 case 0x0: case 0x1: case 0x2: case 0x3:
6871 err
= decode_dp_misc (gdbarch
, insn
, regs
, dsc
);
6874 case 0x4: case 0x5: case 0x6:
6875 err
= decode_ld_st_word_ubyte (gdbarch
, insn
, regs
, dsc
);
6879 err
= decode_media (gdbarch
, insn
, dsc
);
6882 case 0x8: case 0x9: case 0xa: case 0xb:
6883 err
= decode_b_bl_ldmstm (gdbarch
, insn
, regs
, dsc
);
6886 case 0xc: case 0xd: case 0xe: case 0xf:
6887 err
= decode_svc_copro (gdbarch
, insn
, to
, regs
, dsc
);
6892 internal_error (__FILE__
, __LINE__
,
6893 _("arm_process_displaced_insn: Instruction decode error"));
6896 /* Actually set up the scratch space for a displaced instruction. */
6899 arm_displaced_init_closure (struct gdbarch
*gdbarch
, CORE_ADDR from
,
6900 CORE_ADDR to
, struct displaced_step_closure
*dsc
)
6902 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
6903 unsigned int i
, len
, offset
;
6904 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
6905 int size
= dsc
->is_thumb
? 2 : 4;
6906 const unsigned char *bkp_insn
;
6909 /* Poke modified instruction(s). */
6910 for (i
= 0; i
< dsc
->numinsns
; i
++)
6912 if (debug_displaced
)
6914 fprintf_unfiltered (gdb_stdlog
, "displaced: writing insn ");
6916 fprintf_unfiltered (gdb_stdlog
, "%.8lx",
6919 fprintf_unfiltered (gdb_stdlog
, "%.4x",
6920 (unsigned short)dsc
->modinsn
[i
]);
6922 fprintf_unfiltered (gdb_stdlog
, " at %.8lx\n",
6923 (unsigned long) to
+ offset
);
6926 write_memory_unsigned_integer (to
+ offset
, size
,
6927 byte_order_for_code
,
6932 /* Choose the correct breakpoint instruction. */
6935 bkp_insn
= tdep
->thumb_breakpoint
;
6936 len
= tdep
->thumb_breakpoint_size
;
6940 bkp_insn
= tdep
->arm_breakpoint
;
6941 len
= tdep
->arm_breakpoint_size
;
6944 /* Put breakpoint afterwards. */
6945 write_memory (to
+ offset
, bkp_insn
, len
);
6947 if (debug_displaced
)
6948 fprintf_unfiltered (gdb_stdlog
, "displaced: copy %s->%s: ",
6949 paddress (gdbarch
, from
), paddress (gdbarch
, to
));
6952 /* Entry point for copying an instruction into scratch space for displaced
6955 struct displaced_step_closure
*
6956 arm_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
6957 CORE_ADDR from
, CORE_ADDR to
,
6958 struct regcache
*regs
)
6960 struct displaced_step_closure
*dsc
6961 = xmalloc (sizeof (struct displaced_step_closure
));
6962 arm_process_displaced_insn (gdbarch
, from
, to
, regs
, dsc
);
6963 arm_displaced_init_closure (gdbarch
, from
, to
, dsc
);
6968 /* Entry point for cleaning things up after a displaced instruction has been
6972 arm_displaced_step_fixup (struct gdbarch
*gdbarch
,
6973 struct displaced_step_closure
*dsc
,
6974 CORE_ADDR from
, CORE_ADDR to
,
6975 struct regcache
*regs
)
6978 dsc
->cleanup (gdbarch
, regs
, dsc
);
6980 if (!dsc
->wrote_to_pc
)
6981 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
6982 dsc
->insn_addr
+ dsc
->insn_size
);
6986 #include "bfd-in2.h"
6987 #include "libcoff.h"
6990 gdb_print_insn_arm (bfd_vma memaddr
, disassemble_info
*info
)
6992 struct gdbarch
*gdbarch
= info
->application_data
;
6994 if (arm_pc_is_thumb (gdbarch
, memaddr
))
6996 static asymbol
*asym
;
6997 static combined_entry_type ce
;
6998 static struct coff_symbol_struct csym
;
6999 static struct bfd fake_bfd
;
7000 static bfd_target fake_target
;
7002 if (csym
.native
== NULL
)
7004 /* Create a fake symbol vector containing a Thumb symbol.
7005 This is solely so that the code in print_insn_little_arm()
7006 and print_insn_big_arm() in opcodes/arm-dis.c will detect
7007 the presence of a Thumb symbol and switch to decoding
7008 Thumb instructions. */
7010 fake_target
.flavour
= bfd_target_coff_flavour
;
7011 fake_bfd
.xvec
= &fake_target
;
7012 ce
.u
.syment
.n_sclass
= C_THUMBEXTFUNC
;
7014 csym
.symbol
.the_bfd
= &fake_bfd
;
7015 csym
.symbol
.name
= "fake";
7016 asym
= (asymbol
*) & csym
;
7019 memaddr
= UNMAKE_THUMB_ADDR (memaddr
);
7020 info
->symbols
= &asym
;
7023 info
->symbols
= NULL
;
7025 if (info
->endian
== BFD_ENDIAN_BIG
)
7026 return print_insn_big_arm (memaddr
, info
);
7028 return print_insn_little_arm (memaddr
, info
);
7031 /* The following define instruction sequences that will cause ARM
7032 cpu's to take an undefined instruction trap. These are used to
7033 signal a breakpoint to GDB.
7035 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
7036 modes. A different instruction is required for each mode. The ARM
7037 cpu's can also be big or little endian. Thus four different
7038 instructions are needed to support all cases.
7040 Note: ARMv4 defines several new instructions that will take the
7041 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7042 not in fact add the new instructions. The new undefined
7043 instructions in ARMv4 are all instructions that had no defined
7044 behaviour in earlier chips. There is no guarantee that they will
7045 raise an exception, but may be treated as NOP's. In practice, it
7046 may only be safe to rely on instructions matching:
7048 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7049 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7050 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7052 Even this may only be true if the condition predicate is true. The
7053 following use a condition predicate of ALWAYS so it is always TRUE.
7055 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7056 and NetBSD all use a software interrupt rather than an undefined
7057 instruction to force a trap. This can be handled by the
7058 abi-specific code during establishment of the gdbarch vector. */
7060 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
7061 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
7062 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
7063 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
7065 static const char arm_default_arm_le_breakpoint
[] = ARM_LE_BREAKPOINT
;
7066 static const char arm_default_arm_be_breakpoint
[] = ARM_BE_BREAKPOINT
;
7067 static const char arm_default_thumb_le_breakpoint
[] = THUMB_LE_BREAKPOINT
;
7068 static const char arm_default_thumb_be_breakpoint
[] = THUMB_BE_BREAKPOINT
;
7070 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
7071 the program counter value to determine whether a 16-bit or 32-bit
7072 breakpoint should be used. It returns a pointer to a string of
7073 bytes that encode a breakpoint instruction, stores the length of
7074 the string to *lenptr, and adjusts the program counter (if
7075 necessary) to point to the actual memory location where the
7076 breakpoint should be inserted. */
7078 static const unsigned char *
7079 arm_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
, int *lenptr
)
7081 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
7082 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
7084 if (arm_pc_is_thumb (gdbarch
, *pcptr
))
7086 *pcptr
= UNMAKE_THUMB_ADDR (*pcptr
);
7088 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
7089 check whether we are replacing a 32-bit instruction. */
7090 if (tdep
->thumb2_breakpoint
!= NULL
)
7093 if (target_read_memory (*pcptr
, buf
, 2) == 0)
7095 unsigned short inst1
;
7096 inst1
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
7097 if ((inst1
& 0xe000) == 0xe000 && (inst1
& 0x1800) != 0)
7099 *lenptr
= tdep
->thumb2_breakpoint_size
;
7100 return tdep
->thumb2_breakpoint
;
7105 *lenptr
= tdep
->thumb_breakpoint_size
;
7106 return tdep
->thumb_breakpoint
;
7110 *lenptr
= tdep
->arm_breakpoint_size
;
7111 return tdep
->arm_breakpoint
;
7116 arm_remote_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
7119 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
7121 arm_breakpoint_from_pc (gdbarch
, pcptr
, kindptr
);
7123 if (arm_pc_is_thumb (gdbarch
, *pcptr
) && *kindptr
== 4)
7124 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
7125 that this is not confused with a 32-bit ARM breakpoint. */
7129 /* Extract from an array REGBUF containing the (raw) register state a
7130 function return value of type TYPE, and copy that, in virtual
7131 format, into VALBUF. */
7134 arm_extract_return_value (struct type
*type
, struct regcache
*regs
,
7137 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
7138 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
7140 if (TYPE_CODE_FLT
== TYPE_CODE (type
))
7142 switch (gdbarch_tdep (gdbarch
)->fp_model
)
7146 /* The value is in register F0 in internal format. We need to
7147 extract the raw value and then convert it to the desired
7149 bfd_byte tmpbuf
[FP_REGISTER_SIZE
];
7151 regcache_cooked_read (regs
, ARM_F0_REGNUM
, tmpbuf
);
7152 convert_from_extended (floatformat_from_type (type
), tmpbuf
,
7153 valbuf
, gdbarch_byte_order (gdbarch
));
7157 case ARM_FLOAT_SOFT_FPA
:
7158 case ARM_FLOAT_SOFT_VFP
:
7159 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7160 not using the VFP ABI code. */
7162 regcache_cooked_read (regs
, ARM_A1_REGNUM
, valbuf
);
7163 if (TYPE_LENGTH (type
) > 4)
7164 regcache_cooked_read (regs
, ARM_A1_REGNUM
+ 1,
7165 valbuf
+ INT_REGISTER_SIZE
);
7169 internal_error (__FILE__
, __LINE__
,
7170 _("arm_extract_return_value: "
7171 "Floating point model not supported"));
7175 else if (TYPE_CODE (type
) == TYPE_CODE_INT
7176 || TYPE_CODE (type
) == TYPE_CODE_CHAR
7177 || TYPE_CODE (type
) == TYPE_CODE_BOOL
7178 || TYPE_CODE (type
) == TYPE_CODE_PTR
7179 || TYPE_CODE (type
) == TYPE_CODE_REF
7180 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
7182 /* If the type is a plain integer, then the access is
7183 straight-forward. Otherwise we have to play around a bit
7185 int len
= TYPE_LENGTH (type
);
7186 int regno
= ARM_A1_REGNUM
;
7191 /* By using store_unsigned_integer we avoid having to do
7192 anything special for small big-endian values. */
7193 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
7194 store_unsigned_integer (valbuf
,
7195 (len
> INT_REGISTER_SIZE
7196 ? INT_REGISTER_SIZE
: len
),
7198 len
-= INT_REGISTER_SIZE
;
7199 valbuf
+= INT_REGISTER_SIZE
;
7204 /* For a structure or union the behaviour is as if the value had
7205 been stored to word-aligned memory and then loaded into
7206 registers with 32-bit load instruction(s). */
7207 int len
= TYPE_LENGTH (type
);
7208 int regno
= ARM_A1_REGNUM
;
7209 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
7213 regcache_cooked_read (regs
, regno
++, tmpbuf
);
7214 memcpy (valbuf
, tmpbuf
,
7215 len
> INT_REGISTER_SIZE
? INT_REGISTER_SIZE
: len
);
7216 len
-= INT_REGISTER_SIZE
;
7217 valbuf
+= INT_REGISTER_SIZE
;
7223 /* Will a function return an aggregate type in memory or in a
7224 register? Return 0 if an aggregate type can be returned in a
7225 register, 1 if it must be returned in memory. */
7228 arm_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
7231 enum type_code code
;
7233 CHECK_TYPEDEF (type
);
7235 /* In the ARM ABI, "integer" like aggregate types are returned in
7236 registers. For an aggregate type to be integer like, its size
7237 must be less than or equal to INT_REGISTER_SIZE and the
7238 offset of each addressable subfield must be zero. Note that bit
7239 fields are not addressable, and all addressable subfields of
7240 unions always start at offset zero.
7242 This function is based on the behaviour of GCC 2.95.1.
7243 See: gcc/arm.c: arm_return_in_memory() for details.
7245 Note: All versions of GCC before GCC 2.95.2 do not set up the
7246 parameters correctly for a function returning the following
7247 structure: struct { float f;}; This should be returned in memory,
7248 not a register. Richard Earnshaw sent me a patch, but I do not
7249 know of any way to detect if a function like the above has been
7250 compiled with the correct calling convention. */
7252 /* All aggregate types that won't fit in a register must be returned
7254 if (TYPE_LENGTH (type
) > INT_REGISTER_SIZE
)
7259 /* The AAPCS says all aggregates not larger than a word are returned
7261 if (gdbarch_tdep (gdbarch
)->arm_abi
!= ARM_ABI_APCS
)
7264 /* The only aggregate types that can be returned in a register are
7265 structs and unions. Arrays must be returned in memory. */
7266 code
= TYPE_CODE (type
);
7267 if ((TYPE_CODE_STRUCT
!= code
) && (TYPE_CODE_UNION
!= code
))
7272 /* Assume all other aggregate types can be returned in a register.
7273 Run a check for structures, unions and arrays. */
7276 if ((TYPE_CODE_STRUCT
== code
) || (TYPE_CODE_UNION
== code
))
7279 /* Need to check if this struct/union is "integer" like. For
7280 this to be true, its size must be less than or equal to
7281 INT_REGISTER_SIZE and the offset of each addressable
7282 subfield must be zero. Note that bit fields are not
7283 addressable, and unions always start at offset zero. If any
7284 of the subfields is a floating point type, the struct/union
7285 cannot be an integer type. */
7287 /* For each field in the object, check:
7288 1) Is it FP? --> yes, nRc = 1;
7289 2) Is it addressable (bitpos != 0) and
7290 not packed (bitsize == 0)?
7294 for (i
= 0; i
< TYPE_NFIELDS (type
); i
++)
7296 enum type_code field_type_code
;
7297 field_type_code
= TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type
,
7300 /* Is it a floating point type field? */
7301 if (field_type_code
== TYPE_CODE_FLT
)
7307 /* If bitpos != 0, then we have to care about it. */
7308 if (TYPE_FIELD_BITPOS (type
, i
) != 0)
7310 /* Bitfields are not addressable. If the field bitsize is
7311 zero, then the field is not packed. Hence it cannot be
7312 a bitfield or any other packed type. */
7313 if (TYPE_FIELD_BITSIZE (type
, i
) == 0)
7325 /* Write into appropriate registers a function return value of type
7326 TYPE, given in virtual format. */
7329 arm_store_return_value (struct type
*type
, struct regcache
*regs
,
7330 const gdb_byte
*valbuf
)
7332 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
7333 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
7335 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
7337 char buf
[MAX_REGISTER_SIZE
];
7339 switch (gdbarch_tdep (gdbarch
)->fp_model
)
7343 convert_to_extended (floatformat_from_type (type
), buf
, valbuf
,
7344 gdbarch_byte_order (gdbarch
));
7345 regcache_cooked_write (regs
, ARM_F0_REGNUM
, buf
);
7348 case ARM_FLOAT_SOFT_FPA
:
7349 case ARM_FLOAT_SOFT_VFP
:
7350 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7351 not using the VFP ABI code. */
7353 regcache_cooked_write (regs
, ARM_A1_REGNUM
, valbuf
);
7354 if (TYPE_LENGTH (type
) > 4)
7355 regcache_cooked_write (regs
, ARM_A1_REGNUM
+ 1,
7356 valbuf
+ INT_REGISTER_SIZE
);
7360 internal_error (__FILE__
, __LINE__
,
7361 _("arm_store_return_value: Floating "
7362 "point model not supported"));
7366 else if (TYPE_CODE (type
) == TYPE_CODE_INT
7367 || TYPE_CODE (type
) == TYPE_CODE_CHAR
7368 || TYPE_CODE (type
) == TYPE_CODE_BOOL
7369 || TYPE_CODE (type
) == TYPE_CODE_PTR
7370 || TYPE_CODE (type
) == TYPE_CODE_REF
7371 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
7373 if (TYPE_LENGTH (type
) <= 4)
7375 /* Values of one word or less are zero/sign-extended and
7377 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
7378 LONGEST val
= unpack_long (type
, valbuf
);
7380 store_signed_integer (tmpbuf
, INT_REGISTER_SIZE
, byte_order
, val
);
7381 regcache_cooked_write (regs
, ARM_A1_REGNUM
, tmpbuf
);
7385 /* Integral values greater than one word are stored in consecutive
7386 registers starting with r0. This will always be a multiple of
7387 the regiser size. */
7388 int len
= TYPE_LENGTH (type
);
7389 int regno
= ARM_A1_REGNUM
;
7393 regcache_cooked_write (regs
, regno
++, valbuf
);
7394 len
-= INT_REGISTER_SIZE
;
7395 valbuf
+= INT_REGISTER_SIZE
;
7401 /* For a structure or union the behaviour is as if the value had
7402 been stored to word-aligned memory and then loaded into
7403 registers with 32-bit load instruction(s). */
7404 int len
= TYPE_LENGTH (type
);
7405 int regno
= ARM_A1_REGNUM
;
7406 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
7410 memcpy (tmpbuf
, valbuf
,
7411 len
> INT_REGISTER_SIZE
? INT_REGISTER_SIZE
: len
);
7412 regcache_cooked_write (regs
, regno
++, tmpbuf
);
7413 len
-= INT_REGISTER_SIZE
;
7414 valbuf
+= INT_REGISTER_SIZE
;
7420 /* Handle function return values. */
7422 static enum return_value_convention
7423 arm_return_value (struct gdbarch
*gdbarch
, struct type
*func_type
,
7424 struct type
*valtype
, struct regcache
*regcache
,
7425 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
7427 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
7428 enum arm_vfp_cprc_base_type vfp_base_type
;
7431 if (arm_vfp_abi_for_function (gdbarch
, func_type
)
7432 && arm_vfp_call_candidate (valtype
, &vfp_base_type
, &vfp_base_count
))
7434 int reg_char
= arm_vfp_cprc_reg_char (vfp_base_type
);
7435 int unit_length
= arm_vfp_cprc_unit_length (vfp_base_type
);
7437 for (i
= 0; i
< vfp_base_count
; i
++)
7439 if (reg_char
== 'q')
7442 arm_neon_quad_write (gdbarch
, regcache
, i
,
7443 writebuf
+ i
* unit_length
);
7446 arm_neon_quad_read (gdbarch
, regcache
, i
,
7447 readbuf
+ i
* unit_length
);
7454 sprintf (name_buf
, "%c%d", reg_char
, i
);
7455 regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
7458 regcache_cooked_write (regcache
, regnum
,
7459 writebuf
+ i
* unit_length
);
7461 regcache_cooked_read (regcache
, regnum
,
7462 readbuf
+ i
* unit_length
);
7465 return RETURN_VALUE_REGISTER_CONVENTION
;
7468 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
7469 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
7470 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
7472 if (tdep
->struct_return
== pcc_struct_return
7473 || arm_return_in_memory (gdbarch
, valtype
))
7474 return RETURN_VALUE_STRUCT_CONVENTION
;
7478 arm_store_return_value (valtype
, regcache
, writebuf
);
7481 arm_extract_return_value (valtype
, regcache
, readbuf
);
7483 return RETURN_VALUE_REGISTER_CONVENTION
;
7488 arm_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
7490 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
7491 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
7492 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
7494 char buf
[INT_REGISTER_SIZE
];
7496 jb_addr
= get_frame_register_unsigned (frame
, ARM_A1_REGNUM
);
7498 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
7502 *pc
= extract_unsigned_integer (buf
, INT_REGISTER_SIZE
, byte_order
);
7506 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
7507 return the target PC. Otherwise return 0. */
7510 arm_skip_stub (struct frame_info
*frame
, CORE_ADDR pc
)
7514 CORE_ADDR start_addr
;
7516 /* Find the starting address and name of the function containing the PC. */
7517 if (find_pc_partial_function (pc
, &name
, &start_addr
, NULL
) == 0)
7520 /* If PC is in a Thumb call or return stub, return the address of the
7521 target PC, which is in a register. The thunk functions are called
7522 _call_via_xx, where x is the register name. The possible names
7523 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
7524 functions, named __ARM_call_via_r[0-7]. */
7525 if (strncmp (name
, "_call_via_", 10) == 0
7526 || strncmp (name
, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
7528 /* Use the name suffix to determine which register contains the
7530 static char *table
[15] =
7531 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
7532 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
7535 int offset
= strlen (name
) - 2;
7537 for (regno
= 0; regno
<= 14; regno
++)
7538 if (strcmp (&name
[offset
], table
[regno
]) == 0)
7539 return get_frame_register_unsigned (frame
, regno
);
7542 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
7543 non-interworking calls to foo. We could decode the stubs
7544 to find the target but it's easier to use the symbol table. */
7545 namelen
= strlen (name
);
7546 if (name
[0] == '_' && name
[1] == '_'
7547 && ((namelen
> 2 + strlen ("_from_thumb")
7548 && strncmp (name
+ namelen
- strlen ("_from_thumb"), "_from_thumb",
7549 strlen ("_from_thumb")) == 0)
7550 || (namelen
> 2 + strlen ("_from_arm")
7551 && strncmp (name
+ namelen
- strlen ("_from_arm"), "_from_arm",
7552 strlen ("_from_arm")) == 0)))
7555 int target_len
= namelen
- 2;
7556 struct minimal_symbol
*minsym
;
7557 struct objfile
*objfile
;
7558 struct obj_section
*sec
;
7560 if (name
[namelen
- 1] == 'b')
7561 target_len
-= strlen ("_from_thumb");
7563 target_len
-= strlen ("_from_arm");
7565 target_name
= alloca (target_len
+ 1);
7566 memcpy (target_name
, name
+ 2, target_len
);
7567 target_name
[target_len
] = '\0';
7569 sec
= find_pc_section (pc
);
7570 objfile
= (sec
== NULL
) ? NULL
: sec
->objfile
;
7571 minsym
= lookup_minimal_symbol (target_name
, NULL
, objfile
);
7573 return SYMBOL_VALUE_ADDRESS (minsym
);
7578 return 0; /* not a stub */
7582 set_arm_command (char *args
, int from_tty
)
7584 printf_unfiltered (_("\
7585 \"set arm\" must be followed by an apporpriate subcommand.\n"));
7586 help_list (setarmcmdlist
, "set arm ", all_commands
, gdb_stdout
);
7590 show_arm_command (char *args
, int from_tty
)
7592 cmd_show_list (showarmcmdlist
, from_tty
, "");
7596 arm_update_current_architecture (void)
7598 struct gdbarch_info info
;
7600 /* If the current architecture is not ARM, we have nothing to do. */
7601 if (gdbarch_bfd_arch_info (target_gdbarch
)->arch
!= bfd_arch_arm
)
7604 /* Update the architecture. */
7605 gdbarch_info_init (&info
);
7607 if (!gdbarch_update_p (info
))
7608 internal_error (__FILE__
, __LINE__
, _("could not update architecture"));
7612 set_fp_model_sfunc (char *args
, int from_tty
,
7613 struct cmd_list_element
*c
)
7615 enum arm_float_model fp_model
;
7617 for (fp_model
= ARM_FLOAT_AUTO
; fp_model
!= ARM_FLOAT_LAST
; fp_model
++)
7618 if (strcmp (current_fp_model
, fp_model_strings
[fp_model
]) == 0)
7620 arm_fp_model
= fp_model
;
7624 if (fp_model
== ARM_FLOAT_LAST
)
7625 internal_error (__FILE__
, __LINE__
, _("Invalid fp model accepted: %s."),
7628 arm_update_current_architecture ();
7632 show_fp_model (struct ui_file
*file
, int from_tty
,
7633 struct cmd_list_element
*c
, const char *value
)
7635 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
7637 if (arm_fp_model
== ARM_FLOAT_AUTO
7638 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
7639 fprintf_filtered (file
, _("\
7640 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
7641 fp_model_strings
[tdep
->fp_model
]);
7643 fprintf_filtered (file
, _("\
7644 The current ARM floating point model is \"%s\".\n"),
7645 fp_model_strings
[arm_fp_model
]);
7649 arm_set_abi (char *args
, int from_tty
,
7650 struct cmd_list_element
*c
)
7652 enum arm_abi_kind arm_abi
;
7654 for (arm_abi
= ARM_ABI_AUTO
; arm_abi
!= ARM_ABI_LAST
; arm_abi
++)
7655 if (strcmp (arm_abi_string
, arm_abi_strings
[arm_abi
]) == 0)
7657 arm_abi_global
= arm_abi
;
7661 if (arm_abi
== ARM_ABI_LAST
)
7662 internal_error (__FILE__
, __LINE__
, _("Invalid ABI accepted: %s."),
7665 arm_update_current_architecture ();
7669 arm_show_abi (struct ui_file
*file
, int from_tty
,
7670 struct cmd_list_element
*c
, const char *value
)
7672 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
7674 if (arm_abi_global
== ARM_ABI_AUTO
7675 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
7676 fprintf_filtered (file
, _("\
7677 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
7678 arm_abi_strings
[tdep
->arm_abi
]);
7680 fprintf_filtered (file
, _("The current ARM ABI is \"%s\".\n"),
7685 arm_show_fallback_mode (struct ui_file
*file
, int from_tty
,
7686 struct cmd_list_element
*c
, const char *value
)
7688 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
7690 fprintf_filtered (file
,
7691 _("The current execution mode assumed "
7692 "(when symbols are unavailable) is \"%s\".\n"),
7693 arm_fallback_mode_string
);
7697 arm_show_force_mode (struct ui_file
*file
, int from_tty
,
7698 struct cmd_list_element
*c
, const char *value
)
7700 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
7702 fprintf_filtered (file
,
7703 _("The current execution mode assumed "
7704 "(even when symbols are available) is \"%s\".\n"),
7705 arm_force_mode_string
);
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  set_disassembly_style ();
}
7720 /* Return the ARM register name corresponding to register I. */
7722 arm_register_name (struct gdbarch
*gdbarch
, int i
)
7724 const int num_regs
= gdbarch_num_regs (gdbarch
);
7726 if (gdbarch_tdep (gdbarch
)->have_vfp_pseudos
7727 && i
>= num_regs
&& i
< num_regs
+ 32)
7729 static const char *const vfp_pseudo_names
[] = {
7730 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
7731 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
7732 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
7733 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
7736 return vfp_pseudo_names
[i
- num_regs
];
7739 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
7740 && i
>= num_regs
+ 32 && i
< num_regs
+ 32 + 16)
7742 static const char *const neon_pseudo_names
[] = {
7743 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
7744 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
7747 return neon_pseudo_names
[i
- num_regs
- 32];
7750 if (i
>= ARRAY_SIZE (arm_register_names
))
7751 /* These registers are only supported on targets which supply
7752 an XML description. */
7755 return arm_register_names
[i
];
7759 set_disassembly_style (void)
7763 /* Find the style that the user wants. */
7764 for (current
= 0; current
< num_disassembly_options
; current
++)
7765 if (disassembly_style
== valid_disassembly_styles
[current
])
7767 gdb_assert (current
< num_disassembly_options
);
7769 /* Synchronize the disassembler. */
7770 set_arm_regname_option (current
);
7773 /* Test whether the coff symbol specific value corresponds to a Thumb
7777 coff_sym_is_thumb (int val
)
7779 return (val
== C_THUMBEXT
7780 || val
== C_THUMBSTAT
7781 || val
== C_THUMBEXTFUNC
7782 || val
== C_THUMBSTATFUNC
7783 || val
== C_THUMBLABEL
);
7786 /* arm_coff_make_msymbol_special()
7787 arm_elf_make_msymbol_special()
7789 These functions test whether the COFF or ELF symbol corresponds to
7790 an address in thumb code, and set a "special" bit in a minimal
7791 symbol to indicate that it does. */
7794 arm_elf_make_msymbol_special(asymbol
*sym
, struct minimal_symbol
*msym
)
7796 /* Thumb symbols are of type STT_LOPROC, (synonymous with
7798 if (ELF_ST_TYPE (((elf_symbol_type
*)sym
)->internal_elf_sym
.st_info
)
7800 MSYMBOL_SET_SPECIAL (msym
);
/* Mark MSYM as Thumb when the COFF symbol value VAL denotes a Thumb
   symbol class.  */

static void
arm_coff_make_msymbol_special (int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
7811 arm_objfile_data_free (struct objfile
*objfile
, void *arg
)
7813 struct arm_per_objfile
*data
= arg
;
7816 for (i
= 0; i
< objfile
->obfd
->section_count
; i
++)
7817 VEC_free (arm_mapping_symbol_s
, data
->section_maps
[i
]);
7821 arm_record_special_symbol (struct gdbarch
*gdbarch
, struct objfile
*objfile
,
7824 const char *name
= bfd_asymbol_name (sym
);
7825 struct arm_per_objfile
*data
;
7826 VEC(arm_mapping_symbol_s
) **map_p
;
7827 struct arm_mapping_symbol new_map_sym
;
7829 gdb_assert (name
[0] == '$');
7830 if (name
[1] != 'a' && name
[1] != 't' && name
[1] != 'd')
7833 data
= objfile_data (objfile
, arm_objfile_data_key
);
7836 data
= OBSTACK_ZALLOC (&objfile
->objfile_obstack
,
7837 struct arm_per_objfile
);
7838 set_objfile_data (objfile
, arm_objfile_data_key
, data
);
7839 data
->section_maps
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
7840 objfile
->obfd
->section_count
,
7841 VEC(arm_mapping_symbol_s
) *);
7843 map_p
= &data
->section_maps
[bfd_get_section (sym
)->index
];
7845 new_map_sym
.value
= sym
->value
;
7846 new_map_sym
.type
= name
[1];
7848 /* Assume that most mapping symbols appear in order of increasing
7849 value. If they were randomly distributed, it would be faster to
7850 always push here and then sort at first use. */
7851 if (!VEC_empty (arm_mapping_symbol_s
, *map_p
))
7853 struct arm_mapping_symbol
*prev_map_sym
;
7855 prev_map_sym
= VEC_last (arm_mapping_symbol_s
, *map_p
);
7856 if (prev_map_sym
->value
>= sym
->value
)
7859 idx
= VEC_lower_bound (arm_mapping_symbol_s
, *map_p
, &new_map_sym
,
7860 arm_compare_mapping_symbols
);
7861 VEC_safe_insert (arm_mapping_symbol_s
, *map_p
, idx
, &new_map_sym
);
7866 VEC_safe_push (arm_mapping_symbol_s
, *map_p
, &new_map_sym
);
7870 arm_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
7872 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
7873 regcache_cooked_write_unsigned (regcache
, ARM_PC_REGNUM
, pc
);
7875 /* If necessary, set the T bit. */
7878 ULONGEST val
, t_bit
;
7879 regcache_cooked_read_unsigned (regcache
, ARM_PS_REGNUM
, &val
);
7880 t_bit
= arm_psr_thumb_bit (gdbarch
);
7881 if (arm_pc_is_thumb (gdbarch
, pc
))
7882 regcache_cooked_write_unsigned (regcache
, ARM_PS_REGNUM
,
7885 regcache_cooked_write_unsigned (regcache
, ARM_PS_REGNUM
,
7890 /* Read the contents of a NEON quad register, by reading from two
7891 double registers. This is used to implement the quad pseudo
7892 registers, and for argument passing in case the quad registers are
7893 missing; vectors are passed in quad registers when using the VFP
7894 ABI, even if a NEON unit is not present. REGNUM is the index of
7895 the quad register, in [0, 15]. */
7897 static enum register_status
7898 arm_neon_quad_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
7899 int regnum
, gdb_byte
*buf
)
7902 gdb_byte reg_buf
[8];
7903 int offset
, double_regnum
;
7904 enum register_status status
;
7906 sprintf (name_buf
, "d%d", regnum
<< 1);
7907 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
7910 /* d0 is always the least significant half of q0. */
7911 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
7916 status
= regcache_raw_read (regcache
, double_regnum
, reg_buf
);
7917 if (status
!= REG_VALID
)
7919 memcpy (buf
+ offset
, reg_buf
, 8);
7921 offset
= 8 - offset
;
7922 status
= regcache_raw_read (regcache
, double_regnum
+ 1, reg_buf
);
7923 if (status
!= REG_VALID
)
7925 memcpy (buf
+ offset
, reg_buf
, 8);
7930 static enum register_status
7931 arm_pseudo_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
7932 int regnum
, gdb_byte
*buf
)
7934 const int num_regs
= gdbarch_num_regs (gdbarch
);
7936 gdb_byte reg_buf
[8];
7937 int offset
, double_regnum
;
7939 gdb_assert (regnum
>= num_regs
);
7942 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
&& regnum
>= 32 && regnum
< 48)
7943 /* Quad-precision register. */
7944 return arm_neon_quad_read (gdbarch
, regcache
, regnum
- 32, buf
);
7947 enum register_status status
;
7949 /* Single-precision register. */
7950 gdb_assert (regnum
< 32);
7952 /* s0 is always the least significant half of d0. */
7953 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
7954 offset
= (regnum
& 1) ? 0 : 4;
7956 offset
= (regnum
& 1) ? 4 : 0;
7958 sprintf (name_buf
, "d%d", regnum
>> 1);
7959 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
7962 status
= regcache_raw_read (regcache
, double_regnum
, reg_buf
);
7963 if (status
== REG_VALID
)
7964 memcpy (buf
, reg_buf
+ offset
, 4);
7969 /* Store the contents of BUF to a NEON quad register, by writing to
7970 two double registers. This is used to implement the quad pseudo
7971 registers, and for argument passing in case the quad registers are
7972 missing; vectors are passed in quad registers when using the VFP
7973 ABI, even if a NEON unit is not present. REGNUM is the index
7974 of the quad register, in [0, 15]. */
7977 arm_neon_quad_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
7978 int regnum
, const gdb_byte
*buf
)
7981 gdb_byte reg_buf
[8];
7982 int offset
, double_regnum
;
7984 sprintf (name_buf
, "d%d", regnum
<< 1);
7985 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
7988 /* d0 is always the least significant half of q0. */
7989 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
7994 regcache_raw_write (regcache
, double_regnum
, buf
+ offset
);
7995 offset
= 8 - offset
;
7996 regcache_raw_write (regcache
, double_regnum
+ 1, buf
+ offset
);
8000 arm_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
8001 int regnum
, const gdb_byte
*buf
)
8003 const int num_regs
= gdbarch_num_regs (gdbarch
);
8005 gdb_byte reg_buf
[8];
8006 int offset
, double_regnum
;
8008 gdb_assert (regnum
>= num_regs
);
8011 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
&& regnum
>= 32 && regnum
< 48)
8012 /* Quad-precision register. */
8013 arm_neon_quad_write (gdbarch
, regcache
, regnum
- 32, buf
);
8016 /* Single-precision register. */
8017 gdb_assert (regnum
< 32);
8019 /* s0 is always the least significant half of d0. */
8020 if (gdbarch_byte_order (gdbarch
) == BFD_ENDIAN_BIG
)
8021 offset
= (regnum
& 1) ? 0 : 4;
8023 offset
= (regnum
& 1) ? 4 : 0;
8025 sprintf (name_buf
, "d%d", regnum
>> 1);
8026 double_regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
8029 regcache_raw_read (regcache
, double_regnum
, reg_buf
);
8030 memcpy (reg_buf
+ offset
, buf
, 4);
8031 regcache_raw_write (regcache
, double_regnum
, reg_buf
);
/* User-register read callback: BATON points at the register number to
   fetch from FRAME.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
8042 static enum gdb_osabi
8043 arm_elf_osabi_sniffer (bfd
*abfd
)
8045 unsigned int elfosabi
;
8046 enum gdb_osabi osabi
= GDB_OSABI_UNKNOWN
;
8048 elfosabi
= elf_elfheader (abfd
)->e_ident
[EI_OSABI
];
8050 if (elfosabi
== ELFOSABI_ARM
)
8051 /* GNU tools use this value. Check note sections in this case,
8053 bfd_map_over_sections (abfd
,
8054 generic_elf_osabi_sniff_abi_tag_sections
,
8057 /* Anything else will be handled by the generic ELF sniffer. */
8062 arm_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
8063 struct reggroup
*group
)
8065 /* FPS register's type is INT, but belongs to float_reggroup. Beside
8066 this, FPS register belongs to save_regroup, restore_reggroup, and
8067 all_reggroup, of course. */
8068 if (regnum
== ARM_FPS_REGNUM
)
8069 return (group
== float_reggroup
8070 || group
== save_reggroup
8071 || group
== restore_reggroup
8072 || group
== all_reggroup
);
8074 return default_register_reggroup_p (gdbarch
, regnum
, group
);
8078 /* Initialize the current architecture based on INFO. If possible,
8079 re-use an architecture from ARCHES, which is a list of
8080 architectures already created during this debugging session.
8082 Called e.g. at program startup, when reading a core file, and when
8083 reading a binary file. */
8085 static struct gdbarch
*
8086 arm_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
8088 struct gdbarch_tdep
*tdep
;
8089 struct gdbarch
*gdbarch
;
8090 struct gdbarch_list
*best_arch
;
8091 enum arm_abi_kind arm_abi
= arm_abi_global
;
8092 enum arm_float_model fp_model
= arm_fp_model
;
8093 struct tdesc_arch_data
*tdesc_data
= NULL
;
8095 int have_vfp_registers
= 0, have_vfp_pseudos
= 0, have_neon_pseudos
= 0;
8097 int have_fpa_registers
= 1;
8098 const struct target_desc
*tdesc
= info
.target_desc
;
8100 /* If we have an object to base this architecture on, try to determine
8103 if (arm_abi
== ARM_ABI_AUTO
&& info
.abfd
!= NULL
)
8105 int ei_osabi
, e_flags
;
8107 switch (bfd_get_flavour (info
.abfd
))
8109 case bfd_target_aout_flavour
:
8110 /* Assume it's an old APCS-style ABI. */
8111 arm_abi
= ARM_ABI_APCS
;
8114 case bfd_target_coff_flavour
:
8115 /* Assume it's an old APCS-style ABI. */
8117 arm_abi
= ARM_ABI_APCS
;
8120 case bfd_target_elf_flavour
:
8121 ei_osabi
= elf_elfheader (info
.abfd
)->e_ident
[EI_OSABI
];
8122 e_flags
= elf_elfheader (info
.abfd
)->e_flags
;
8124 if (ei_osabi
== ELFOSABI_ARM
)
8126 /* GNU tools used to use this value, but do not for EABI
8127 objects. There's nowhere to tag an EABI version
8128 anyway, so assume APCS. */
8129 arm_abi
= ARM_ABI_APCS
;
8131 else if (ei_osabi
== ELFOSABI_NONE
)
8133 int eabi_ver
= EF_ARM_EABI_VERSION (e_flags
);
8134 int attr_arch
, attr_profile
;
8138 case EF_ARM_EABI_UNKNOWN
:
8139 /* Assume GNU tools. */
8140 arm_abi
= ARM_ABI_APCS
;
8143 case EF_ARM_EABI_VER4
:
8144 case EF_ARM_EABI_VER5
:
8145 arm_abi
= ARM_ABI_AAPCS
;
8146 /* EABI binaries default to VFP float ordering.
8147 They may also contain build attributes that can
8148 be used to identify if the VFP argument-passing
8150 if (fp_model
== ARM_FLOAT_AUTO
)
8153 switch (bfd_elf_get_obj_attr_int (info
.abfd
,
8158 /* "The user intended FP parameter/result
8159 passing to conform to AAPCS, base
8161 fp_model
= ARM_FLOAT_SOFT_VFP
;
8164 /* "The user intended FP parameter/result
8165 passing to conform to AAPCS, VFP
8167 fp_model
= ARM_FLOAT_VFP
;
8170 /* "The user intended FP parameter/result
8171 passing to conform to tool chain-specific
8172 conventions" - we don't know any such
8173 conventions, so leave it as "auto". */
8176 /* Attribute value not mentioned in the
8177 October 2008 ABI, so leave it as
8182 fp_model
= ARM_FLOAT_SOFT_VFP
;
8188 /* Leave it as "auto". */
8189 warning (_("unknown ARM EABI version 0x%x"), eabi_ver
);
8194 /* Detect M-profile programs. This only works if the
8195 executable file includes build attributes; GCC does
8196 copy them to the executable, but e.g. RealView does
8198 attr_arch
= bfd_elf_get_obj_attr_int (info
.abfd
, OBJ_ATTR_PROC
,
8200 attr_profile
= bfd_elf_get_obj_attr_int (info
.abfd
,
8202 Tag_CPU_arch_profile
);
8203 /* GCC specifies the profile for v6-M; RealView only
8204 specifies the profile for architectures starting with
8205 V7 (as opposed to architectures with a tag
8206 numerically greater than TAG_CPU_ARCH_V7). */
8207 if (!tdesc_has_registers (tdesc
)
8208 && (attr_arch
== TAG_CPU_ARCH_V6_M
8209 || attr_arch
== TAG_CPU_ARCH_V6S_M
8210 || attr_profile
== 'M'))
8211 tdesc
= tdesc_arm_with_m
;
8215 if (fp_model
== ARM_FLOAT_AUTO
)
8217 int e_flags
= elf_elfheader (info
.abfd
)->e_flags
;
8219 switch (e_flags
& (EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
))
8222 /* Leave it as "auto". Strictly speaking this case
8223 means FPA, but almost nobody uses that now, and
8224 many toolchains fail to set the appropriate bits
8225 for the floating-point model they use. */
8227 case EF_ARM_SOFT_FLOAT
:
8228 fp_model
= ARM_FLOAT_SOFT_FPA
;
8230 case EF_ARM_VFP_FLOAT
:
8231 fp_model
= ARM_FLOAT_VFP
;
8233 case EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
:
8234 fp_model
= ARM_FLOAT_SOFT_VFP
;
8239 if (e_flags
& EF_ARM_BE8
)
8240 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
8245 /* Leave it as "auto". */
8250 /* Check any target description for validity. */
8251 if (tdesc_has_registers (tdesc
))
8253 /* For most registers we require GDB's default names; but also allow
8254 the numeric names for sp / lr / pc, as a convenience. */
8255 static const char *const arm_sp_names
[] = { "r13", "sp", NULL
};
8256 static const char *const arm_lr_names
[] = { "r14", "lr", NULL
};
8257 static const char *const arm_pc_names
[] = { "r15", "pc", NULL
};
8259 const struct tdesc_feature
*feature
;
8262 feature
= tdesc_find_feature (tdesc
,
8263 "org.gnu.gdb.arm.core");
8264 if (feature
== NULL
)
8266 feature
= tdesc_find_feature (tdesc
,
8267 "org.gnu.gdb.arm.m-profile");
8268 if (feature
== NULL
)
8274 tdesc_data
= tdesc_data_alloc ();
8277 for (i
= 0; i
< ARM_SP_REGNUM
; i
++)
8278 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
, i
,
8279 arm_register_names
[i
]);
8280 valid_p
&= tdesc_numbered_register_choices (feature
, tdesc_data
,
8283 valid_p
&= tdesc_numbered_register_choices (feature
, tdesc_data
,
8286 valid_p
&= tdesc_numbered_register_choices (feature
, tdesc_data
,
8290 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
8291 ARM_PS_REGNUM
, "xpsr");
8293 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
8294 ARM_PS_REGNUM
, "cpsr");
8298 tdesc_data_cleanup (tdesc_data
);
8302 feature
= tdesc_find_feature (tdesc
,
8303 "org.gnu.gdb.arm.fpa");
8304 if (feature
!= NULL
)
8307 for (i
= ARM_F0_REGNUM
; i
<= ARM_FPS_REGNUM
; i
++)
8308 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
, i
,
8309 arm_register_names
[i
]);
8312 tdesc_data_cleanup (tdesc_data
);
8317 have_fpa_registers
= 0;
8319 feature
= tdesc_find_feature (tdesc
,
8320 "org.gnu.gdb.xscale.iwmmxt");
8321 if (feature
!= NULL
)
8323 static const char *const iwmmxt_names
[] = {
8324 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
8325 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
8326 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
8327 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
8331 for (i
= ARM_WR0_REGNUM
; i
<= ARM_WR15_REGNUM
; i
++)
8333 &= tdesc_numbered_register (feature
, tdesc_data
, i
,
8334 iwmmxt_names
[i
- ARM_WR0_REGNUM
]);
8336 /* Check for the control registers, but do not fail if they
8338 for (i
= ARM_WC0_REGNUM
; i
<= ARM_WCASF_REGNUM
; i
++)
8339 tdesc_numbered_register (feature
, tdesc_data
, i
,
8340 iwmmxt_names
[i
- ARM_WR0_REGNUM
]);
8342 for (i
= ARM_WCGR0_REGNUM
; i
<= ARM_WCGR3_REGNUM
; i
++)
8344 &= tdesc_numbered_register (feature
, tdesc_data
, i
,
8345 iwmmxt_names
[i
- ARM_WR0_REGNUM
]);
8349 tdesc_data_cleanup (tdesc_data
);
8354 /* If we have a VFP unit, check whether the single precision registers
8355 are present. If not, then we will synthesize them as pseudo
8357 feature
= tdesc_find_feature (tdesc
,
8358 "org.gnu.gdb.arm.vfp");
8359 if (feature
!= NULL
)
8361 static const char *const vfp_double_names
[] = {
8362 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
8363 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
8364 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
8365 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
8368 /* Require the double precision registers. There must be either
8371 for (i
= 0; i
< 32; i
++)
8373 valid_p
&= tdesc_numbered_register (feature
, tdesc_data
,
8375 vfp_double_names
[i
]);
8380 if (!valid_p
&& i
!= 16)
8382 tdesc_data_cleanup (tdesc_data
);
8386 if (tdesc_unnumbered_register (feature
, "s0") == 0)
8387 have_vfp_pseudos
= 1;
8389 have_vfp_registers
= 1;
8391 /* If we have VFP, also check for NEON. The architecture allows
8392 NEON without VFP (integer vector operations only), but GDB
8393 does not support that. */
8394 feature
= tdesc_find_feature (tdesc
,
8395 "org.gnu.gdb.arm.neon");
8396 if (feature
!= NULL
)
8398 /* NEON requires 32 double-precision registers. */
8401 tdesc_data_cleanup (tdesc_data
);
8405 /* If there are quad registers defined by the stub, use
8406 their type; otherwise (normally) provide them with
8407 the default type. */
8408 if (tdesc_unnumbered_register (feature
, "q0") == 0)
8409 have_neon_pseudos
= 1;
8416 /* If there is already a candidate, use it. */
8417 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
8419 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
8421 if (arm_abi
!= ARM_ABI_AUTO
8422 && arm_abi
!= gdbarch_tdep (best_arch
->gdbarch
)->arm_abi
)
8425 if (fp_model
!= ARM_FLOAT_AUTO
8426 && fp_model
!= gdbarch_tdep (best_arch
->gdbarch
)->fp_model
)
8429 /* There are various other properties in tdep that we do not
8430 need to check here: those derived from a target description,
8431 since gdbarches with a different target description are
8432 automatically disqualified. */
8434 /* Do check is_m, though, since it might come from the binary. */
8435 if (is_m
!= gdbarch_tdep (best_arch
->gdbarch
)->is_m
)
8438 /* Found a match. */
8442 if (best_arch
!= NULL
)
8444 if (tdesc_data
!= NULL
)
8445 tdesc_data_cleanup (tdesc_data
);
8446 return best_arch
->gdbarch
;
8449 tdep
= xcalloc (1, sizeof (struct gdbarch_tdep
));
8450 gdbarch
= gdbarch_alloc (&info
, tdep
);
8452 /* Record additional information about the architecture we are defining.
8453 These are gdbarch discriminators, like the OSABI. */
8454 tdep
->arm_abi
= arm_abi
;
8455 tdep
->fp_model
= fp_model
;
8457 tdep
->have_fpa_registers
= have_fpa_registers
;
8458 tdep
->have_vfp_registers
= have_vfp_registers
;
8459 tdep
->have_vfp_pseudos
= have_vfp_pseudos
;
8460 tdep
->have_neon_pseudos
= have_neon_pseudos
;
8461 tdep
->have_neon
= have_neon
;
8464 switch (info
.byte_order_for_code
)
8466 case BFD_ENDIAN_BIG
:
8467 tdep
->arm_breakpoint
= arm_default_arm_be_breakpoint
;
8468 tdep
->arm_breakpoint_size
= sizeof (arm_default_arm_be_breakpoint
);
8469 tdep
->thumb_breakpoint
= arm_default_thumb_be_breakpoint
;
8470 tdep
->thumb_breakpoint_size
= sizeof (arm_default_thumb_be_breakpoint
);
8474 case BFD_ENDIAN_LITTLE
:
8475 tdep
->arm_breakpoint
= arm_default_arm_le_breakpoint
;
8476 tdep
->arm_breakpoint_size
= sizeof (arm_default_arm_le_breakpoint
);
8477 tdep
->thumb_breakpoint
= arm_default_thumb_le_breakpoint
;
8478 tdep
->thumb_breakpoint_size
= sizeof (arm_default_thumb_le_breakpoint
);
8483 internal_error (__FILE__
, __LINE__
,
8484 _("arm_gdbarch_init: bad byte order for float format"));
8487 /* On ARM targets char defaults to unsigned. */
8488 set_gdbarch_char_signed (gdbarch
, 0);
8490 /* Note: for displaced stepping, this includes the breakpoint, and one word
8491 of additional scratch space. This setting isn't used for anything beside
8492 displaced stepping at present. */
8493 set_gdbarch_max_insn_length (gdbarch
, 4 * DISPLACED_MODIFIED_INSNS
);
8495 /* This should be low enough for everything. */
8496 tdep
->lowest_pc
= 0x20;
8497 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
8499 /* The default, for both APCS and AAPCS, is to return small
8500 structures in registers. */
8501 tdep
->struct_return
= reg_struct_return
;
8503 set_gdbarch_push_dummy_call (gdbarch
, arm_push_dummy_call
);
8504 set_gdbarch_frame_align (gdbarch
, arm_frame_align
);
8506 set_gdbarch_write_pc (gdbarch
, arm_write_pc
);
8508 /* Frame handling. */
8509 set_gdbarch_dummy_id (gdbarch
, arm_dummy_id
);
8510 set_gdbarch_unwind_pc (gdbarch
, arm_unwind_pc
);
8511 set_gdbarch_unwind_sp (gdbarch
, arm_unwind_sp
);
8513 frame_base_set_default (gdbarch
, &arm_normal_base
);
8515 /* Address manipulation. */
8516 set_gdbarch_smash_text_address (gdbarch
, arm_smash_text_address
);
8517 set_gdbarch_addr_bits_remove (gdbarch
, arm_addr_bits_remove
);
8519 /* Advance PC across function entry code. */
8520 set_gdbarch_skip_prologue (gdbarch
, arm_skip_prologue
);
8522 /* Detect whether PC is in function epilogue. */
8523 set_gdbarch_in_function_epilogue_p (gdbarch
, arm_in_function_epilogue_p
);
8525 /* Skip trampolines. */
8526 set_gdbarch_skip_trampoline_code (gdbarch
, arm_skip_stub
);
8528 /* The stack grows downward. */
8529 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
8531 /* Breakpoint manipulation. */
8532 set_gdbarch_breakpoint_from_pc (gdbarch
, arm_breakpoint_from_pc
);
8533 set_gdbarch_remote_breakpoint_from_pc (gdbarch
,
8534 arm_remote_breakpoint_from_pc
);
8536 /* Information about registers, etc. */
8537 set_gdbarch_sp_regnum (gdbarch
, ARM_SP_REGNUM
);
8538 set_gdbarch_pc_regnum (gdbarch
, ARM_PC_REGNUM
);
8539 set_gdbarch_num_regs (gdbarch
, ARM_NUM_REGS
);
8540 set_gdbarch_register_type (gdbarch
, arm_register_type
);
8541 set_gdbarch_register_reggroup_p (gdbarch
, arm_register_reggroup_p
);
8543 /* This "info float" is FPA-specific. Use the generic version if we
8545 if (gdbarch_tdep (gdbarch
)->have_fpa_registers
)
8546 set_gdbarch_print_float_info (gdbarch
, arm_print_float_info
);
8548 /* Internal <-> external register number maps. */
8549 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, arm_dwarf_reg_to_regnum
);
8550 set_gdbarch_register_sim_regno (gdbarch
, arm_register_sim_regno
);
8552 set_gdbarch_register_name (gdbarch
, arm_register_name
);
8554 /* Returning results. */
8555 set_gdbarch_return_value (gdbarch
, arm_return_value
);
8558 set_gdbarch_print_insn (gdbarch
, gdb_print_insn_arm
);
8560 /* Minsymbol frobbing. */
8561 set_gdbarch_elf_make_msymbol_special (gdbarch
, arm_elf_make_msymbol_special
);
8562 set_gdbarch_coff_make_msymbol_special (gdbarch
,
8563 arm_coff_make_msymbol_special
);
8564 set_gdbarch_record_special_symbol (gdbarch
, arm_record_special_symbol
);
8566 /* Thumb-2 IT block support. */
8567 set_gdbarch_adjust_breakpoint_address (gdbarch
,
8568 arm_adjust_breakpoint_address
);
8570 /* Virtual tables. */
8571 set_gdbarch_vbit_in_delta (gdbarch
, 1);
8573 /* Hook in the ABI-specific overrides, if they have been registered. */
8574 gdbarch_init_osabi (info
, gdbarch
);
8576 dwarf2_frame_set_init_reg (gdbarch
, arm_dwarf2_frame_init_reg
);
8578 /* Add some default predicates. */
8579 frame_unwind_append_unwinder (gdbarch
, &arm_stub_unwind
);
8580 dwarf2_append_unwinders (gdbarch
);
8581 frame_unwind_append_unwinder (gdbarch
, &arm_exidx_unwind
);
8582 frame_unwind_append_unwinder (gdbarch
, &arm_prologue_unwind
);
8584 /* Now we have tuned the configuration, set a few final things,
8585 based on what the OS ABI has told us. */
8587 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
8588 binaries are always marked. */
8589 if (tdep
->arm_abi
== ARM_ABI_AUTO
)
8590 tdep
->arm_abi
= ARM_ABI_APCS
;
8592 /* Watchpoints are not steppable. */
8593 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
8595 /* We used to default to FPA for generic ARM, but almost nobody
8596 uses that now, and we now provide a way for the user to force
8597 the model. So default to the most useful variant. */
8598 if (tdep
->fp_model
== ARM_FLOAT_AUTO
)
8599 tdep
->fp_model
= ARM_FLOAT_SOFT_FPA
;
8601 if (tdep
->jb_pc
>= 0)
8602 set_gdbarch_get_longjmp_target (gdbarch
, arm_get_longjmp_target
);
8604 /* Floating point sizes and format. */
8605 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
8606 if (tdep
->fp_model
== ARM_FLOAT_SOFT_FPA
|| tdep
->fp_model
== ARM_FLOAT_FPA
)
8608 set_gdbarch_double_format
8609 (gdbarch
, floatformats_ieee_double_littlebyte_bigword
);
8610 set_gdbarch_long_double_format
8611 (gdbarch
, floatformats_ieee_double_littlebyte_bigword
);
8615 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
8616 set_gdbarch_long_double_format (gdbarch
, floatformats_ieee_double
);
8619 if (have_vfp_pseudos
)
8621 /* NOTE: These are the only pseudo registers used by
8622 the ARM target at the moment. If more are added, a
8623 little more care in numbering will be needed. */
8625 int num_pseudos
= 32;
8626 if (have_neon_pseudos
)
8628 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudos
);
8629 set_gdbarch_pseudo_register_read (gdbarch
, arm_pseudo_read
);
8630 set_gdbarch_pseudo_register_write (gdbarch
, arm_pseudo_write
);
8635 set_tdesc_pseudo_register_name (gdbarch
, arm_register_name
);
8637 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
8639 /* Override tdesc_register_type to adjust the types of VFP
8640 registers for NEON. */
8641 set_gdbarch_register_type (gdbarch
, arm_register_type
);
8644 /* Add standard register aliases. We add aliases even for those
8645 nanes which are used by the current architecture - it's simpler,
8646 and does no harm, since nothing ever lists user registers. */
8647 for (i
= 0; i
< ARRAY_SIZE (arm_register_aliases
); i
++)
8648 user_reg_add (gdbarch
, arm_register_aliases
[i
].name
,
8649 value_of_arm_user_reg
, &arm_register_aliases
[i
].regnum
);
8655 arm_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
8657 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
8662 fprintf_unfiltered (file
, _("arm_dump_tdep: Lowest pc = 0x%lx"),
8663 (unsigned long) tdep
->lowest_pc
);
8666 extern initialize_file_ftype _initialize_arm_tdep
; /* -Wmissing-prototypes */
8669 _initialize_arm_tdep (void)
8671 struct ui_file
*stb
;
8673 struct cmd_list_element
*new_set
, *new_show
;
8674 const char *setname
;
8675 const char *setdesc
;
8676 const char *const *regnames
;
8678 static char *helptext
;
8679 char regdesc
[1024], *rdptr
= regdesc
;
8680 size_t rest
= sizeof (regdesc
);
8682 gdbarch_register (bfd_arch_arm
, arm_gdbarch_init
, arm_dump_tdep
);
8684 arm_objfile_data_key
8685 = register_objfile_data_with_cleanup (NULL
, arm_objfile_data_free
);
8687 /* Add ourselves to objfile event chain. */
8688 observer_attach_new_objfile (arm_exidx_new_objfile
);
8690 = register_objfile_data_with_cleanup (NULL
, arm_exidx_data_free
);
8692 /* Register an ELF OS ABI sniffer for ARM binaries. */
8693 gdbarch_register_osabi_sniffer (bfd_arch_arm
,
8694 bfd_target_elf_flavour
,
8695 arm_elf_osabi_sniffer
);
8697 /* Initialize the standard target descriptions. */
8698 initialize_tdesc_arm_with_m ();
8700 /* Get the number of possible sets of register names defined in opcodes. */
8701 num_disassembly_options
= get_arm_regname_num_options ();
8703 /* Add root prefix command for all "set arm"/"show arm" commands. */
8704 add_prefix_cmd ("arm", no_class
, set_arm_command
,
8705 _("Various ARM-specific commands."),
8706 &setarmcmdlist
, "set arm ", 0, &setlist
);
8708 add_prefix_cmd ("arm", no_class
, show_arm_command
,
8709 _("Various ARM-specific commands."),
8710 &showarmcmdlist
, "show arm ", 0, &showlist
);
8712 /* Sync the opcode insn printer with our register viewer. */
8713 parse_arm_disassembler_option ("reg-names-std");
8715 /* Initialize the array that will be passed to
8716 add_setshow_enum_cmd(). */
8717 valid_disassembly_styles
8718 = xmalloc ((num_disassembly_options
+ 1) * sizeof (char *));
8719 for (i
= 0; i
< num_disassembly_options
; i
++)
8721 numregs
= get_arm_regnames (i
, &setname
, &setdesc
, ®names
);
8722 valid_disassembly_styles
[i
] = setname
;
8723 length
= snprintf (rdptr
, rest
, "%s - %s\n", setname
, setdesc
);
8726 /* When we find the default names, tell the disassembler to use
8728 if (!strcmp (setname
, "std"))
8730 disassembly_style
= setname
;
8731 set_arm_regname_option (i
);
8734 /* Mark the end of valid options. */
8735 valid_disassembly_styles
[num_disassembly_options
] = NULL
;
8737 /* Create the help text. */
8738 stb
= mem_fileopen ();
8739 fprintf_unfiltered (stb
, "%s%s%s",
8740 _("The valid values are:\n"),
8742 _("The default is \"std\"."));
8743 helptext
= ui_file_xstrdup (stb
, NULL
);
8744 ui_file_delete (stb
);
8746 add_setshow_enum_cmd("disassembler", no_class
,
8747 valid_disassembly_styles
, &disassembly_style
,
8748 _("Set the disassembly style."),
8749 _("Show the disassembly style."),
8751 set_disassembly_style_sfunc
,
8752 NULL
, /* FIXME: i18n: The disassembly style is
8754 &setarmcmdlist
, &showarmcmdlist
);
8756 add_setshow_boolean_cmd ("apcs32", no_class
, &arm_apcs_32
,
8757 _("Set usage of ARM 32-bit mode."),
8758 _("Show usage of ARM 32-bit mode."),
8759 _("When off, a 26-bit PC will be used."),
8761 NULL
, /* FIXME: i18n: Usage of ARM 32-bit
8763 &setarmcmdlist
, &showarmcmdlist
);
8765 /* Add a command to allow the user to force the FPU model. */
8766 add_setshow_enum_cmd ("fpu", no_class
, fp_model_strings
, ¤t_fp_model
,
8767 _("Set the floating point type."),
8768 _("Show the floating point type."),
8769 _("auto - Determine the FP typefrom the OS-ABI.\n\
8770 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
8771 fpa - FPA co-processor (GCC compiled).\n\
8772 softvfp - Software FP with pure-endian doubles.\n\
8773 vfp - VFP co-processor."),
8774 set_fp_model_sfunc
, show_fp_model
,
8775 &setarmcmdlist
, &showarmcmdlist
);
8777 /* Add a command to allow the user to force the ABI. */
8778 add_setshow_enum_cmd ("abi", class_support
, arm_abi_strings
, &arm_abi_string
,
8781 NULL
, arm_set_abi
, arm_show_abi
,
8782 &setarmcmdlist
, &showarmcmdlist
);
8784 /* Add two commands to allow the user to force the assumed
8786 add_setshow_enum_cmd ("fallback-mode", class_support
,
8787 arm_mode_strings
, &arm_fallback_mode_string
,
8788 _("Set the mode assumed when symbols are unavailable."),
8789 _("Show the mode assumed when symbols are unavailable."),
8790 NULL
, NULL
, arm_show_fallback_mode
,
8791 &setarmcmdlist
, &showarmcmdlist
);
8792 add_setshow_enum_cmd ("force-mode", class_support
,
8793 arm_mode_strings
, &arm_force_mode_string
,
8794 _("Set the mode assumed even when symbols are available."),
8795 _("Show the mode assumed even when symbols are available."),
8796 NULL
, NULL
, arm_show_force_mode
,
8797 &setarmcmdlist
, &showarmcmdlist
);
8799 /* Debugging flag. */
8800 add_setshow_boolean_cmd ("arm", class_maintenance
, &arm_debug
,
8801 _("Set ARM debugging."),
8802 _("Show ARM debugging."),
8803 _("When on, arm-specific debugging is enabled."),
8805 NULL
, /* FIXME: i18n: "ARM debugging is %s. */
8806 &setdebuglist
, &showdebuglist
);