1 /* Common target dependent code for GDB on ARM systems.
3 Copyright (C) 1988-1989, 1991-1993, 1995-1996, 1998-2012 Free
4 Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include <ctype.h> /* XXX for isupper (). */
28 #include "gdb_string.h"
29 #include "dis-asm.h" /* For register styles. */
31 #include "reggroups.h"
34 #include "arch-utils.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
40 #include "dwarf2-frame.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
48 #include "gdb/sim-arm.h"
51 #include "coff/internal.h"
54 #include "gdb_assert.h"
57 #include "features/arm-with-m.c"
58 #include "features/arm-with-iwmmxt.c"
59 #include "features/arm-with-vfpv2.c"
60 #include "features/arm-with-vfpv3.c"
61 #include "features/arm-with-neon.c"
/* Macros for setting and testing a bit in a minimal symbol that marks
   it as a Thumb function.  The MSB of the minimal symbol's "info" field
   is used for this purpose.

   MSYMBOL_SET_SPECIAL	Actually sets the "special" bit.
   MSYMBOL_IS_SPECIAL	Tests the "special" bit in a minimal symbol.  */

#define MSYMBOL_SET_SPECIAL(msym) \
  (MSYMBOL_TARGET_FLAG_1 (msym) = 1)

#define MSYMBOL_IS_SPECIAL(msym) \
  MSYMBOL_TARGET_FLAG_1 (msym)
/* Per-objfile data used for mapping symbols.  */
static const struct objfile_data *arm_objfile_data_key;
81 struct arm_mapping_symbol
86 typedef struct arm_mapping_symbol arm_mapping_symbol_s
;
87 DEF_VEC_O(arm_mapping_symbol_s
);
89 struct arm_per_objfile
91 VEC(arm_mapping_symbol_s
) **section_maps
;
/* The list of available "set arm ..." and "show arm ..." commands.  */
static struct cmd_list_element *setarmcmdlist = NULL;
static struct cmd_list_element *showarmcmdlist = NULL;
98 /* The type of floating-point to use. Keep this in sync with enum
99 arm_float_model, and the help string in _initialize_arm_tdep. */
100 static const char *fp_model_strings
[] =
110 /* A variable that can be configured by the user. */
111 static enum arm_float_model arm_fp_model
= ARM_FLOAT_AUTO
;
112 static const char *current_fp_model
= "auto";
114 /* The ABI to use. Keep this in sync with arm_abi_kind. */
115 static const char *arm_abi_strings
[] =
123 /* A variable that can be configured by the user. */
124 static enum arm_abi_kind arm_abi_global
= ARM_ABI_AUTO
;
125 static const char *arm_abi_string
= "auto";
127 /* The execution mode to assume. */
128 static const char *arm_mode_strings
[] =
/* Fallback and forced execution modes, configurable by the user.  */
static const char *arm_fallback_mode_string = "auto";
static const char *arm_force_mode_string = "auto";
/* Internal override of the execution mode.  -1 means no override,
   0 means override to ARM mode, 1 means override to Thumb mode.
   The effect is the same as if arm_force_mode has been set by the
   user (except the internal override has precedence over a user's
   arm_force_mode override).  */
static int arm_override_mode = -1;
/* Number of different reg name sets (options).  */
static int num_disassembly_options;
149 /* The standard register names, and all the valid aliases for them. Note
150 that `fp', `sp' and `pc' are not added in this alias list, because they
151 have been added as builtin user registers in
152 std-regs.c:_initialize_frame_reg. */
157 } arm_register_aliases
[] = {
158 /* Basic register numbers. */
175 /* Synonyms (argument and variable registers). */
188 /* Other platform-specific names for r9. */
194 /* Names used by GCC (not listed in the ARM EABI). */
196 /* A special name from the older ATPCS. */
/* The standard register name for each GDB register number.  Indices
   0-15 are the core registers, 16-24 the FPA registers plus status,
   and 25 the CPSR.  */
static const char *const arm_register_names[] =
{
  "r0",  "r1",  "r2",  "r3",	/*  0  1  2  3 */
  "r4",  "r5",  "r6",  "r7",	/*  4  5  6  7 */
  "r8",  "r9",  "r10", "r11",	/*  8  9 10 11 */
  "r12", "sp",  "lr",  "pc",	/* 12 13 14 15 */
  "f0",  "f1",  "f2",  "f3",	/* 16 17 18 19 */
  "f4",  "f5",  "f6",  "f7",	/* 20 21 22 23 */
  "fps", "cpsr"			/* 24 25       */
};
/* Valid register name styles.  */
static const char **valid_disassembly_styles;

/* Disassembly style to use.  Default to "std" register names.  */
static const char *disassembly_style;
215 /* This is used to keep the bfd arch_info in sync with the disassembly
217 static void set_disassembly_style_sfunc(char *, int,
218 struct cmd_list_element
*);
219 static void set_disassembly_style (void);
221 static void convert_from_extended (const struct floatformat
*, const void *,
223 static void convert_to_extended (const struct floatformat
*, void *,
226 static enum register_status
arm_neon_quad_read (struct gdbarch
*gdbarch
,
227 struct regcache
*regcache
,
228 int regnum
, gdb_byte
*buf
);
229 static void arm_neon_quad_write (struct gdbarch
*gdbarch
,
230 struct regcache
*regcache
,
231 int regnum
, const gdb_byte
*buf
);
233 static int thumb_insn_size (unsigned short inst1
);
235 struct arm_prologue_cache
237 /* The stack pointer at the time this frame was created; i.e. the
238 caller's stack pointer when this function was called. It is used
239 to identify this frame. */
242 /* The frame base for this frame is just prev_sp - frame size.
243 FRAMESIZE is the distance from the frame pointer to the
244 initial stack pointer. */
248 /* The register used to hold the frame pointer for this frame. */
251 /* Saved register offsets. */
252 struct trad_frame_saved_reg
*saved_regs
;
255 static CORE_ADDR
arm_analyze_prologue (struct gdbarch
*gdbarch
,
256 CORE_ADDR prologue_start
,
257 CORE_ADDR prologue_end
,
258 struct arm_prologue_cache
*cache
);
/* Architecture version for displaced stepping.  This effects the behaviour of
   certain instructions, and really should not be hard-wired.  */

#define DISPLACED_STEPPING_ARCH_VERSION		5
/* Addresses for calling Thumb functions have the bit 0 set.
   Here are some macros to test, set, or clear bit 0 of addresses.  */

#define IS_THUMB_ADDR(addr)	((addr) & 1)
#define MAKE_THUMB_ADDR(addr)	((addr) | 1)
#define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
271 /* Set to true if the 32-bit mode is in use. */
275 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
278 arm_psr_thumb_bit (struct gdbarch
*gdbarch
)
280 if (gdbarch_tdep (gdbarch
)->is_m
)
286 /* Determine if FRAME is executing in Thumb mode. */
289 arm_frame_is_thumb (struct frame_info
*frame
)
292 ULONGEST t_bit
= arm_psr_thumb_bit (get_frame_arch (frame
));
294 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
295 directly (from a signal frame or dummy frame) or by interpreting
296 the saved LR (from a prologue or DWARF frame). So consult it and
297 trust the unwinders. */
298 cpsr
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
300 return (cpsr
& t_bit
) != 0;
303 /* Callback for VEC_lower_bound. */
306 arm_compare_mapping_symbols (const struct arm_mapping_symbol
*lhs
,
307 const struct arm_mapping_symbol
*rhs
)
309 return lhs
->value
< rhs
->value
;
312 /* Search for the mapping symbol covering MEMADDR. If one is found,
313 return its type. Otherwise, return 0. If START is non-NULL,
314 set *START to the location of the mapping symbol. */
317 arm_find_mapping_symbol (CORE_ADDR memaddr
, CORE_ADDR
*start
)
319 struct obj_section
*sec
;
321 /* If there are mapping symbols, consult them. */
322 sec
= find_pc_section (memaddr
);
325 struct arm_per_objfile
*data
;
326 VEC(arm_mapping_symbol_s
) *map
;
327 struct arm_mapping_symbol map_key
= { memaddr
- obj_section_addr (sec
),
331 data
= objfile_data (sec
->objfile
, arm_objfile_data_key
);
334 map
= data
->section_maps
[sec
->the_bfd_section
->index
];
335 if (!VEC_empty (arm_mapping_symbol_s
, map
))
337 struct arm_mapping_symbol
*map_sym
;
339 idx
= VEC_lower_bound (arm_mapping_symbol_s
, map
, &map_key
,
340 arm_compare_mapping_symbols
);
342 /* VEC_lower_bound finds the earliest ordered insertion
343 point. If the following symbol starts at this exact
344 address, we use that; otherwise, the preceding
345 mapping symbol covers this address. */
346 if (idx
< VEC_length (arm_mapping_symbol_s
, map
))
348 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
);
349 if (map_sym
->value
== map_key
.value
)
352 *start
= map_sym
->value
+ obj_section_addr (sec
);
353 return map_sym
->type
;
359 map_sym
= VEC_index (arm_mapping_symbol_s
, map
, idx
- 1);
361 *start
= map_sym
->value
+ obj_section_addr (sec
);
362 return map_sym
->type
;
371 /* Determine if the program counter specified in MEMADDR is in a Thumb
372 function. This function should be called for addresses unrelated to
373 any executing frame; otherwise, prefer arm_frame_is_thumb. */
376 arm_pc_is_thumb (struct gdbarch
*gdbarch
, CORE_ADDR memaddr
)
378 struct obj_section
*sec
;
379 struct minimal_symbol
*sym
;
381 struct displaced_step_closure
* dsc
382 = get_displaced_step_closure_by_addr(memaddr
);
384 /* If checking the mode of displaced instruction in copy area, the mode
385 should be determined by instruction on the original address. */
389 fprintf_unfiltered (gdb_stdlog
,
390 "displaced: check mode of %.8lx instead of %.8lx\n",
391 (unsigned long) dsc
->insn_addr
,
392 (unsigned long) memaddr
);
393 memaddr
= dsc
->insn_addr
;
396 /* If bit 0 of the address is set, assume this is a Thumb address. */
397 if (IS_THUMB_ADDR (memaddr
))
400 /* Respect internal mode override if active. */
401 if (arm_override_mode
!= -1)
402 return arm_override_mode
;
404 /* If the user wants to override the symbol table, let him. */
405 if (strcmp (arm_force_mode_string
, "arm") == 0)
407 if (strcmp (arm_force_mode_string
, "thumb") == 0)
410 /* ARM v6-M and v7-M are always in Thumb mode. */
411 if (gdbarch_tdep (gdbarch
)->is_m
)
414 /* If there are mapping symbols, consult them. */
415 type
= arm_find_mapping_symbol (memaddr
, NULL
);
419 /* Thumb functions have a "special" bit set in minimal symbols. */
420 sym
= lookup_minimal_symbol_by_pc (memaddr
);
422 return (MSYMBOL_IS_SPECIAL (sym
));
424 /* If the user wants to override the fallback mode, let them. */
425 if (strcmp (arm_fallback_mode_string
, "arm") == 0)
427 if (strcmp (arm_fallback_mode_string
, "thumb") == 0)
430 /* If we couldn't find any symbol, but we're talking to a running
431 target, then trust the current value of $cpsr. This lets
432 "display/i $pc" always show the correct mode (though if there is
433 a symbol table we will not reach here, so it still may not be
434 displayed in the mode it will be executed). */
435 if (target_has_registers
)
436 return arm_frame_is_thumb (get_current_frame ());
438 /* Otherwise we're out of luck; we assume ARM. */
442 /* Remove useless bits from addresses in a running program. */
444 arm_addr_bits_remove (struct gdbarch
*gdbarch
, CORE_ADDR val
)
447 return UNMAKE_THUMB_ADDR (val
);
449 return (val
& 0x03fffffc);
452 /* When reading symbols, we need to zap the low bit of the address,
453 which may be set to 1 for Thumb functions. */
455 arm_smash_text_address (struct gdbarch
*gdbarch
, CORE_ADDR val
)
460 /* Return 1 if PC is the start of a compiler helper function which
461 can be safely ignored during prologue skipping. IS_THUMB is true
462 if the function is known to be a Thumb function due to the way it
465 skip_prologue_function (struct gdbarch
*gdbarch
, CORE_ADDR pc
, int is_thumb
)
467 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
468 struct minimal_symbol
*msym
;
470 msym
= lookup_minimal_symbol_by_pc (pc
);
472 && SYMBOL_VALUE_ADDRESS (msym
) == pc
473 && SYMBOL_LINKAGE_NAME (msym
) != NULL
)
475 const char *name
= SYMBOL_LINKAGE_NAME (msym
);
477 /* The GNU linker's Thumb call stub to foo is named
479 if (strstr (name
, "_from_thumb") != NULL
)
482 /* On soft-float targets, __truncdfsf2 is called to convert promoted
483 arguments to their argument types in non-prototyped
485 if (strncmp (name
, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
487 if (strncmp (name
, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
490 /* Internal functions related to thread-local storage. */
491 if (strncmp (name
, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
493 if (strncmp (name
, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
498 /* If we run against a stripped glibc, we may be unable to identify
499 special functions by name. Check for one important case,
500 __aeabi_read_tp, by comparing the *code* against the default
501 implementation (this is hand-written ARM assembler in glibc). */
504 && read_memory_unsigned_integer (pc
, 4, byte_order_for_code
)
505 == 0xe3e00a0f /* mov r0, #0xffff0fff */
506 && read_memory_unsigned_integer (pc
+ 4, 4, byte_order_for_code
)
507 == 0xe240f01f) /* sub pc, r0, #31 */
/* Support routines for instruction parsing.  */

/* A mask covering bits 0 .. X inclusive.
   NOTE(review): for x == 31 with a 32-bit long the shift width equals
   the type width, which is undefined behaviour — confirm no caller
   passes 31 on ILP32 hosts.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST .. FN (inclusive) of OBJ, unsigned.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Bits ST .. FN of OBJ, sign-extended from bit FN.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* Target of an ARM-mode branch: ADDR + 8 (pipeline offset) plus the
   24-bit signed word offset in INSTR, scaled by 4.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  The immediate is assembled as imm4:i:imm3:imm8.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)		      \
   | (bits ((insn1), 10, 10) << 11)	      \
   | (bits ((insn2), 12, 14) << 8)	      \
   | bits ((insn2), 0, 7))
/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  The immediate is assembled as imm4:imm12.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12)      \
   | bits ((insn), 0, 11))
538 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
541 thumb_expand_immediate (unsigned int imm
)
543 unsigned int count
= imm
>> 7;
551 return (imm
& 0xff) | ((imm
& 0xff) << 16);
553 return ((imm
& 0xff) << 8) | ((imm
& 0xff) << 24);
555 return (imm
& 0xff) | ((imm
& 0xff) << 8)
556 | ((imm
& 0xff) << 16) | ((imm
& 0xff) << 24);
559 return (0x80 | (imm
& 0x7f)) << (32 - count
);
562 /* Return 1 if the 16-bit Thumb instruction INST might change
563 control flow, 0 otherwise. */
566 thumb_instruction_changes_pc (unsigned short inst
)
568 if ((inst
& 0xff00) == 0xbd00) /* pop {rlist, pc} */
571 if ((inst
& 0xf000) == 0xd000) /* conditional branch */
574 if ((inst
& 0xf800) == 0xe000) /* unconditional branch */
577 if ((inst
& 0xff00) == 0x4700) /* bx REG, blx REG */
580 if ((inst
& 0xff87) == 0x4687) /* mov pc, REG */
583 if ((inst
& 0xf500) == 0xb100) /* CBNZ or CBZ. */
589 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
590 might change control flow, 0 otherwise. */
593 thumb2_instruction_changes_pc (unsigned short inst1
, unsigned short inst2
)
595 if ((inst1
& 0xf800) == 0xf000 && (inst2
& 0x8000) == 0x8000)
597 /* Branches and miscellaneous control instructions. */
599 if ((inst2
& 0x1000) != 0 || (inst2
& 0xd001) == 0xc000)
604 else if (inst1
== 0xf3de && (inst2
& 0xff00) == 0x3f00)
606 /* SUBS PC, LR, #imm8. */
609 else if ((inst2
& 0xd000) == 0x8000 && (inst1
& 0x0380) != 0x0380)
611 /* Conditional branch. */
618 if ((inst1
& 0xfe50) == 0xe810)
620 /* Load multiple or RFE. */
622 if (bit (inst1
, 7) && !bit (inst1
, 8))
628 else if (!bit (inst1
, 7) && bit (inst1
, 8))
634 else if (bit (inst1
, 7) && bit (inst1
, 8))
639 else if (!bit (inst1
, 7) && !bit (inst1
, 8))
648 if ((inst1
& 0xffef) == 0xea4f && (inst2
& 0xfff0) == 0x0f00)
650 /* MOV PC or MOVS PC. */
654 if ((inst1
& 0xff70) == 0xf850 && (inst2
& 0xf000) == 0xf000)
657 if (bits (inst1
, 0, 3) == 15)
663 if ((inst2
& 0x0fc0) == 0x0000)
669 if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf000)
675 if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf010)
684 /* Analyze a Thumb prologue, looking for a recognizable stack frame
685 and frame pointer. Scan until we encounter a store that could
686 clobber the stack frame unexpectedly, or an unknown instruction.
687 Return the last address which is definitely safe to skip for an
688 initial breakpoint. */
691 thumb_analyze_prologue (struct gdbarch
*gdbarch
,
692 CORE_ADDR start
, CORE_ADDR limit
,
693 struct arm_prologue_cache
*cache
)
695 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
696 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
699 struct pv_area
*stack
;
700 struct cleanup
*back_to
;
702 CORE_ADDR unrecognized_pc
= 0;
704 for (i
= 0; i
< 16; i
++)
705 regs
[i
] = pv_register (i
, 0);
706 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
707 back_to
= make_cleanup_free_pv_area (stack
);
709 while (start
< limit
)
713 insn
= read_memory_unsigned_integer (start
, 2, byte_order_for_code
);
715 if ((insn
& 0xfe00) == 0xb400) /* push { rlist } */
720 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
723 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
724 whether to save LR (R14). */
725 mask
= (insn
& 0xff) | ((insn
& 0x100) << 6);
727 /* Calculate offsets of saved R0-R7 and LR. */
728 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
729 if (mask
& (1 << regno
))
731 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
733 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
736 else if ((insn
& 0xff00) == 0xb000) /* add sp, #simm OR
739 offset
= (insn
& 0x7f) << 2; /* get scaled offset */
740 if (insn
& 0x80) /* Check for SUB. */
741 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
744 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
],
747 else if ((insn
& 0xf800) == 0xa800) /* add Rd, sp, #imm */
748 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[ARM_SP_REGNUM
],
750 else if ((insn
& 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
751 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
752 regs
[bits (insn
, 0, 2)] = pv_add_constant (regs
[bits (insn
, 3, 5)],
754 else if ((insn
& 0xf800) == 0x3000 /* add Rd, #imm */
755 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
756 regs
[bits (insn
, 8, 10)] = pv_add_constant (regs
[bits (insn
, 8, 10)],
758 else if ((insn
& 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
759 && pv_is_register (regs
[bits (insn
, 6, 8)], ARM_SP_REGNUM
)
760 && pv_is_constant (regs
[bits (insn
, 3, 5)]))
761 regs
[bits (insn
, 0, 2)] = pv_add (regs
[bits (insn
, 3, 5)],
762 regs
[bits (insn
, 6, 8)]);
763 else if ((insn
& 0xff00) == 0x4400 /* add Rd, Rm */
764 && pv_is_constant (regs
[bits (insn
, 3, 6)]))
766 int rd
= (bit (insn
, 7) << 3) + bits (insn
, 0, 2);
767 int rm
= bits (insn
, 3, 6);
768 regs
[rd
] = pv_add (regs
[rd
], regs
[rm
]);
770 else if ((insn
& 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
772 int dst_reg
= (insn
& 0x7) + ((insn
& 0x80) >> 4);
773 int src_reg
= (insn
& 0x78) >> 3;
774 regs
[dst_reg
] = regs
[src_reg
];
776 else if ((insn
& 0xf800) == 0x9000) /* str rd, [sp, #off] */
778 /* Handle stores to the stack. Normally pushes are used,
779 but with GCC -mtpcs-frame, there may be other stores
780 in the prologue to create the frame. */
781 int regno
= (insn
>> 8) & 0x7;
784 offset
= (insn
& 0xff) << 2;
785 addr
= pv_add_constant (regs
[ARM_SP_REGNUM
], offset
);
787 if (pv_area_store_would_trash (stack
, addr
))
790 pv_area_store (stack
, addr
, 4, regs
[regno
]);
792 else if ((insn
& 0xf800) == 0x6000) /* str rd, [rn, #off] */
794 int rd
= bits (insn
, 0, 2);
795 int rn
= bits (insn
, 3, 5);
798 offset
= bits (insn
, 6, 10) << 2;
799 addr
= pv_add_constant (regs
[rn
], offset
);
801 if (pv_area_store_would_trash (stack
, addr
))
804 pv_area_store (stack
, addr
, 4, regs
[rd
]);
806 else if (((insn
& 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
807 || (insn
& 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
808 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
))
809 /* Ignore stores of argument registers to the stack. */
811 else if ((insn
& 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
812 && pv_is_register (regs
[bits (insn
, 8, 10)], ARM_SP_REGNUM
))
813 /* Ignore block loads from the stack, potentially copying
814 parameters from memory. */
816 else if ((insn
& 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
817 || ((insn
& 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
818 && pv_is_register (regs
[bits (insn
, 3, 5)], ARM_SP_REGNUM
)))
819 /* Similarly ignore single loads from the stack. */
821 else if ((insn
& 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
822 || (insn
& 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
823 /* Skip register copies, i.e. saves to another register
824 instead of the stack. */
826 else if ((insn
& 0xf800) == 0x2000) /* movs Rd, #imm */
827 /* Recognize constant loads; even with small stacks these are necessary
829 regs
[bits (insn
, 8, 10)] = pv_constant (bits (insn
, 0, 7));
830 else if ((insn
& 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
832 /* Constant pool loads, for the same reason. */
833 unsigned int constant
;
836 loc
= start
+ 4 + bits (insn
, 0, 7) * 4;
837 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
838 regs
[bits (insn
, 8, 10)] = pv_constant (constant
);
840 else if (thumb_insn_size (insn
) == 4) /* 32-bit Thumb-2 instructions. */
842 unsigned short inst2
;
844 inst2
= read_memory_unsigned_integer (start
+ 2, 2,
845 byte_order_for_code
);
847 if ((insn
& 0xf800) == 0xf000 && (inst2
& 0xe800) == 0xe800)
849 /* BL, BLX. Allow some special function calls when
850 skipping the prologue; GCC generates these before
851 storing arguments to the stack. */
853 int j1
, j2
, imm1
, imm2
;
855 imm1
= sbits (insn
, 0, 10);
856 imm2
= bits (inst2
, 0, 10);
857 j1
= bit (inst2
, 13);
858 j2
= bit (inst2
, 11);
860 offset
= ((imm1
<< 12) + (imm2
<< 1));
861 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
863 nextpc
= start
+ 4 + offset
;
864 /* For BLX make sure to clear the low bits. */
865 if (bit (inst2
, 12) == 0)
866 nextpc
= nextpc
& 0xfffffffc;
868 if (!skip_prologue_function (gdbarch
, nextpc
,
869 bit (inst2
, 12) != 0))
873 else if ((insn
& 0xffd0) == 0xe900 /* stmdb Rn{!},
875 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
877 pv_t addr
= regs
[bits (insn
, 0, 3)];
880 if (pv_area_store_would_trash (stack
, addr
))
883 /* Calculate offsets of saved registers. */
884 for (regno
= ARM_LR_REGNUM
; regno
>= 0; regno
--)
885 if (inst2
& (1 << regno
))
887 addr
= pv_add_constant (addr
, -4);
888 pv_area_store (stack
, addr
, 4, regs
[regno
]);
892 regs
[bits (insn
, 0, 3)] = addr
;
895 else if ((insn
& 0xff50) == 0xe940 /* strd Rt, Rt2,
897 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
899 int regno1
= bits (inst2
, 12, 15);
900 int regno2
= bits (inst2
, 8, 11);
901 pv_t addr
= regs
[bits (insn
, 0, 3)];
903 offset
= inst2
& 0xff;
905 addr
= pv_add_constant (addr
, offset
);
907 addr
= pv_add_constant (addr
, -offset
);
909 if (pv_area_store_would_trash (stack
, addr
))
912 pv_area_store (stack
, addr
, 4, regs
[regno1
]);
913 pv_area_store (stack
, pv_add_constant (addr
, 4),
917 regs
[bits (insn
, 0, 3)] = addr
;
920 else if ((insn
& 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
921 && (inst2
& 0x0c00) == 0x0c00
922 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
924 int regno
= bits (inst2
, 12, 15);
925 pv_t addr
= regs
[bits (insn
, 0, 3)];
927 offset
= inst2
& 0xff;
929 addr
= pv_add_constant (addr
, offset
);
931 addr
= pv_add_constant (addr
, -offset
);
933 if (pv_area_store_would_trash (stack
, addr
))
936 pv_area_store (stack
, addr
, 4, regs
[regno
]);
939 regs
[bits (insn
, 0, 3)] = addr
;
942 else if ((insn
& 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
943 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
945 int regno
= bits (inst2
, 12, 15);
948 offset
= inst2
& 0xfff;
949 addr
= pv_add_constant (regs
[bits (insn
, 0, 3)], offset
);
951 if (pv_area_store_would_trash (stack
, addr
))
954 pv_area_store (stack
, addr
, 4, regs
[regno
]);
957 else if ((insn
& 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
958 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
959 /* Ignore stores of argument registers to the stack. */
962 else if ((insn
& 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
963 && (inst2
& 0x0d00) == 0x0c00
964 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
965 /* Ignore stores of argument registers to the stack. */
968 else if ((insn
& 0xffd0) == 0xe890 /* ldmia Rn[!],
970 && (inst2
& 0x8000) == 0x0000
971 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
972 /* Ignore block loads from the stack, potentially copying
973 parameters from memory. */
976 else if ((insn
& 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
978 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
979 /* Similarly ignore dual loads from the stack. */
982 else if ((insn
& 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
983 && (inst2
& 0x0d00) == 0x0c00
984 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
985 /* Similarly ignore single loads from the stack. */
988 else if ((insn
& 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
989 && pv_is_register (regs
[bits (insn
, 0, 3)], ARM_SP_REGNUM
))
990 /* Similarly ignore single loads from the stack. */
993 else if ((insn
& 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
994 && (inst2
& 0x8000) == 0x0000)
996 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
997 | (bits (inst2
, 12, 14) << 8)
998 | bits (inst2
, 0, 7));
1000 regs
[bits (inst2
, 8, 11)]
1001 = pv_add_constant (regs
[bits (insn
, 0, 3)],
1002 thumb_expand_immediate (imm
));
1005 else if ((insn
& 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1006 && (inst2
& 0x8000) == 0x0000)
1008 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1009 | (bits (inst2
, 12, 14) << 8)
1010 | bits (inst2
, 0, 7));
1012 regs
[bits (inst2
, 8, 11)]
1013 = pv_add_constant (regs
[bits (insn
, 0, 3)], imm
);
1016 else if ((insn
& 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1017 && (inst2
& 0x8000) == 0x0000)
1019 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1020 | (bits (inst2
, 12, 14) << 8)
1021 | bits (inst2
, 0, 7));
1023 regs
[bits (inst2
, 8, 11)]
1024 = pv_add_constant (regs
[bits (insn
, 0, 3)],
1025 - (CORE_ADDR
) thumb_expand_immediate (imm
));
1028 else if ((insn
& 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1029 && (inst2
& 0x8000) == 0x0000)
1031 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1032 | (bits (inst2
, 12, 14) << 8)
1033 | bits (inst2
, 0, 7));
1035 regs
[bits (inst2
, 8, 11)]
1036 = pv_add_constant (regs
[bits (insn
, 0, 3)], - (CORE_ADDR
) imm
);
1039 else if ((insn
& 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1041 unsigned int imm
= ((bits (insn
, 10, 10) << 11)
1042 | (bits (inst2
, 12, 14) << 8)
1043 | bits (inst2
, 0, 7));
1045 regs
[bits (inst2
, 8, 11)]
1046 = pv_constant (thumb_expand_immediate (imm
));
1049 else if ((insn
& 0xfbf0) == 0xf240) /* movw Rd, #const */
1052 = EXTRACT_MOVW_MOVT_IMM_T (insn
, inst2
);
1054 regs
[bits (inst2
, 8, 11)] = pv_constant (imm
);
1057 else if (insn
== 0xea5f /* mov.w Rd,Rm */
1058 && (inst2
& 0xf0f0) == 0)
1060 int dst_reg
= (inst2
& 0x0f00) >> 8;
1061 int src_reg
= inst2
& 0xf;
1062 regs
[dst_reg
] = regs
[src_reg
];
1065 else if ((insn
& 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1067 /* Constant pool loads. */
1068 unsigned int constant
;
1071 offset
= bits (insn
, 0, 11);
1073 loc
= start
+ 4 + offset
;
1075 loc
= start
+ 4 - offset
;
1077 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1078 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1081 else if ((insn
& 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1083 /* Constant pool loads. */
1084 unsigned int constant
;
1087 offset
= bits (insn
, 0, 7) << 2;
1089 loc
= start
+ 4 + offset
;
1091 loc
= start
+ 4 - offset
;
1093 constant
= read_memory_unsigned_integer (loc
, 4, byte_order
);
1094 regs
[bits (inst2
, 12, 15)] = pv_constant (constant
);
1096 constant
= read_memory_unsigned_integer (loc
+ 4, 4, byte_order
);
1097 regs
[bits (inst2
, 8, 11)] = pv_constant (constant
);
1100 else if (thumb2_instruction_changes_pc (insn
, inst2
))
1102 /* Don't scan past anything that might change control flow. */
1107 /* The optimizer might shove anything into the prologue,
1108 so we just skip what we don't recognize. */
1109 unrecognized_pc
= start
;
1114 else if (thumb_instruction_changes_pc (insn
))
1116 /* Don't scan past anything that might change control flow. */
1121 /* The optimizer might shove anything into the prologue,
1122 so we just skip what we don't recognize. */
1123 unrecognized_pc
= start
;
1130 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1131 paddress (gdbarch
, start
));
1133 if (unrecognized_pc
== 0)
1134 unrecognized_pc
= start
;
1138 do_cleanups (back_to
);
1139 return unrecognized_pc
;
1142 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1144 /* Frame pointer is fp. Frame size is constant. */
1145 cache
->framereg
= ARM_FP_REGNUM
;
1146 cache
->framesize
= -regs
[ARM_FP_REGNUM
].k
;
1148 else if (pv_is_register (regs
[THUMB_FP_REGNUM
], ARM_SP_REGNUM
))
1150 /* Frame pointer is r7. Frame size is constant. */
1151 cache
->framereg
= THUMB_FP_REGNUM
;
1152 cache
->framesize
= -regs
[THUMB_FP_REGNUM
].k
;
1156 /* Try the stack pointer... this is a bit desperate. */
1157 cache
->framereg
= ARM_SP_REGNUM
;
1158 cache
->framesize
= -regs
[ARM_SP_REGNUM
].k
;
1161 for (i
= 0; i
< 16; i
++)
1162 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
1163 cache
->saved_regs
[i
].addr
= offset
;
1165 do_cleanups (back_to
);
1166 return unrecognized_pc
;
1170 /* Try to analyze the instructions starting from PC, which load symbol
1171 __stack_chk_guard. Return the address of instruction after loading this
1172 symbol, set the dest register number to *BASEREG, and set the size of
1173 instructions for loading symbol in OFFSET. Return 0 if instructions are
1177 arm_analyze_load_stack_chk_guard(CORE_ADDR pc
, struct gdbarch
*gdbarch
,
1178 unsigned int *destreg
, int *offset
)
1180 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1181 int is_thumb
= arm_pc_is_thumb (gdbarch
, pc
);
1182 unsigned int low
, high
, address
;
1187 unsigned short insn1
1188 = read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
1190 if ((insn1
& 0xf800) == 0x4800) /* ldr Rd, #immed */
1192 *destreg
= bits (insn1
, 8, 10);
1194 address
= bits (insn1
, 0, 7);
1196 else if ((insn1
& 0xfbf0) == 0xf240) /* movw Rd, #const */
1198 unsigned short insn2
1199 = read_memory_unsigned_integer (pc
+ 2, 2, byte_order_for_code
);
1201 low
= EXTRACT_MOVW_MOVT_IMM_T (insn1
, insn2
);
1204 = read_memory_unsigned_integer (pc
+ 4, 2, byte_order_for_code
);
1206 = read_memory_unsigned_integer (pc
+ 6, 2, byte_order_for_code
);
1208 /* movt Rd, #const */
1209 if ((insn1
& 0xfbc0) == 0xf2c0)
1211 high
= EXTRACT_MOVW_MOVT_IMM_T (insn1
, insn2
);
1212 *destreg
= bits (insn2
, 8, 11);
1214 address
= (high
<< 16 | low
);
1221 = read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
1223 if ((insn
& 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1225 address
= bits (insn
, 0, 11);
1226 *destreg
= bits (insn
, 12, 15);
1229 else if ((insn
& 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1231 low
= EXTRACT_MOVW_MOVT_IMM_A (insn
);
1234 = read_memory_unsigned_integer (pc
+ 4, 4, byte_order_for_code
);
1236 if ((insn
& 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1238 high
= EXTRACT_MOVW_MOVT_IMM_A (insn
);
1239 *destreg
= bits (insn
, 12, 15);
1241 address
= (high
<< 16 | low
);
1249 /* Try to skip a sequence of instructions used for stack protector. If PC
1250 points to the first instruction of this sequence, return the address of
1251 first instruction after this sequence, otherwise, return original PC.
1253 On arm, this sequence of instructions is composed of mainly three steps,
1254 Step 1: load symbol __stack_chk_guard,
1255 Step 2: load from address of __stack_chk_guard,
1256 Step 3: store it to somewhere else.
1258 Usually, instructions on step 2 and step 3 are the same on various ARM
1259 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1260 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1261 instructions in step 1 vary from different ARM architectures. On ARMv7,
1264 movw Rn, #:lower16:__stack_chk_guard
1265 movt Rn, #:upper16:__stack_chk_guard
1272 .word __stack_chk_guard
1274 Since ldr/str is a very popular instruction, we can't use them as
1275 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1276 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
1277 stripped, as the 'fingerprint' of a stack protector cdoe sequence. */
1280 arm_skip_stack_protector(CORE_ADDR pc
, struct gdbarch
*gdbarch
)
1282 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1283 unsigned int address
, basereg
;
1284 struct minimal_symbol
*stack_chk_guard
;
1286 int is_thumb
= arm_pc_is_thumb (gdbarch
, pc
);
1289 /* Try to parse the instructions in Step 1. */
1290 addr
= arm_analyze_load_stack_chk_guard (pc
, gdbarch
,
1295 stack_chk_guard
= lookup_minimal_symbol_by_pc (addr
);
1296 /* If name of symbol doesn't start with '__stack_chk_guard', this
1297 instruction sequence is not for stack protector. If symbol is
1298 removed, we conservatively think this sequence is for stack protector. */
1300 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard
), "__stack_chk_guard",
1301 strlen ("__stack_chk_guard")) != 0)
1306 unsigned int destreg
;
1308 = read_memory_unsigned_integer (pc
+ offset
, 2, byte_order_for_code
);
1310 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1311 if ((insn
& 0xf800) != 0x6800)
1313 if (bits (insn
, 3, 5) != basereg
)
1315 destreg
= bits (insn
, 0, 2);
1317 insn
= read_memory_unsigned_integer (pc
+ offset
+ 2, 2,
1318 byte_order_for_code
);
1319 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1320 if ((insn
& 0xf800) != 0x6000)
1322 if (destreg
!= bits (insn
, 0, 2))
1327 unsigned int destreg
;
1329 = read_memory_unsigned_integer (pc
+ offset
, 4, byte_order_for_code
);
1331 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1332 if ((insn
& 0x0e500000) != 0x04100000)
1334 if (bits (insn
, 16, 19) != basereg
)
1336 destreg
= bits (insn
, 12, 15);
1337 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1338 insn
= read_memory_unsigned_integer (pc
+ offset
+ 4,
1339 4, byte_order_for_code
);
1340 if ((insn
& 0x0e500000) != 0x04000000)
1342 if (bits (insn
, 12, 15) != destreg
)
1345 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1348 return pc
+ offset
+ 4;
1350 return pc
+ offset
+ 8;
1353 /* Advance the PC across any function entry prologue instructions to
1354 reach some "real" code.
1356 The APCS (ARM Procedure Call Standard) defines the following
1360 [stmfd sp!, {a1,a2,a3,a4}]
1361 stmfd sp!, {...,fp,ip,lr,pc}
1362 [stfe f7, [sp, #-12]!]
1363 [stfe f6, [sp, #-12]!]
1364 [stfe f5, [sp, #-12]!]
1365 [stfe f4, [sp, #-12]!]
1366 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1369 arm_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
1371 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1374 CORE_ADDR func_addr
, limit_pc
;
1375 struct symtab_and_line sal
;
1377 /* See if we can determine the end of the prologue via the symbol table.
1378 If so, then return either PC, or the PC after the prologue, whichever
1380 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
1382 CORE_ADDR post_prologue_pc
1383 = skip_prologue_using_sal (gdbarch
, func_addr
);
1384 struct symtab
*s
= find_pc_symtab (func_addr
);
1386 if (post_prologue_pc
)
1388 = arm_skip_stack_protector (post_prologue_pc
, gdbarch
);
1391 /* GCC always emits a line note before the prologue and another
1392 one after, even if the two are at the same address or on the
1393 same line. Take advantage of this so that we do not need to
1394 know every instruction that might appear in the prologue. We
1395 will have producer information for most binaries; if it is
1396 missing (e.g. for -gstabs), assuming the GNU tools. */
1397 if (post_prologue_pc
1399 || s
->producer
== NULL
1400 || strncmp (s
->producer
, "GNU ", sizeof ("GNU ") - 1) == 0))
1401 return post_prologue_pc
;
1403 if (post_prologue_pc
!= 0)
1405 CORE_ADDR analyzed_limit
;
1407 /* For non-GCC compilers, make sure the entire line is an
1408 acceptable prologue; GDB will round this function's
1409 return value up to the end of the following line so we
1410 can not skip just part of a line (and we do not want to).
1412 RealView does not treat the prologue specially, but does
1413 associate prologue code with the opening brace; so this
1414 lets us skip the first line if we think it is the opening
1416 if (arm_pc_is_thumb (gdbarch
, func_addr
))
1417 analyzed_limit
= thumb_analyze_prologue (gdbarch
, func_addr
,
1418 post_prologue_pc
, NULL
);
1420 analyzed_limit
= arm_analyze_prologue (gdbarch
, func_addr
,
1421 post_prologue_pc
, NULL
);
1423 if (analyzed_limit
!= post_prologue_pc
)
1426 return post_prologue_pc
;
1430 /* Can't determine prologue from the symbol table, need to examine
1433 /* Find an upper limit on the function prologue using the debug
1434 information. If the debug information could not be used to provide
1435 that bound, then use an arbitrary large number as the upper bound. */
1436 /* Like arm_scan_prologue, stop no later than pc + 64. */
1437 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
1439 limit_pc
= pc
+ 64; /* Magic. */
1442 /* Check if this is Thumb code. */
1443 if (arm_pc_is_thumb (gdbarch
, pc
))
1444 return thumb_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
1446 for (skip_pc
= pc
; skip_pc
< limit_pc
; skip_pc
+= 4)
1448 inst
= read_memory_unsigned_integer (skip_pc
, 4, byte_order_for_code
);
1450 /* "mov ip, sp" is no longer a required part of the prologue. */
1451 if (inst
== 0xe1a0c00d) /* mov ip, sp */
1454 if ((inst
& 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1457 if ((inst
& 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1460 /* Some prologues begin with "str lr, [sp, #-4]!". */
1461 if (inst
== 0xe52de004) /* str lr, [sp, #-4]! */
1464 if ((inst
& 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1467 if ((inst
& 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1470 /* Any insns after this point may float into the code, if it makes
1471 for better instruction scheduling, so we skip them only if we
1472 find them, but still consider the function to be frame-ful. */
1474 /* We may have either one sfmfd instruction here, or several stfe
1475 insns, depending on the version of floating point code we
1477 if ((inst
& 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1480 if ((inst
& 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1483 if ((inst
& 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1486 if ((inst
& 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1489 if ((inst
& 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1490 || (inst
& 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1491 || (inst
& 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1494 if ((inst
& 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1495 || (inst
& 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1496 || (inst
& 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1499 /* Un-recognized instruction; stop scanning. */
1503 return skip_pc
; /* End of prologue. */
1507 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1508 This function decodes a Thumb function prologue to determine:
1509 1) the size of the stack frame
1510 2) which registers are saved on it
1511 3) the offsets of saved regs
1512 4) the offset from the stack pointer to the frame pointer
1514 A typical Thumb function prologue would create this stack frame
1515 (offsets relative to FP)
1516 old SP -> 24 stack parameters
1519 R7 -> 0 local variables (16 bytes)
1520 SP -> -12 additional stack space (12 bytes)
1521 The frame size would thus be 36 bytes, and the frame offset would be
1522 12 bytes. The frame register is R7.
1524 The comments for thumb_skip_prolog() describe the algorithm we use
1525 to detect the end of the prolog. */
1529 thumb_scan_prologue (struct gdbarch
*gdbarch
, CORE_ADDR prev_pc
,
1530 CORE_ADDR block_addr
, struct arm_prologue_cache
*cache
)
1532 CORE_ADDR prologue_start
;
1533 CORE_ADDR prologue_end
;
1534 CORE_ADDR current_pc
;
1536 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1539 /* See comment in arm_scan_prologue for an explanation of
1541 if (prologue_end
> prologue_start
+ 64)
1543 prologue_end
= prologue_start
+ 64;
1547 /* We're in the boondocks: we have no idea where the start of the
1551 prologue_end
= min (prologue_end
, prev_pc
);
1553 thumb_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
1556 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1559 arm_instruction_changes_pc (uint32_t this_instr
)
1561 if (bits (this_instr
, 28, 31) == INST_NV
)
1562 /* Unconditional instructions. */
1563 switch (bits (this_instr
, 24, 27))
1567 /* Branch with Link and change to Thumb. */
1572 /* Coprocessor register transfer. */
1573 if (bits (this_instr
, 12, 15) == 15)
1574 error (_("Invalid update to pc in instruction"));
1580 switch (bits (this_instr
, 25, 27))
1583 if (bits (this_instr
, 23, 24) == 2 && bit (this_instr
, 20) == 0)
1585 /* Multiplies and extra load/stores. */
1586 if (bit (this_instr
, 4) == 1 && bit (this_instr
, 7) == 1)
1587 /* Neither multiplies nor extension load/stores are allowed
1591 /* Otherwise, miscellaneous instructions. */
1593 /* BX <reg>, BXJ <reg>, BLX <reg> */
1594 if (bits (this_instr
, 4, 27) == 0x12fff1
1595 || bits (this_instr
, 4, 27) == 0x12fff2
1596 || bits (this_instr
, 4, 27) == 0x12fff3)
1599 /* Other miscellaneous instructions are unpredictable if they
1603 /* Data processing instruction. Fall through. */
1606 if (bits (this_instr
, 12, 15) == 15)
1613 /* Media instructions and architecturally undefined instructions. */
1614 if (bits (this_instr
, 25, 27) == 3 && bit (this_instr
, 4) == 1)
1618 if (bit (this_instr
, 20) == 0)
1622 if (bits (this_instr
, 12, 15) == ARM_PC_REGNUM
)
1628 /* Load/store multiple. */
1629 if (bit (this_instr
, 20) == 1 && bit (this_instr
, 15) == 1)
1635 /* Branch and branch with link. */
1640 /* Coprocessor transfers or SWIs can not affect PC. */
1644 internal_error (__FILE__
, __LINE__
, _("bad value in switch"));
1648 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1649 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1650 fill it in. Return the first address not recognized as a prologue
1653 We recognize all the instructions typically found in ARM prologues,
1654 plus harmless instructions which can be skipped (either for analysis
1655 purposes, or a more restrictive set that can be skipped when finding
1656 the end of the prologue). */
1659 arm_analyze_prologue (struct gdbarch
*gdbarch
,
1660 CORE_ADDR prologue_start
, CORE_ADDR prologue_end
,
1661 struct arm_prologue_cache
*cache
)
1663 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1664 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
1666 CORE_ADDR offset
, current_pc
;
1667 pv_t regs
[ARM_FPS_REGNUM
];
1668 struct pv_area
*stack
;
1669 struct cleanup
*back_to
;
1670 int framereg
, framesize
;
1671 CORE_ADDR unrecognized_pc
= 0;
1673 /* Search the prologue looking for instructions that set up the
1674 frame pointer, adjust the stack pointer, and save registers.
1676 Be careful, however, and if it doesn't look like a prologue,
1677 don't try to scan it. If, for instance, a frameless function
1678 begins with stmfd sp!, then we will tell ourselves there is
1679 a frame, which will confuse stack traceback, as well as "finish"
1680 and other operations that rely on a knowledge of the stack
1683 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1684 regs
[regno
] = pv_register (regno
, 0);
1685 stack
= make_pv_area (ARM_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
1686 back_to
= make_cleanup_free_pv_area (stack
);
1688 for (current_pc
= prologue_start
;
1689 current_pc
< prologue_end
;
1693 = read_memory_unsigned_integer (current_pc
, 4, byte_order_for_code
);
1695 if (insn
== 0xe1a0c00d) /* mov ip, sp */
1697 regs
[ARM_IP_REGNUM
] = regs
[ARM_SP_REGNUM
];
1700 else if ((insn
& 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1701 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1703 unsigned imm
= insn
& 0xff; /* immediate value */
1704 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1705 int rd
= bits (insn
, 12, 15);
1706 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1707 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], imm
);
1710 else if ((insn
& 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1711 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1713 unsigned imm
= insn
& 0xff; /* immediate value */
1714 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1715 int rd
= bits (insn
, 12, 15);
1716 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1717 regs
[rd
] = pv_add_constant (regs
[bits (insn
, 16, 19)], -imm
);
1720 else if ((insn
& 0xffff0fff) == 0xe52d0004) /* str Rd,
1723 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1725 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1726 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4,
1727 regs
[bits (insn
, 12, 15)]);
1730 else if ((insn
& 0xffff0000) == 0xe92d0000)
1731 /* stmfd sp!, {..., fp, ip, lr, pc}
1733 stmfd sp!, {a1, a2, a3, a4} */
1735 int mask
= insn
& 0xffff;
1737 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1740 /* Calculate offsets of saved registers. */
1741 for (regno
= ARM_PC_REGNUM
; regno
>= 0; regno
--)
1742 if (mask
& (1 << regno
))
1745 = pv_add_constant (regs
[ARM_SP_REGNUM
], -4);
1746 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 4, regs
[regno
]);
1749 else if ((insn
& 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1750 || (insn
& 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1751 || (insn
& 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1753 /* No need to add this to saved_regs -- it's just an arg reg. */
1756 else if ((insn
& 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1757 || (insn
& 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1758 || (insn
& 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1760 /* No need to add this to saved_regs -- it's just an arg reg. */
1763 else if ((insn
& 0xfff00000) == 0xe8800000 /* stm Rn,
1765 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1767 /* No need to add this to saved_regs -- it's just arg regs. */
1770 else if ((insn
& 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1772 unsigned imm
= insn
& 0xff; /* immediate value */
1773 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1774 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1775 regs
[ARM_FP_REGNUM
] = pv_add_constant (regs
[ARM_IP_REGNUM
], -imm
);
1777 else if ((insn
& 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1779 unsigned imm
= insn
& 0xff; /* immediate value */
1780 unsigned rot
= (insn
& 0xf00) >> 7; /* rotate amount */
1781 imm
= (imm
>> rot
) | (imm
<< (32 - rot
));
1782 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -imm
);
1784 else if ((insn
& 0xffff7fff) == 0xed6d0103 /* stfe f?,
1786 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1788 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1791 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1792 regno
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x07);
1793 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12, regs
[regno
]);
1795 else if ((insn
& 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1797 && gdbarch_tdep (gdbarch
)->have_fpa_registers
)
1799 int n_saved_fp_regs
;
1800 unsigned int fp_start_reg
, fp_bound_reg
;
1802 if (pv_area_store_would_trash (stack
, regs
[ARM_SP_REGNUM
]))
1805 if ((insn
& 0x800) == 0x800) /* N0 is set */
1807 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1808 n_saved_fp_regs
= 3;
1810 n_saved_fp_regs
= 1;
1814 if ((insn
& 0x40000) == 0x40000) /* N1 is set */
1815 n_saved_fp_regs
= 2;
1817 n_saved_fp_regs
= 4;
1820 fp_start_reg
= ARM_F0_REGNUM
+ ((insn
>> 12) & 0x7);
1821 fp_bound_reg
= fp_start_reg
+ n_saved_fp_regs
;
1822 for (; fp_start_reg
< fp_bound_reg
; fp_start_reg
++)
1824 regs
[ARM_SP_REGNUM
] = pv_add_constant (regs
[ARM_SP_REGNUM
], -12);
1825 pv_area_store (stack
, regs
[ARM_SP_REGNUM
], 12,
1826 regs
[fp_start_reg
++]);
1829 else if ((insn
& 0xff000000) == 0xeb000000 && cache
== NULL
) /* bl */
1831 /* Allow some special function calls when skipping the
1832 prologue; GCC generates these before storing arguments to
1834 CORE_ADDR dest
= BranchDest (current_pc
, insn
);
1836 if (skip_prologue_function (gdbarch
, dest
, 0))
1841 else if ((insn
& 0xf0000000) != 0xe0000000)
1842 break; /* Condition not true, exit early. */
1843 else if (arm_instruction_changes_pc (insn
))
1844 /* Don't scan past anything that might change control flow. */
1846 else if ((insn
& 0xfe500000) == 0xe8100000 /* ldm */
1847 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1848 /* Ignore block loads from the stack, potentially copying
1849 parameters from memory. */
1851 else if ((insn
& 0xfc500000) == 0xe4100000
1852 && pv_is_register (regs
[bits (insn
, 16, 19)], ARM_SP_REGNUM
))
1853 /* Similarly ignore single loads from the stack. */
1855 else if ((insn
& 0xffff0ff0) == 0xe1a00000)
1856 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1857 register instead of the stack. */
1861 /* The optimizer might shove anything into the prologue,
1862 so we just skip what we don't recognize. */
1863 unrecognized_pc
= current_pc
;
1868 if (unrecognized_pc
== 0)
1869 unrecognized_pc
= current_pc
;
1871 /* The frame size is just the distance from the frame register
1872 to the original stack pointer. */
1873 if (pv_is_register (regs
[ARM_FP_REGNUM
], ARM_SP_REGNUM
))
1875 /* Frame pointer is fp. */
1876 framereg
= ARM_FP_REGNUM
;
1877 framesize
= -regs
[ARM_FP_REGNUM
].k
;
1881 /* Try the stack pointer... this is a bit desperate. */
1882 framereg
= ARM_SP_REGNUM
;
1883 framesize
= -regs
[ARM_SP_REGNUM
].k
;
1888 cache
->framereg
= framereg
;
1889 cache
->framesize
= framesize
;
1891 for (regno
= 0; regno
< ARM_FPS_REGNUM
; regno
++)
1892 if (pv_area_find_reg (stack
, gdbarch
, regno
, &offset
))
1893 cache
->saved_regs
[regno
].addr
= offset
;
1897 fprintf_unfiltered (gdb_stdlog
, "Prologue scan stopped at %s\n",
1898 paddress (gdbarch
, unrecognized_pc
));
1900 do_cleanups (back_to
);
1901 return unrecognized_pc
;
1905 arm_scan_prologue (struct frame_info
*this_frame
,
1906 struct arm_prologue_cache
*cache
)
1908 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1909 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1911 CORE_ADDR prologue_start
, prologue_end
, current_pc
;
1912 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
1913 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
1914 pv_t regs
[ARM_FPS_REGNUM
];
1915 struct pv_area
*stack
;
1916 struct cleanup
*back_to
;
1919 /* Assume there is no frame until proven otherwise. */
1920 cache
->framereg
= ARM_SP_REGNUM
;
1921 cache
->framesize
= 0;
1923 /* Check for Thumb prologue. */
1924 if (arm_frame_is_thumb (this_frame
))
1926 thumb_scan_prologue (gdbarch
, prev_pc
, block_addr
, cache
);
1930 /* Find the function prologue. If we can't find the function in
1931 the symbol table, peek in the stack frame to find the PC. */
1932 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
1935 /* One way to find the end of the prologue (which works well
1936 for unoptimized code) is to do the following:
1938 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1941 prologue_end = prev_pc;
1942 else if (sal.end < prologue_end)
1943 prologue_end = sal.end;
1945 This mechanism is very accurate so long as the optimizer
1946 doesn't move any instructions from the function body into the
1947 prologue. If this happens, sal.end will be the last
1948 instruction in the first hunk of prologue code just before
1949 the first instruction that the scheduler has moved from
1950 the body to the prologue.
1952 In order to make sure that we scan all of the prologue
1953 instructions, we use a slightly less accurate mechanism which
1954 may scan more than necessary. To help compensate for this
1955 lack of accuracy, the prologue scanning loop below contains
1956 several clauses which'll cause the loop to terminate early if
1957 an implausible prologue instruction is encountered.
1963 is a suitable endpoint since it accounts for the largest
1964 possible prologue plus up to five instructions inserted by
1967 if (prologue_end
> prologue_start
+ 64)
1969 prologue_end
= prologue_start
+ 64; /* See above. */
1974 /* We have no symbol information. Our only option is to assume this
1975 function has a standard stack frame and the normal frame register.
1976 Then, we can find the value of our frame pointer on entrance to
1977 the callee (or at the present moment if this is the innermost frame).
1978 The value stored there should be the address of the stmfd + 8. */
1979 CORE_ADDR frame_loc
;
1980 LONGEST return_value
;
1982 frame_loc
= get_frame_register_unsigned (this_frame
, ARM_FP_REGNUM
);
1983 if (!safe_read_memory_integer (frame_loc
, 4, byte_order
, &return_value
))
1987 prologue_start
= gdbarch_addr_bits_remove
1988 (gdbarch
, return_value
) - 8;
1989 prologue_end
= prologue_start
+ 64; /* See above. */
1993 if (prev_pc
< prologue_end
)
1994 prologue_end
= prev_pc
;
1996 arm_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
1999 static struct arm_prologue_cache
*
2000 arm_make_prologue_cache (struct frame_info
*this_frame
)
2003 struct arm_prologue_cache
*cache
;
2004 CORE_ADDR unwound_fp
;
2006 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2007 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2009 arm_scan_prologue (this_frame
, cache
);
2011 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
2012 if (unwound_fp
== 0)
2015 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
2017 /* Calculate actual addresses of saved registers using offsets
2018 determined by arm_scan_prologue. */
2019 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
2020 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
2021 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
2026 /* Our frame ID for a normal frame is the current function's starting PC
2027 and the caller's SP when we were called. */
2030 arm_prologue_this_id (struct frame_info
*this_frame
,
2032 struct frame_id
*this_id
)
2034 struct arm_prologue_cache
*cache
;
2038 if (*this_cache
== NULL
)
2039 *this_cache
= arm_make_prologue_cache (this_frame
);
2040 cache
= *this_cache
;
2042 /* This is meant to halt the backtrace at "_start". */
2043 pc
= get_frame_pc (this_frame
);
2044 if (pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
2047 /* If we've hit a wall, stop. */
2048 if (cache
->prev_sp
== 0)
2051 /* Use function start address as part of the frame ID. If we cannot
2052 identify the start address (due to missing symbol information),
2053 fall back to just using the current PC. */
2054 func
= get_frame_func (this_frame
);
2058 id
= frame_id_build (cache
->prev_sp
, func
);
2062 static struct value
*
2063 arm_prologue_prev_register (struct frame_info
*this_frame
,
2067 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2068 struct arm_prologue_cache
*cache
;
2070 if (*this_cache
== NULL
)
2071 *this_cache
= arm_make_prologue_cache (this_frame
);
2072 cache
= *this_cache
;
2074 /* If we are asked to unwind the PC, then we need to return the LR
2075 instead. The prologue may save PC, but it will point into this
2076 frame's prologue, not the next frame's resume location. Also
2077 strip the saved T bit. A valid LR may have the low bit set, but
2078 a valid PC never does. */
2079 if (prev_regnum
== ARM_PC_REGNUM
)
2083 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2084 return frame_unwind_got_constant (this_frame
, prev_regnum
,
2085 arm_addr_bits_remove (gdbarch
, lr
));
2088 /* SP is generally not saved to the stack, but this frame is
2089 identified by the next frame's stack pointer at the time of the call.
2090 The value was already reconstructed into PREV_SP. */
2091 if (prev_regnum
== ARM_SP_REGNUM
)
2092 return frame_unwind_got_constant (this_frame
, prev_regnum
, cache
->prev_sp
);
2094 /* The CPSR may have been changed by the call instruction and by the
2095 called function. The only bit we can reconstruct is the T bit,
2096 by checking the low bit of LR as of the call. This is a reliable
2097 indicator of Thumb-ness except for some ARM v4T pre-interworking
2098 Thumb code, which could get away with a clear low bit as long as
2099 the called function did not use bx. Guess that all other
2100 bits are unchanged; the condition flags are presumably lost,
2101 but the processor status is likely valid. */
2102 if (prev_regnum
== ARM_PS_REGNUM
)
2105 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
2107 cpsr
= get_frame_register_unsigned (this_frame
, prev_regnum
);
2108 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2109 if (IS_THUMB_ADDR (lr
))
2113 return frame_unwind_got_constant (this_frame
, prev_regnum
, cpsr
);
2116 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
2120 struct frame_unwind arm_prologue_unwind
= {
2122 default_frame_unwind_stop_reason
,
2123 arm_prologue_this_id
,
2124 arm_prologue_prev_register
,
2126 default_frame_sniffer
2129 /* Maintain a list of ARM exception table entries per objfile, similar to the
2130 list of mapping symbols. We only cache entries for standard ARM-defined
2131 personality routines; the cache will contain only the frame unwinding
2132 instructions associated with the entry (not the descriptors). */
2134 static const struct objfile_data
*arm_exidx_data_key
;
2136 struct arm_exidx_entry
2141 typedef struct arm_exidx_entry arm_exidx_entry_s
;
2142 DEF_VEC_O(arm_exidx_entry_s
);
2144 struct arm_exidx_data
2146 VEC(arm_exidx_entry_s
) **section_maps
;
2150 arm_exidx_data_free (struct objfile
*objfile
, void *arg
)
2152 struct arm_exidx_data
*data
= arg
;
2155 for (i
= 0; i
< objfile
->obfd
->section_count
; i
++)
2156 VEC_free (arm_exidx_entry_s
, data
->section_maps
[i
]);
2160 arm_compare_exidx_entries (const struct arm_exidx_entry
*lhs
,
2161 const struct arm_exidx_entry
*rhs
)
2163 return lhs
->addr
< rhs
->addr
;
2166 static struct obj_section
*
2167 arm_obj_section_from_vma (struct objfile
*objfile
, bfd_vma vma
)
2169 struct obj_section
*osect
;
2171 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
2172 if (bfd_get_section_flags (objfile
->obfd
,
2173 osect
->the_bfd_section
) & SEC_ALLOC
)
2175 bfd_vma start
, size
;
2176 start
= bfd_get_section_vma (objfile
->obfd
, osect
->the_bfd_section
);
2177 size
= bfd_get_section_size (osect
->the_bfd_section
);
2179 if (start
<= vma
&& vma
< start
+ size
)
2186 /* Parse contents of exception table and exception index sections
2187 of OBJFILE, and fill in the exception table entry cache.
2189 For each entry that refers to a standard ARM-defined personality
2190 routine, extract the frame unwinding instructions (from either
2191 the index or the table section). The unwinding instructions
2193 - extracting them from the rest of the table data
2194 - converting to host endianness
2195 - appending the implicit 0xb0 ("Finish") code
2197 The extracted and normalized instructions are stored for later
2198 retrieval by the arm_find_exidx_entry routine. */
2201 arm_exidx_new_objfile (struct objfile
*objfile
)
2203 struct cleanup
*cleanups
;
2204 struct arm_exidx_data
*data
;
2205 asection
*exidx
, *extab
;
2206 bfd_vma exidx_vma
= 0, extab_vma
= 0;
2207 bfd_size_type exidx_size
= 0, extab_size
= 0;
2208 gdb_byte
*exidx_data
= NULL
, *extab_data
= NULL
;
2211 /* If we've already touched this file, do nothing. */
2212 if (!objfile
|| objfile_data (objfile
, arm_exidx_data_key
) != NULL
)
2214 cleanups
= make_cleanup (null_cleanup
, NULL
);
2216 /* Read contents of exception table and index. */
2217 exidx
= bfd_get_section_by_name (objfile
->obfd
, ".ARM.exidx");
2220 exidx_vma
= bfd_section_vma (objfile
->obfd
, exidx
);
2221 exidx_size
= bfd_get_section_size (exidx
);
2222 exidx_data
= xmalloc (exidx_size
);
2223 make_cleanup (xfree
, exidx_data
);
2225 if (!bfd_get_section_contents (objfile
->obfd
, exidx
,
2226 exidx_data
, 0, exidx_size
))
2228 do_cleanups (cleanups
);
2233 extab
= bfd_get_section_by_name (objfile
->obfd
, ".ARM.extab");
2236 extab_vma
= bfd_section_vma (objfile
->obfd
, extab
);
2237 extab_size
= bfd_get_section_size (extab
);
2238 extab_data
= xmalloc (extab_size
);
2239 make_cleanup (xfree
, extab_data
);
2241 if (!bfd_get_section_contents (objfile
->obfd
, extab
,
2242 extab_data
, 0, extab_size
))
2244 do_cleanups (cleanups
);
2249 /* Allocate exception table data structure. */
2250 data
= OBSTACK_ZALLOC (&objfile
->objfile_obstack
, struct arm_exidx_data
);
2251 set_objfile_data (objfile
, arm_exidx_data_key
, data
);
2252 data
->section_maps
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
2253 objfile
->obfd
->section_count
,
2254 VEC(arm_exidx_entry_s
) *);
2256 /* Fill in exception table. */
2257 for (i
= 0; i
< exidx_size
/ 8; i
++)
2259 struct arm_exidx_entry new_exidx_entry
;
2260 bfd_vma idx
= bfd_h_get_32 (objfile
->obfd
, exidx_data
+ i
* 8);
2261 bfd_vma val
= bfd_h_get_32 (objfile
->obfd
, exidx_data
+ i
* 8 + 4);
2262 bfd_vma addr
= 0, word
= 0;
2263 int n_bytes
= 0, n_words
= 0;
2264 struct obj_section
*sec
;
2265 gdb_byte
*entry
= NULL
;
2267 /* Extract address of start of function. */
2268 idx
= ((idx
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2269 idx
+= exidx_vma
+ i
* 8;
2271 /* Find section containing function and compute section offset. */
2272 sec
= arm_obj_section_from_vma (objfile
, idx
);
2275 idx
-= bfd_get_section_vma (objfile
->obfd
, sec
->the_bfd_section
);
2277 /* Determine address of exception table entry. */
2280 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2282 else if ((val
& 0xff000000) == 0x80000000)
2284 /* Exception table entry embedded in .ARM.exidx
2285 -- must be short form. */
2289 else if (!(val
& 0x80000000))
2291 /* Exception table entry in .ARM.extab. */
2292 addr
= ((val
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2293 addr
+= exidx_vma
+ i
* 8 + 4;
2295 if (addr
>= extab_vma
&& addr
+ 4 <= extab_vma
+ extab_size
)
2297 word
= bfd_h_get_32 (objfile
->obfd
,
2298 extab_data
+ addr
- extab_vma
);
2301 if ((word
& 0xff000000) == 0x80000000)
2306 else if ((word
& 0xff000000) == 0x81000000
2307 || (word
& 0xff000000) == 0x82000000)
2311 n_words
= ((word
>> 16) & 0xff);
2313 else if (!(word
& 0x80000000))
2316 struct obj_section
*pers_sec
;
2317 int gnu_personality
= 0;
2319 /* Custom personality routine. */
2320 pers
= ((word
& 0x7fffffff) ^ 0x40000000) - 0x40000000;
2321 pers
= UNMAKE_THUMB_ADDR (pers
+ addr
- 4);
2323 /* Check whether we've got one of the variants of the
2324 GNU personality routines. */
2325 pers_sec
= arm_obj_section_from_vma (objfile
, pers
);
2328 static const char *personality
[] =
2330 "__gcc_personality_v0",
2331 "__gxx_personality_v0",
2332 "__gcj_personality_v0",
2333 "__gnu_objc_personality_v0",
2337 CORE_ADDR pc
= pers
+ obj_section_offset (pers_sec
);
2340 for (k
= 0; personality
[k
]; k
++)
2341 if (lookup_minimal_symbol_by_pc_name
2342 (pc
, personality
[k
], objfile
))
2344 gnu_personality
= 1;
2349 /* If so, the next word contains a word count in the high
2350 byte, followed by the same unwind instructions as the
2351 pre-defined forms. */
2353 && addr
+ 4 <= extab_vma
+ extab_size
)
2355 word
= bfd_h_get_32 (objfile
->obfd
,
2356 extab_data
+ addr
- extab_vma
);
2359 n_words
= ((word
>> 24) & 0xff);
2365 /* Sanity check address. */
2367 if (addr
< extab_vma
|| addr
+ 4 * n_words
> extab_vma
+ extab_size
)
2368 n_words
= n_bytes
= 0;
2370 /* The unwind instructions reside in WORD (only the N_BYTES least
2371 significant bytes are valid), followed by N_WORDS words in the
2372 extab section starting at ADDR. */
2373 if (n_bytes
|| n_words
)
2375 gdb_byte
*p
= entry
= obstack_alloc (&objfile
->objfile_obstack
,
2376 n_bytes
+ n_words
* 4 + 1);
2379 *p
++ = (gdb_byte
) ((word
>> (8 * n_bytes
)) & 0xff);
2383 word
= bfd_h_get_32 (objfile
->obfd
,
2384 extab_data
+ addr
- extab_vma
);
2387 *p
++ = (gdb_byte
) ((word
>> 24) & 0xff);
2388 *p
++ = (gdb_byte
) ((word
>> 16) & 0xff);
2389 *p
++ = (gdb_byte
) ((word
>> 8) & 0xff);
2390 *p
++ = (gdb_byte
) (word
& 0xff);
2393 /* Implied "Finish" to terminate the list. */
2397 /* Push entry onto vector. They are guaranteed to always
2398 appear in order of increasing addresses. */
2399 new_exidx_entry
.addr
= idx
;
2400 new_exidx_entry
.entry
= entry
;
2401 VEC_safe_push (arm_exidx_entry_s
,
2402 data
->section_maps
[sec
->the_bfd_section
->index
],
2406 do_cleanups (cleanups
);
2409 /* Search for the exception table entry covering MEMADDR. If one is found,
2410 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2411 set *START to the start of the region covered by this entry. */
2414 arm_find_exidx_entry (CORE_ADDR memaddr
, CORE_ADDR
*start
)
2416 struct obj_section
*sec
;
2418 sec
= find_pc_section (memaddr
);
2421 struct arm_exidx_data
*data
;
2422 VEC(arm_exidx_entry_s
) *map
;
2423 struct arm_exidx_entry map_key
= { memaddr
- obj_section_addr (sec
), 0 };
2426 data
= objfile_data (sec
->objfile
, arm_exidx_data_key
);
2429 map
= data
->section_maps
[sec
->the_bfd_section
->index
];
2430 if (!VEC_empty (arm_exidx_entry_s
, map
))
2432 struct arm_exidx_entry
*map_sym
;
2434 idx
= VEC_lower_bound (arm_exidx_entry_s
, map
, &map_key
,
2435 arm_compare_exidx_entries
);
2437 /* VEC_lower_bound finds the earliest ordered insertion
2438 point. If the following symbol starts at this exact
2439 address, we use that; otherwise, the preceding
2440 exception table entry covers this address. */
2441 if (idx
< VEC_length (arm_exidx_entry_s
, map
))
2443 map_sym
= VEC_index (arm_exidx_entry_s
, map
, idx
);
2444 if (map_sym
->addr
== map_key
.addr
)
2447 *start
= map_sym
->addr
+ obj_section_addr (sec
);
2448 return map_sym
->entry
;
2454 map_sym
= VEC_index (arm_exidx_entry_s
, map
, idx
- 1);
2456 *start
= map_sym
->addr
+ obj_section_addr (sec
);
2457 return map_sym
->entry
;
2466 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2467 instruction list from the ARM exception table entry ENTRY, allocate and
2468 return a prologue cache structure describing how to unwind this frame.
2470 Return NULL if the unwinding instruction list contains a "spare",
2471 "reserved" or "refuse to unwind" instruction as defined in section
2472 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2473 for the ARM Architecture" document. */
2475 static struct arm_prologue_cache
*
2476 arm_exidx_fill_cache (struct frame_info
*this_frame
, gdb_byte
*entry
)
2481 struct arm_prologue_cache
*cache
;
2482 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2483 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2489 /* Whenever we reload SP, we actually have to retrieve its
2490 actual value in the current frame. */
2493 if (trad_frame_realreg_p (cache
->saved_regs
, ARM_SP_REGNUM
))
2495 int reg
= cache
->saved_regs
[ARM_SP_REGNUM
].realreg
;
2496 vsp
= get_frame_register_unsigned (this_frame
, reg
);
2500 CORE_ADDR addr
= cache
->saved_regs
[ARM_SP_REGNUM
].addr
;
2501 vsp
= get_frame_memory_unsigned (this_frame
, addr
, 4);
2507 /* Decode next unwind instruction. */
2510 if ((insn
& 0xc0) == 0)
2512 int offset
= insn
& 0x3f;
2513 vsp
+= (offset
<< 2) + 4;
2515 else if ((insn
& 0xc0) == 0x40)
2517 int offset
= insn
& 0x3f;
2518 vsp
-= (offset
<< 2) + 4;
2520 else if ((insn
& 0xf0) == 0x80)
2522 int mask
= ((insn
& 0xf) << 8) | *entry
++;
2525 /* The special case of an all-zero mask identifies
2526 "Refuse to unwind". We return NULL to fall back
2527 to the prologue analyzer. */
2531 /* Pop registers r4..r15 under mask. */
2532 for (i
= 0; i
< 12; i
++)
2533 if (mask
& (1 << i
))
2535 cache
->saved_regs
[4 + i
].addr
= vsp
;
2539 /* Special-case popping SP -- we need to reload vsp. */
2540 if (mask
& (1 << (ARM_SP_REGNUM
- 4)))
2543 else if ((insn
& 0xf0) == 0x90)
2545 int reg
= insn
& 0xf;
2547 /* Reserved cases. */
2548 if (reg
== ARM_SP_REGNUM
|| reg
== ARM_PC_REGNUM
)
2551 /* Set SP from another register and mark VSP for reload. */
2552 cache
->saved_regs
[ARM_SP_REGNUM
] = cache
->saved_regs
[reg
];
2555 else if ((insn
& 0xf0) == 0xa0)
2557 int count
= insn
& 0x7;
2558 int pop_lr
= (insn
& 0x8) != 0;
2561 /* Pop r4..r[4+count]. */
2562 for (i
= 0; i
<= count
; i
++)
2564 cache
->saved_regs
[4 + i
].addr
= vsp
;
2568 /* If indicated by flag, pop LR as well. */
2571 cache
->saved_regs
[ARM_LR_REGNUM
].addr
= vsp
;
2575 else if (insn
== 0xb0)
2577 /* We could only have updated PC by popping into it; if so, it
2578 will show up as address. Otherwise, copy LR into PC. */
2579 if (!trad_frame_addr_p (cache
->saved_regs
, ARM_PC_REGNUM
))
2580 cache
->saved_regs
[ARM_PC_REGNUM
]
2581 = cache
->saved_regs
[ARM_LR_REGNUM
];
2586 else if (insn
== 0xb1)
2588 int mask
= *entry
++;
2591 /* All-zero mask and mask >= 16 is "spare". */
2592 if (mask
== 0 || mask
>= 16)
2595 /* Pop r0..r3 under mask. */
2596 for (i
= 0; i
< 4; i
++)
2597 if (mask
& (1 << i
))
2599 cache
->saved_regs
[i
].addr
= vsp
;
2603 else if (insn
== 0xb2)
2605 ULONGEST offset
= 0;
2610 offset
|= (*entry
& 0x7f) << shift
;
2613 while (*entry
++ & 0x80);
2615 vsp
+= 0x204 + (offset
<< 2);
2617 else if (insn
== 0xb3)
2619 int start
= *entry
>> 4;
2620 int count
= (*entry
++) & 0xf;
2623 /* Only registers D0..D15 are valid here. */
2624 if (start
+ count
>= 16)
2627 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2628 for (i
= 0; i
<= count
; i
++)
2630 cache
->saved_regs
[ARM_D0_REGNUM
+ start
+ i
].addr
= vsp
;
2634 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2637 else if ((insn
& 0xf8) == 0xb8)
2639 int count
= insn
& 0x7;
2642 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2643 for (i
= 0; i
<= count
; i
++)
2645 cache
->saved_regs
[ARM_D0_REGNUM
+ 8 + i
].addr
= vsp
;
2649 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2652 else if (insn
== 0xc6)
2654 int start
= *entry
>> 4;
2655 int count
= (*entry
++) & 0xf;
2658 /* Only registers WR0..WR15 are valid. */
2659 if (start
+ count
>= 16)
2662 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2663 for (i
= 0; i
<= count
; i
++)
2665 cache
->saved_regs
[ARM_WR0_REGNUM
+ start
+ i
].addr
= vsp
;
2669 else if (insn
== 0xc7)
2671 int mask
= *entry
++;
2674 /* All-zero mask and mask >= 16 is "spare". */
2675 if (mask
== 0 || mask
>= 16)
2678 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2679 for (i
= 0; i
< 4; i
++)
2680 if (mask
& (1 << i
))
2682 cache
->saved_regs
[ARM_WCGR0_REGNUM
+ i
].addr
= vsp
;
2686 else if ((insn
& 0xf8) == 0xc0)
2688 int count
= insn
& 0x7;
2691 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2692 for (i
= 0; i
<= count
; i
++)
2694 cache
->saved_regs
[ARM_WR0_REGNUM
+ 10 + i
].addr
= vsp
;
2698 else if (insn
== 0xc8)
2700 int start
= *entry
>> 4;
2701 int count
= (*entry
++) & 0xf;
2704 /* Only registers D0..D31 are valid. */
2705 if (start
+ count
>= 16)
2708 /* Pop VFP double-precision registers
2709 D[16+start]..D[16+start+count]. */
2710 for (i
= 0; i
<= count
; i
++)
2712 cache
->saved_regs
[ARM_D0_REGNUM
+ 16 + start
+ i
].addr
= vsp
;
2716 else if (insn
== 0xc9)
2718 int start
= *entry
>> 4;
2719 int count
= (*entry
++) & 0xf;
2722 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2723 for (i
= 0; i
<= count
; i
++)
2725 cache
->saved_regs
[ARM_D0_REGNUM
+ start
+ i
].addr
= vsp
;
2729 else if ((insn
& 0xf8) == 0xd0)
2731 int count
= insn
& 0x7;
2734 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2735 for (i
= 0; i
<= count
; i
++)
2737 cache
->saved_regs
[ARM_D0_REGNUM
+ 8 + i
].addr
= vsp
;
2743 /* Everything else is "spare". */
2748 /* If we restore SP from a register, assume this was the frame register.
2749 Otherwise just fall back to SP as frame register. */
2750 if (trad_frame_realreg_p (cache
->saved_regs
, ARM_SP_REGNUM
))
2751 cache
->framereg
= cache
->saved_regs
[ARM_SP_REGNUM
].realreg
;
2753 cache
->framereg
= ARM_SP_REGNUM
;
2755 /* Determine offset to previous frame. */
2757 = vsp
- get_frame_register_unsigned (this_frame
, cache
->framereg
);
2759 /* We already got the previous SP. */
2760 cache
->prev_sp
= vsp
;
2765 /* Unwinding via ARM exception table entries. Note that the sniffer
2766 already computes a filled-in prologue cache, which is then used
2767 with the same arm_prologue_this_id and arm_prologue_prev_register
2768 routines also used for prologue-parsing based unwinding. */
2771 arm_exidx_unwind_sniffer (const struct frame_unwind
*self
,
2772 struct frame_info
*this_frame
,
2773 void **this_prologue_cache
)
2775 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2776 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2777 CORE_ADDR addr_in_block
, exidx_region
, func_start
;
2778 struct arm_prologue_cache
*cache
;
2781 /* See if we have an ARM exception table entry covering this address. */
2782 addr_in_block
= get_frame_address_in_block (this_frame
);
2783 entry
= arm_find_exidx_entry (addr_in_block
, &exidx_region
);
2787 /* The ARM exception table does not describe unwind information
2788 for arbitrary PC values, but is guaranteed to be correct only
2789 at call sites. We have to decide here whether we want to use
2790 ARM exception table information for this frame, or fall back
2791 to using prologue parsing. (Note that if we have DWARF CFI,
2792 this sniffer isn't even called -- CFI is always preferred.)
2794 Before we make this decision, however, we check whether we
2795 actually have *symbol* information for the current frame.
2796 If not, prologue parsing would not work anyway, so we might
2797 as well use the exception table and hope for the best. */
2798 if (find_pc_partial_function (addr_in_block
, NULL
, &func_start
, NULL
))
2802 /* If the next frame is "normal", we are at a call site in this
2803 frame, so exception information is guaranteed to be valid. */
2804 if (get_next_frame (this_frame
)
2805 && get_frame_type (get_next_frame (this_frame
)) == NORMAL_FRAME
)
2808 /* We also assume exception information is valid if we're currently
2809 blocked in a system call. The system library is supposed to
2810 ensure this, so that e.g. pthread cancellation works. */
2811 if (arm_frame_is_thumb (this_frame
))
2815 if (safe_read_memory_integer (get_frame_pc (this_frame
) - 2, 2,
2816 byte_order_for_code
, &insn
)
2817 && (insn
& 0xff00) == 0xdf00 /* svc */)
2824 if (safe_read_memory_integer (get_frame_pc (this_frame
) - 4, 4,
2825 byte_order_for_code
, &insn
)
2826 && (insn
& 0x0f000000) == 0x0f000000 /* svc */)
2830 /* Bail out if we don't know that exception information is valid. */
2834 /* The ARM exception index does not mark the *end* of the region
2835 covered by the entry, and some functions will not have any entry.
2836 To correctly recognize the end of the covered region, the linker
2837 should have inserted dummy records with a CANTUNWIND marker.
2839 Unfortunately, current versions of GNU ld do not reliably do
2840 this, and thus we may have found an incorrect entry above.
2841 As a (temporary) sanity check, we only use the entry if it
2842 lies *within* the bounds of the function. Note that this check
2843 might reject perfectly valid entries that just happen to cover
2844 multiple functions; therefore this check ought to be removed
2845 once the linker is fixed. */
2846 if (func_start
> exidx_region
)
2850 /* Decode the list of unwinding instructions into a prologue cache.
2851 Note that this may fail due to e.g. a "refuse to unwind" code. */
2852 cache
= arm_exidx_fill_cache (this_frame
, entry
);
2856 *this_prologue_cache
= cache
;
2860 struct frame_unwind arm_exidx_unwind
= {
2862 default_frame_unwind_stop_reason
,
2863 arm_prologue_this_id
,
2864 arm_prologue_prev_register
,
2866 arm_exidx_unwind_sniffer
2869 static struct arm_prologue_cache
*
2870 arm_make_stub_cache (struct frame_info
*this_frame
)
2872 struct arm_prologue_cache
*cache
;
2874 cache
= FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache
);
2875 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
2877 cache
->prev_sp
= get_frame_register_unsigned (this_frame
, ARM_SP_REGNUM
);
2882 /* Our frame ID for a stub frame is the current SP and LR. */
2885 arm_stub_this_id (struct frame_info
*this_frame
,
2887 struct frame_id
*this_id
)
2889 struct arm_prologue_cache
*cache
;
2891 if (*this_cache
== NULL
)
2892 *this_cache
= arm_make_stub_cache (this_frame
);
2893 cache
= *this_cache
;
2895 *this_id
= frame_id_build (cache
->prev_sp
, get_frame_pc (this_frame
));
2899 arm_stub_unwind_sniffer (const struct frame_unwind
*self
,
2900 struct frame_info
*this_frame
,
2901 void **this_prologue_cache
)
2903 CORE_ADDR addr_in_block
;
2906 addr_in_block
= get_frame_address_in_block (this_frame
);
2907 if (in_plt_section (addr_in_block
, NULL
)
2908 /* We also use the stub winder if the target memory is unreadable
2909 to avoid having the prologue unwinder trying to read it. */
2910 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
2916 struct frame_unwind arm_stub_unwind
= {
2918 default_frame_unwind_stop_reason
,
2920 arm_prologue_prev_register
,
2922 arm_stub_unwind_sniffer
2926 arm_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
2928 struct arm_prologue_cache
*cache
;
2930 if (*this_cache
== NULL
)
2931 *this_cache
= arm_make_prologue_cache (this_frame
);
2932 cache
= *this_cache
;
2934 return cache
->prev_sp
- cache
->framesize
;
2937 struct frame_base arm_normal_base
= {
2938 &arm_prologue_unwind
,
2939 arm_normal_frame_base
,
2940 arm_normal_frame_base
,
2941 arm_normal_frame_base
2944 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2945 dummy frame. The frame ID's base needs to match the TOS value
2946 saved by save_dummy_frame_tos() and returned from
2947 arm_push_dummy_call, and the PC needs to match the dummy frame's
2950 static struct frame_id
2951 arm_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2953 return frame_id_build (get_frame_register_unsigned (this_frame
,
2955 get_frame_pc (this_frame
));
2958 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2959 be used to construct the previous frame's ID, after looking up the
2960 containing function). */
2963 arm_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2966 pc
= frame_unwind_register_unsigned (this_frame
, ARM_PC_REGNUM
);
2967 return arm_addr_bits_remove (gdbarch
, pc
);
2971 arm_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2973 return frame_unwind_register_unsigned (this_frame
, ARM_SP_REGNUM
);
2976 static struct value
*
2977 arm_dwarf2_prev_register (struct frame_info
*this_frame
, void **this_cache
,
2980 struct gdbarch
* gdbarch
= get_frame_arch (this_frame
);
2982 ULONGEST t_bit
= arm_psr_thumb_bit (gdbarch
);
2987 /* The PC is normally copied from the return column, which
2988 describes saves of LR. However, that version may have an
2989 extra bit set to indicate Thumb state. The bit is not
2991 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2992 return frame_unwind_got_constant (this_frame
, regnum
,
2993 arm_addr_bits_remove (gdbarch
, lr
));
2996 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2997 cpsr
= get_frame_register_unsigned (this_frame
, regnum
);
2998 lr
= frame_unwind_register_unsigned (this_frame
, ARM_LR_REGNUM
);
2999 if (IS_THUMB_ADDR (lr
))
3003 return frame_unwind_got_constant (this_frame
, regnum
, cpsr
);
3006 internal_error (__FILE__
, __LINE__
,
3007 _("Unexpected register %d"), regnum
);
3012 arm_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
3013 struct dwarf2_frame_state_reg
*reg
,
3014 struct frame_info
*this_frame
)
3020 reg
->how
= DWARF2_FRAME_REG_FN
;
3021 reg
->loc
.fn
= arm_dwarf2_prev_register
;
3024 reg
->how
= DWARF2_FRAME_REG_CFA
;
3029 /* Return true if we are in the function's epilogue, i.e. after the
3030 instruction that destroyed the function's stack frame. */
3033 thumb_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3035 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3036 unsigned int insn
, insn2
;
3037 int found_return
= 0, found_stack_adjust
= 0;
3038 CORE_ADDR func_start
, func_end
;
3042 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
3045 /* The epilogue is a sequence of instructions along the following lines:
3047 - add stack frame size to SP or FP
3048 - [if frame pointer used] restore SP from FP
3049 - restore registers from SP [may include PC]
3050 - a return-type instruction [if PC wasn't already restored]
3052 In a first pass, we scan forward from the current PC and verify the
3053 instructions we find as compatible with this sequence, ending in a
3056 However, this is not sufficient to distinguish indirect function calls
3057 within a function from indirect tail calls in the epilogue in some cases.
3058 Therefore, if we didn't already find any SP-changing instruction during
3059 forward scan, we add a backward scanning heuristic to ensure we actually
3060 are in the epilogue. */
3063 while (scan_pc
< func_end
&& !found_return
)
3065 if (target_read_memory (scan_pc
, buf
, 2))
3069 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3071 if ((insn
& 0xff80) == 0x4700) /* bx <Rm> */
3073 else if (insn
== 0x46f7) /* mov pc, lr */
3075 else if (insn
== 0x46bd) /* mov sp, r7 */
3076 found_stack_adjust
= 1;
3077 else if ((insn
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3078 found_stack_adjust
= 1;
3079 else if ((insn
& 0xfe00) == 0xbc00) /* pop <registers> */
3081 found_stack_adjust
= 1;
3082 if (insn
& 0x0100) /* <registers> include PC. */
3085 else if (thumb_insn_size (insn
) == 4) /* 32-bit Thumb-2 instruction */
3087 if (target_read_memory (scan_pc
, buf
, 2))
3091 insn2
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3093 if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
3095 found_stack_adjust
= 1;
3096 if (insn2
& 0x8000) /* <registers> include PC. */
3099 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
3100 && (insn2
& 0x0fff) == 0x0b04)
3102 found_stack_adjust
= 1;
3103 if ((insn2
& 0xf000) == 0xf000) /* <Rt> is PC. */
3106 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
3107 && (insn2
& 0x0e00) == 0x0a00)
3108 found_stack_adjust
= 1;
3119 /* Since any instruction in the epilogue sequence, with the possible
3120 exception of return itself, updates the stack pointer, we need to
3121 scan backwards for at most one instruction. Try either a 16-bit or
3122 a 32-bit instruction. This is just a heuristic, so we do not worry
3123 too much about false positives. */
3125 if (!found_stack_adjust
)
3127 if (pc
- 4 < func_start
)
3129 if (target_read_memory (pc
- 4, buf
, 4))
3132 insn
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
3133 insn2
= extract_unsigned_integer (buf
+ 2, 2, byte_order_for_code
);
3135 if (insn2
== 0x46bd) /* mov sp, r7 */
3136 found_stack_adjust
= 1;
3137 else if ((insn2
& 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3138 found_stack_adjust
= 1;
3139 else if ((insn2
& 0xff00) == 0xbc00) /* pop <registers> without PC */
3140 found_stack_adjust
= 1;
3141 else if (insn
== 0xe8bd) /* ldm.w sp!, <registers> */
3142 found_stack_adjust
= 1;
3143 else if (insn
== 0xf85d /* ldr.w <Rt>, [sp], #4 */
3144 && (insn2
& 0x0fff) == 0x0b04)
3145 found_stack_adjust
= 1;
3146 else if ((insn
& 0xffbf) == 0xecbd /* vldm sp!, <list> */
3147 && (insn2
& 0x0e00) == 0x0a00)
3148 found_stack_adjust
= 1;
3151 return found_stack_adjust
;
3154 /* Return true if we are in the function's epilogue, i.e. after the
3155 instruction that destroyed the function's stack frame. */
3158 arm_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3160 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3162 int found_return
, found_stack_adjust
;
3163 CORE_ADDR func_start
, func_end
;
3165 if (arm_pc_is_thumb (gdbarch
, pc
))
3166 return thumb_in_function_epilogue_p (gdbarch
, pc
);
3168 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
3171 /* We are in the epilogue if the previous instruction was a stack
3172 adjustment and the next instruction is a possible return (bx, mov
3173 pc, or pop). We could have to scan backwards to find the stack
3174 adjustment, or forwards to find the return, but this is a decent
3175 approximation. First scan forwards. */
3178 insn
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
3179 if (bits (insn
, 28, 31) != INST_NV
)
3181 if ((insn
& 0x0ffffff0) == 0x012fff10)
3184 else if ((insn
& 0x0ffffff0) == 0x01a0f000)
3187 else if ((insn
& 0x0fff0000) == 0x08bd0000
3188 && (insn
& 0x0000c000) != 0)
3189 /* POP (LDMIA), including PC or LR. */
3196 /* Scan backwards. This is just a heuristic, so do not worry about
3197 false positives from mode changes. */
3199 if (pc
< func_start
+ 4)
3202 found_stack_adjust
= 0;
3203 insn
= read_memory_unsigned_integer (pc
- 4, 4, byte_order_for_code
);
3204 if (bits (insn
, 28, 31) != INST_NV
)
3206 if ((insn
& 0x0df0f000) == 0x0080d000)
3207 /* ADD SP (register or immediate). */
3208 found_stack_adjust
= 1;
3209 else if ((insn
& 0x0df0f000) == 0x0040d000)
3210 /* SUB SP (register or immediate). */
3211 found_stack_adjust
= 1;
3212 else if ((insn
& 0x0ffffff0) == 0x01a0d000)
3214 found_stack_adjust
= 1;
3215 else if ((insn
& 0x0fff0000) == 0x08bd0000)
3217 found_stack_adjust
= 1;
3220 if (found_stack_adjust
)
3227 /* When arguments must be pushed onto the stack, they go on in reverse
3228 order. The code below implements a FILO (stack) to do this. */
3233 struct stack_item
*prev
;
3237 static struct stack_item
*
3238 push_stack_item (struct stack_item
*prev
, const void *contents
, int len
)
3240 struct stack_item
*si
;
3241 si
= xmalloc (sizeof (struct stack_item
));
3242 si
->data
= xmalloc (len
);
3245 memcpy (si
->data
, contents
, len
);
3249 static struct stack_item
*
3250 pop_stack_item (struct stack_item
*si
)
3252 struct stack_item
*dead
= si
;
3260 /* Return the alignment (in bytes) of the given type. */
3263 arm_type_align (struct type
*t
)
3269 t
= check_typedef (t
);
3270 switch (TYPE_CODE (t
))
3273 /* Should never happen. */
3274 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
3278 case TYPE_CODE_ENUM
:
3282 case TYPE_CODE_RANGE
:
3283 case TYPE_CODE_BITSTRING
:
3285 case TYPE_CODE_CHAR
:
3286 case TYPE_CODE_BOOL
:
3287 return TYPE_LENGTH (t
);
3289 case TYPE_CODE_ARRAY
:
3290 case TYPE_CODE_COMPLEX
:
3291 /* TODO: What about vector types? */
3292 return arm_type_align (TYPE_TARGET_TYPE (t
));
3294 case TYPE_CODE_STRUCT
:
3295 case TYPE_CODE_UNION
:
3297 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
3299 falign
= arm_type_align (TYPE_FIELD_TYPE (t
, n
));
3307 /* Possible base types for a candidate for passing and returning in
3310 enum arm_vfp_cprc_base_type
3319 /* The length of one element of base type B. */
3322 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b
)
3326 case VFP_CPRC_SINGLE
:
3328 case VFP_CPRC_DOUBLE
:
3330 case VFP_CPRC_VEC64
:
3332 case VFP_CPRC_VEC128
:
3335 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
3340 /* The character ('s', 'd' or 'q') for the type of VFP register used
3341 for passing base type B. */
3344 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b
)
3348 case VFP_CPRC_SINGLE
:
3350 case VFP_CPRC_DOUBLE
:
3352 case VFP_CPRC_VEC64
:
3354 case VFP_CPRC_VEC128
:
3357 internal_error (__FILE__
, __LINE__
, _("Invalid VFP CPRC type: %d."),
3362 /* Determine whether T may be part of a candidate for passing and
3363 returning in VFP registers, ignoring the limit on the total number
3364 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3365 classification of the first valid component found; if it is not
3366 VFP_CPRC_UNKNOWN, all components must have the same classification
3367 as *BASE_TYPE. If it is found that T contains a type not permitted
3368 for passing and returning in VFP registers, a type differently
3369 classified from *BASE_TYPE, or two types differently classified
3370 from each other, return -1, otherwise return the total number of
3371 base-type elements found (possibly 0 in an empty structure or
3372 array). Vectors and complex types are not currently supported,
3373 matching the generic AAPCS support. */
3376 arm_vfp_cprc_sub_candidate (struct type
*t
,
3377 enum arm_vfp_cprc_base_type
*base_type
)
3379 t
= check_typedef (t
);
3380 switch (TYPE_CODE (t
))
3383 switch (TYPE_LENGTH (t
))
3386 if (*base_type
== VFP_CPRC_UNKNOWN
)
3387 *base_type
= VFP_CPRC_SINGLE
;
3388 else if (*base_type
!= VFP_CPRC_SINGLE
)
3393 if (*base_type
== VFP_CPRC_UNKNOWN
)
3394 *base_type
= VFP_CPRC_DOUBLE
;
3395 else if (*base_type
!= VFP_CPRC_DOUBLE
)
3404 case TYPE_CODE_ARRAY
:
3408 count
= arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t
), base_type
);
3411 if (TYPE_LENGTH (t
) == 0)
3413 gdb_assert (count
== 0);
3416 else if (count
== 0)
3418 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3419 gdb_assert ((TYPE_LENGTH (t
) % unitlen
) == 0);
3420 return TYPE_LENGTH (t
) / unitlen
;
3424 case TYPE_CODE_STRUCT
:
3429 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
3431 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
3433 if (sub_count
== -1)
3437 if (TYPE_LENGTH (t
) == 0)
3439 gdb_assert (count
== 0);
3442 else if (count
== 0)
3444 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3445 if (TYPE_LENGTH (t
) != unitlen
* count
)
3450 case TYPE_CODE_UNION
:
3455 for (i
= 0; i
< TYPE_NFIELDS (t
); i
++)
3457 int sub_count
= arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t
, i
),
3459 if (sub_count
== -1)
3461 count
= (count
> sub_count
? count
: sub_count
);
3463 if (TYPE_LENGTH (t
) == 0)
3465 gdb_assert (count
== 0);
3468 else if (count
== 0)
3470 unitlen
= arm_vfp_cprc_unit_length (*base_type
);
3471 if (TYPE_LENGTH (t
) != unitlen
* count
)
3483 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3484 if passed to or returned from a non-variadic function with the VFP
3485 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3486 *BASE_TYPE to the base type for T and *COUNT to the number of
3487 elements of that base type before returning. */
3490 arm_vfp_call_candidate (struct type
*t
, enum arm_vfp_cprc_base_type
*base_type
,
3493 enum arm_vfp_cprc_base_type b
= VFP_CPRC_UNKNOWN
;
3494 int c
= arm_vfp_cprc_sub_candidate (t
, &b
);
3495 if (c
<= 0 || c
> 4)
3502 /* Return 1 if the VFP ABI should be used for passing arguments to and
3503 returning values from a function of type FUNC_TYPE, 0
3507 arm_vfp_abi_for_function (struct gdbarch
*gdbarch
, struct type
*func_type
)
3509 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3510 /* Variadic functions always use the base ABI. Assume that functions
3511 without debug info are not variadic. */
3512 if (func_type
&& TYPE_VARARGS (check_typedef (func_type
)))
3514 /* The VFP ABI is only supported as a variant of AAPCS. */
3515 if (tdep
->arm_abi
!= ARM_ABI_AAPCS
)
3517 return gdbarch_tdep (gdbarch
)->fp_model
== ARM_FLOAT_VFP
;
3520 /* We currently only support passing parameters in integer registers, which
3521 conforms with GCC's default model, and VFP argument passing following
3522 the VFP variant of AAPCS. Several other variants exist and
3523 we should probably support some of them based on the selected ABI. */
3526 arm_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
3527 struct regcache
*regcache
, CORE_ADDR bp_addr
, int nargs
,
3528 struct value
**args
, CORE_ADDR sp
, int struct_return
,
3529 CORE_ADDR struct_addr
)
3531 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3535 struct stack_item
*si
= NULL
;
3538 unsigned vfp_regs_free
= (1 << 16) - 1;
3540 /* Determine the type of this function and whether the VFP ABI
3542 ftype
= check_typedef (value_type (function
));
3543 if (TYPE_CODE (ftype
) == TYPE_CODE_PTR
)
3544 ftype
= check_typedef (TYPE_TARGET_TYPE (ftype
));
3545 use_vfp_abi
= arm_vfp_abi_for_function (gdbarch
, ftype
);
3547 /* Set the return address. For the ARM, the return breakpoint is
3548 always at BP_ADDR. */
3549 if (arm_pc_is_thumb (gdbarch
, bp_addr
))
3551 regcache_cooked_write_unsigned (regcache
, ARM_LR_REGNUM
, bp_addr
);
3553 /* Walk through the list of args and determine how large a temporary
3554 stack is required. Need to take care here as structs may be
3555 passed on the stack, and we have to push them. */
3558 argreg
= ARM_A1_REGNUM
;
3561 /* The struct_return pointer occupies the first parameter
3562 passing register. */
3566 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = %s\n",
3567 gdbarch_register_name (gdbarch
, argreg
),
3568 paddress (gdbarch
, struct_addr
));
3569 regcache_cooked_write_unsigned (regcache
, argreg
, struct_addr
);
3573 for (argnum
= 0; argnum
< nargs
; argnum
++)
3576 struct type
*arg_type
;
3577 struct type
*target_type
;
3578 enum type_code typecode
;
3579 const bfd_byte
*val
;
3581 enum arm_vfp_cprc_base_type vfp_base_type
;
3583 int may_use_core_reg
= 1;
3585 arg_type
= check_typedef (value_type (args
[argnum
]));
3586 len
= TYPE_LENGTH (arg_type
);
3587 target_type
= TYPE_TARGET_TYPE (arg_type
);
3588 typecode
= TYPE_CODE (arg_type
);
3589 val
= value_contents (args
[argnum
]);
3591 align
= arm_type_align (arg_type
);
3592 /* Round alignment up to a whole number of words. */
3593 align
= (align
+ INT_REGISTER_SIZE
- 1) & ~(INT_REGISTER_SIZE
- 1);
3594 /* Different ABIs have different maximum alignments. */
3595 if (gdbarch_tdep (gdbarch
)->arm_abi
== ARM_ABI_APCS
)
3597 /* The APCS ABI only requires word alignment. */
3598 align
= INT_REGISTER_SIZE
;
3602 /* The AAPCS requires at most doubleword alignment. */
3603 if (align
> INT_REGISTER_SIZE
* 2)
3604 align
= INT_REGISTER_SIZE
* 2;
3608 && arm_vfp_call_candidate (arg_type
, &vfp_base_type
,
3616 /* Because this is a CPRC it cannot go in a core register or
3617 cause a core register to be skipped for alignment.
3618 Either it goes in VFP registers and the rest of this loop
3619 iteration is skipped for this argument, or it goes on the
3620 stack (and the stack alignment code is correct for this
3622 may_use_core_reg
= 0;
3624 unit_length
= arm_vfp_cprc_unit_length (vfp_base_type
);
3625 shift
= unit_length
/ 4;
3626 mask
= (1 << (shift
* vfp_base_count
)) - 1;
3627 for (regno
= 0; regno
< 16; regno
+= shift
)
3628 if (((vfp_regs_free
>> regno
) & mask
) == mask
)
3637 vfp_regs_free
&= ~(mask
<< regno
);
3638 reg_scaled
= regno
/ shift
;
3639 reg_char
= arm_vfp_cprc_reg_char (vfp_base_type
);
3640 for (i
= 0; i
< vfp_base_count
; i
++)
3644 if (reg_char
== 'q')
3645 arm_neon_quad_write (gdbarch
, regcache
, reg_scaled
+ i
,
3646 val
+ i
* unit_length
);
3649 sprintf (name_buf
, "%c%d", reg_char
, reg_scaled
+ i
);
3650 regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3652 regcache_cooked_write (regcache
, regnum
,
3653 val
+ i
* unit_length
);
3660 /* This CPRC could not go in VFP registers, so all VFP
3661 registers are now marked as used. */
3666 /* Push stack padding for doubleword alignment. */
3667 if (nstack
& (align
- 1))
3669 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
3670 nstack
+= INT_REGISTER_SIZE
;
3673 /* Doubleword aligned quantities must go in even register pairs. */
3674 if (may_use_core_reg
3675 && argreg
<= ARM_LAST_ARG_REGNUM
3676 && align
> INT_REGISTER_SIZE
3680 /* If the argument is a pointer to a function, and it is a
3681 Thumb function, create a LOCAL copy of the value and set
3682 the THUMB bit in it. */
3683 if (TYPE_CODE_PTR
== typecode
3684 && target_type
!= NULL
3685 && TYPE_CODE_FUNC
== TYPE_CODE (check_typedef (target_type
)))
3687 CORE_ADDR regval
= extract_unsigned_integer (val
, len
, byte_order
);
3688 if (arm_pc_is_thumb (gdbarch
, regval
))
3690 bfd_byte
*copy
= alloca (len
);
3691 store_unsigned_integer (copy
, len
, byte_order
,
3692 MAKE_THUMB_ADDR (regval
));
3697 /* Copy the argument to general registers or the stack in
3698 register-sized pieces. Large arguments are split between
3699 registers and stack. */
3702 int partial_len
= len
< INT_REGISTER_SIZE
? len
: INT_REGISTER_SIZE
;
3704 if (may_use_core_reg
&& argreg
<= ARM_LAST_ARG_REGNUM
)
3706 /* The argument is being passed in a general purpose
3709 = extract_unsigned_integer (val
, partial_len
, byte_order
);
3710 if (byte_order
== BFD_ENDIAN_BIG
)
3711 regval
<<= (INT_REGISTER_SIZE
- partial_len
) * 8;
3713 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
3715 gdbarch_register_name
3717 phex (regval
, INT_REGISTER_SIZE
));
3718 regcache_cooked_write_unsigned (regcache
, argreg
, regval
);
3723 /* Push the arguments onto the stack. */
3725 fprintf_unfiltered (gdb_stdlog
, "arg %d @ sp + %d\n",
3727 si
= push_stack_item (si
, val
, INT_REGISTER_SIZE
);
3728 nstack
+= INT_REGISTER_SIZE
;
3735 /* If we have an odd number of words to push, then decrement the stack
3736 by one word now, so first stack argument will be dword aligned. */
3743 write_memory (sp
, si
->data
, si
->len
);
3744 si
= pop_stack_item (si
);
3747 /* Finally, update the SP register. */
3748 regcache_cooked_write_unsigned (regcache
, ARM_SP_REGNUM
, sp
);
3754 /* Always align the frame to an 8-byte boundary. This is required on
3755 some platforms and harmless on the rest. */
3758 arm_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
3760 /* Align the stack to eight bytes. */
3761 return sp
& ~ (CORE_ADDR
) 7;
/* Print to stdout the names of the FPA exception-flag bits that are
   set in FLAGS, each followed by a space, then a newline.  Bits 0-4
   are, in order: invalid operation (IVO), divide by zero (DVZ),
   overflow (OFL), underflow (UFL) and inexact (INX).  */

static void
print_fpu_flags (int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int i;

  for (i = 0; i < 5; i++)
    if (flags & (1 << i))
      fputs (flag_names[i], stdout);
  putc ('\n', stdout);
}
3780 /* Print interesting information about the floating point processor
3781 (if present) or emulator. */
3783 arm_print_float_info (struct gdbarch
*gdbarch
, struct ui_file
*file
,
3784 struct frame_info
*frame
, const char *args
)
3786 unsigned long status
= get_frame_register_unsigned (frame
, ARM_FPS_REGNUM
);
3789 type
= (status
>> 24) & 127;
3790 if (status
& (1 << 31))
3791 printf (_("Hardware FPU type %d\n"), type
);
3793 printf (_("Software FPU type %d\n"), type
);
3794 /* i18n: [floating point unit] mask */
3795 fputs (_("mask: "), stdout
);
3796 print_fpu_flags (status
>> 16);
3797 /* i18n: [floating point unit] flags */
3798 fputs (_("flags: "), stdout
);
3799 print_fpu_flags (status
);
3802 /* Construct the ARM extended floating point type. */
3803 static struct type
*
3804 arm_ext_type (struct gdbarch
*gdbarch
)
3806 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3808 if (!tdep
->arm_ext_type
)
3810 = arch_float_type (gdbarch
, -1, "builtin_type_arm_ext",
3811 floatformats_arm_ext
);
3813 return tdep
->arm_ext_type
;
3816 static struct type
*
3817 arm_neon_double_type (struct gdbarch
*gdbarch
)
3819 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3821 if (tdep
->neon_double_type
== NULL
)
3823 struct type
*t
, *elem
;
3825 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_d",
3827 elem
= builtin_type (gdbarch
)->builtin_uint8
;
3828 append_composite_type_field (t
, "u8", init_vector_type (elem
, 8));
3829 elem
= builtin_type (gdbarch
)->builtin_uint16
;
3830 append_composite_type_field (t
, "u16", init_vector_type (elem
, 4));
3831 elem
= builtin_type (gdbarch
)->builtin_uint32
;
3832 append_composite_type_field (t
, "u32", init_vector_type (elem
, 2));
3833 elem
= builtin_type (gdbarch
)->builtin_uint64
;
3834 append_composite_type_field (t
, "u64", elem
);
3835 elem
= builtin_type (gdbarch
)->builtin_float
;
3836 append_composite_type_field (t
, "f32", init_vector_type (elem
, 2));
3837 elem
= builtin_type (gdbarch
)->builtin_double
;
3838 append_composite_type_field (t
, "f64", elem
);
3840 TYPE_VECTOR (t
) = 1;
3841 TYPE_NAME (t
) = "neon_d";
3842 tdep
->neon_double_type
= t
;
3845 return tdep
->neon_double_type
;
3848 /* FIXME: The vector types are not correctly ordered on big-endian
3849 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3850 bits of d0 - regardless of what unit size is being held in d0. So
3851 the offset of the first uint8 in d0 is 7, but the offset of the
3852 first float is 4. This code works as-is for little-endian
3855 static struct type
*
3856 arm_neon_quad_type (struct gdbarch
*gdbarch
)
3858 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3860 if (tdep
->neon_quad_type
== NULL
)
3862 struct type
*t
, *elem
;
3864 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_neon_q",
3866 elem
= builtin_type (gdbarch
)->builtin_uint8
;
3867 append_composite_type_field (t
, "u8", init_vector_type (elem
, 16));
3868 elem
= builtin_type (gdbarch
)->builtin_uint16
;
3869 append_composite_type_field (t
, "u16", init_vector_type (elem
, 8));
3870 elem
= builtin_type (gdbarch
)->builtin_uint32
;
3871 append_composite_type_field (t
, "u32", init_vector_type (elem
, 4));
3872 elem
= builtin_type (gdbarch
)->builtin_uint64
;
3873 append_composite_type_field (t
, "u64", init_vector_type (elem
, 2));
3874 elem
= builtin_type (gdbarch
)->builtin_float
;
3875 append_composite_type_field (t
, "f32", init_vector_type (elem
, 4));
3876 elem
= builtin_type (gdbarch
)->builtin_double
;
3877 append_composite_type_field (t
, "f64", init_vector_type (elem
, 2));
3879 TYPE_VECTOR (t
) = 1;
3880 TYPE_NAME (t
) = "neon_q";
3881 tdep
->neon_quad_type
= t
;
3884 return tdep
->neon_quad_type
;
3887 /* Return the GDB type object for the "standard" data type of data in
3890 static struct type
*
3891 arm_register_type (struct gdbarch
*gdbarch
, int regnum
)
3893 int num_regs
= gdbarch_num_regs (gdbarch
);
3895 if (gdbarch_tdep (gdbarch
)->have_vfp_pseudos
3896 && regnum
>= num_regs
&& regnum
< num_regs
+ 32)
3897 return builtin_type (gdbarch
)->builtin_float
;
3899 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
3900 && regnum
>= num_regs
+ 32 && regnum
< num_regs
+ 32 + 16)
3901 return arm_neon_quad_type (gdbarch
);
3903 /* If the target description has register information, we are only
3904 in this function so that we can override the types of
3905 double-precision registers for NEON. */
3906 if (tdesc_has_registers (gdbarch_target_desc (gdbarch
)))
3908 struct type
*t
= tdesc_register_type (gdbarch
, regnum
);
3910 if (regnum
>= ARM_D0_REGNUM
&& regnum
< ARM_D0_REGNUM
+ 32
3911 && TYPE_CODE (t
) == TYPE_CODE_FLT
3912 && gdbarch_tdep (gdbarch
)->have_neon
)
3913 return arm_neon_double_type (gdbarch
);
3918 if (regnum
>= ARM_F0_REGNUM
&& regnum
< ARM_F0_REGNUM
+ NUM_FREGS
)
3920 if (!gdbarch_tdep (gdbarch
)->have_fpa_registers
)
3921 return builtin_type (gdbarch
)->builtin_void
;
3923 return arm_ext_type (gdbarch
);
3925 else if (regnum
== ARM_SP_REGNUM
)
3926 return builtin_type (gdbarch
)->builtin_data_ptr
;
3927 else if (regnum
== ARM_PC_REGNUM
)
3928 return builtin_type (gdbarch
)->builtin_func_ptr
;
3929 else if (regnum
>= ARRAY_SIZE (arm_register_names
))
3930 /* These registers are only supported on targets which supply
3931 an XML description. */
3932 return builtin_type (gdbarch
)->builtin_int0
;
3934 return builtin_type (gdbarch
)->builtin_uint32
;
3937 /* Map a DWARF register REGNUM onto the appropriate GDB register
3941 arm_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
3943 /* Core integer regs. */
3944 if (reg
>= 0 && reg
<= 15)
3947 /* Legacy FPA encoding. These were once used in a way which
3948 overlapped with VFP register numbering, so their use is
3949 discouraged, but GDB doesn't support the ARM toolchain
3950 which used them for VFP. */
3951 if (reg
>= 16 && reg
<= 23)
3952 return ARM_F0_REGNUM
+ reg
- 16;
3954 /* New assignments for the FPA registers. */
3955 if (reg
>= 96 && reg
<= 103)
3956 return ARM_F0_REGNUM
+ reg
- 96;
3958 /* WMMX register assignments. */
3959 if (reg
>= 104 && reg
<= 111)
3960 return ARM_WCGR0_REGNUM
+ reg
- 104;
3962 if (reg
>= 112 && reg
<= 127)
3963 return ARM_WR0_REGNUM
+ reg
- 112;
3965 if (reg
>= 192 && reg
<= 199)
3966 return ARM_WC0_REGNUM
+ reg
- 192;
3968 /* VFP v2 registers. A double precision value is actually
3969 in d1 rather than s2, but the ABI only defines numbering
3970 for the single precision registers. This will "just work"
3971 in GDB for little endian targets (we'll read eight bytes,
3972 starting in s0 and then progressing to s1), but will be
3973 reversed on big endian targets with VFP. This won't
3974 be a problem for the new Neon quad registers; you're supposed
3975 to use DW_OP_piece for those. */
3976 if (reg
>= 64 && reg
<= 95)
3980 sprintf (name_buf
, "s%d", reg
- 64);
3981 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3985 /* VFP v3 / Neon registers. This range is also used for VFP v2
3986 registers, except that it now describes d0 instead of s0. */
3987 if (reg
>= 256 && reg
<= 287)
3991 sprintf (name_buf
, "d%d", reg
- 256);
3992 return user_reg_map_name_to_regnum (gdbarch
, name_buf
,
3999 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4001 arm_register_sim_regno (struct gdbarch
*gdbarch
, int regnum
)
4004 gdb_assert (reg
>= 0 && reg
< gdbarch_num_regs (gdbarch
));
4006 if (regnum
>= ARM_WR0_REGNUM
&& regnum
<= ARM_WR15_REGNUM
)
4007 return regnum
- ARM_WR0_REGNUM
+ SIM_ARM_IWMMXT_COP0R0_REGNUM
;
4009 if (regnum
>= ARM_WC0_REGNUM
&& regnum
<= ARM_WC7_REGNUM
)
4010 return regnum
- ARM_WC0_REGNUM
+ SIM_ARM_IWMMXT_COP1R0_REGNUM
;
4012 if (regnum
>= ARM_WCGR0_REGNUM
&& regnum
<= ARM_WCGR7_REGNUM
)
4013 return regnum
- ARM_WCGR0_REGNUM
+ SIM_ARM_IWMMXT_COP1R8_REGNUM
;
4015 if (reg
< NUM_GREGS
)
4016 return SIM_ARM_R0_REGNUM
+ reg
;
4019 if (reg
< NUM_FREGS
)
4020 return SIM_ARM_FP0_REGNUM
+ reg
;
4023 if (reg
< NUM_SREGS
)
4024 return SIM_ARM_FPS_REGNUM
+ reg
;
4027 internal_error (__FILE__
, __LINE__
, _("Bad REGNUM %d"), regnum
);
4030 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4031 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4032 It is thought that this is the floating-point register format on
4033 little-endian systems. */
4036 convert_from_extended (const struct floatformat
*fmt
, const void *ptr
,
4037 void *dbl
, int endianess
)
4041 if (endianess
== BFD_ENDIAN_BIG
)
4042 floatformat_to_doublest (&floatformat_arm_ext_big
, ptr
, &d
);
4044 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword
,
4046 floatformat_from_doublest (fmt
, &d
, dbl
);
4050 convert_to_extended (const struct floatformat
*fmt
, void *dbl
, const void *ptr
,
4055 floatformat_to_doublest (fmt
, ptr
, &d
);
4056 if (endianess
== BFD_ENDIAN_BIG
)
4057 floatformat_from_doublest (&floatformat_arm_ext_big
, &d
, dbl
);
4059 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword
,
4064 condition_true (unsigned long cond
, unsigned long status_reg
)
4066 if (cond
== INST_AL
|| cond
== INST_NV
)
4072 return ((status_reg
& FLAG_Z
) != 0);
4074 return ((status_reg
& FLAG_Z
) == 0);
4076 return ((status_reg
& FLAG_C
) != 0);
4078 return ((status_reg
& FLAG_C
) == 0);
4080 return ((status_reg
& FLAG_N
) != 0);
4082 return ((status_reg
& FLAG_N
) == 0);
4084 return ((status_reg
& FLAG_V
) != 0);
4086 return ((status_reg
& FLAG_V
) == 0);
4088 return ((status_reg
& (FLAG_C
| FLAG_Z
)) == FLAG_C
);
4090 return ((status_reg
& (FLAG_C
| FLAG_Z
)) != FLAG_C
);
4092 return (((status_reg
& FLAG_N
) == 0) == ((status_reg
& FLAG_V
) == 0));
4094 return (((status_reg
& FLAG_N
) == 0) != ((status_reg
& FLAG_V
) == 0));
4096 return (((status_reg
& FLAG_Z
) == 0)
4097 && (((status_reg
& FLAG_N
) == 0)
4098 == ((status_reg
& FLAG_V
) == 0)));
4100 return (((status_reg
& FLAG_Z
) != 0)
4101 || (((status_reg
& FLAG_N
) == 0)
4102 != ((status_reg
& FLAG_V
) == 0)));
4107 static unsigned long
4108 shifted_reg_val (struct frame_info
*frame
, unsigned long inst
, int carry
,
4109 unsigned long pc_val
, unsigned long status_reg
)
4111 unsigned long res
, shift
;
4112 int rm
= bits (inst
, 0, 3);
4113 unsigned long shifttype
= bits (inst
, 5, 6);
4117 int rs
= bits (inst
, 8, 11);
4118 shift
= (rs
== 15 ? pc_val
+ 8
4119 : get_frame_register_unsigned (frame
, rs
)) & 0xFF;
4122 shift
= bits (inst
, 7, 11);
4124 res
= (rm
== ARM_PC_REGNUM
4125 ? (pc_val
+ (bit (inst
, 4) ? 12 : 8))
4126 : get_frame_register_unsigned (frame
, rm
));
4131 res
= shift
>= 32 ? 0 : res
<< shift
;
4135 res
= shift
>= 32 ? 0 : res
>> shift
;
4141 res
= ((res
& 0x80000000L
)
4142 ? ~((~res
) >> shift
) : res
>> shift
);
4145 case 3: /* ROR/RRX */
4148 res
= (res
>> 1) | (carry
? 0x80000000L
: 0);
4150 res
= (res
>> shift
) | (res
<< (32 - shift
));
4154 return res
& 0xffffffff;
/* Return number of 1-bits in VAL.  */

static int
bitcount (unsigned long val)
{
  int nbits = 0;

  /* Kernighan's trick: each AND with val - 1 clears the lowest set
     bit, so the loop runs once per 1-bit.  */
  while (val != 0)
    {
      val &= val - 1;
      nbits++;
    }

  return nbits;
}
/* Return the size in bytes of the complete Thumb instruction whose
   first halfword is INST1.  */

static int
thumb_insn_size (unsigned short inst1)
{
  /* 32-bit Thumb-2 encodings have the top five bits equal to 0b11101,
     0b11110 or 0b11111; everything else is a 16-bit instruction.  */
  return ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0) ? 4 : 2;
}
/* Advance the Thumb-2 IT-block state ITSTATE past one instruction and
   return the new state (0 once the block has finished).  */

static unsigned int
thumb_advance_itstate (unsigned int itstate)
{
  /* Preserve IT[7:5], the first three bits of the condition.  Shift
     the upcoming condition flags left by one bit.  */
  unsigned int next = (itstate & 0xe0) | ((itstate << 1) & 0x1f);

  /* If we have finished the IT block (all four mask bits shifted
     out), clear the state entirely.  */
  return (next & 0x0f) == 0 ? 0 : next;
}
4194 /* Find the next PC after the current instruction executes. In some
4195 cases we can not statically determine the answer (see the IT state
4196 handling in this function); in that case, a breakpoint may be
4197 inserted in addition to the returned PC, which will be used to set
4198 another breakpoint by our caller. */
4201 thumb_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
)
4203 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4204 struct address_space
*aspace
= get_frame_address_space (frame
);
4205 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4206 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4207 unsigned long pc_val
= ((unsigned long) pc
) + 4; /* PC after prefetch */
4208 unsigned short inst1
;
4209 CORE_ADDR nextpc
= pc
+ 2; /* Default is next instruction. */
4210 unsigned long offset
;
4211 ULONGEST status
, itstate
;
4213 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4214 pc_val
= MAKE_THUMB_ADDR (pc_val
);
4216 inst1
= read_memory_unsigned_integer (pc
, 2, byte_order_for_code
);
4218 /* Thumb-2 conditional execution support. There are eight bits in
4219 the CPSR which describe conditional execution state. Once
4220 reconstructed (they're in a funny order), the low five bits
4221 describe the low bit of the condition for each instruction and
4222 how many instructions remain. The high three bits describe the
4223 base condition. One of the low four bits will be set if an IT
4224 block is active. These bits read as zero on earlier
4226 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4227 itstate
= ((status
>> 8) & 0xfc) | ((status
>> 25) & 0x3);
4229 /* If-Then handling. On GNU/Linux, where this routine is used, we
4230 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4231 can disable execution of the undefined instruction. So we might
4232 miss the breakpoint if we set it on a skipped conditional
4233 instruction. Because conditional instructions can change the
4234 flags, affecting the execution of further instructions, we may
4235 need to set two breakpoints. */
4237 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
!= NULL
)
4239 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
4241 /* An IT instruction. Because this instruction does not
4242 modify the flags, we can accurately predict the next
4243 executed instruction. */
4244 itstate
= inst1
& 0x00ff;
4245 pc
+= thumb_insn_size (inst1
);
4247 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
4249 inst1
= read_memory_unsigned_integer (pc
, 2,
4250 byte_order_for_code
);
4251 pc
+= thumb_insn_size (inst1
);
4252 itstate
= thumb_advance_itstate (itstate
);
4255 return MAKE_THUMB_ADDR (pc
);
4257 else if (itstate
!= 0)
4259 /* We are in a conditional block. Check the condition. */
4260 if (! condition_true (itstate
>> 4, status
))
4262 /* Advance to the next executed instruction. */
4263 pc
+= thumb_insn_size (inst1
);
4264 itstate
= thumb_advance_itstate (itstate
);
4266 while (itstate
!= 0 && ! condition_true (itstate
>> 4, status
))
4268 inst1
= read_memory_unsigned_integer (pc
, 2,
4269 byte_order_for_code
);
4270 pc
+= thumb_insn_size (inst1
);
4271 itstate
= thumb_advance_itstate (itstate
);
4274 return MAKE_THUMB_ADDR (pc
);
4276 else if ((itstate
& 0x0f) == 0x08)
4278 /* This is the last instruction of the conditional
4279 block, and it is executed. We can handle it normally
4280 because the following instruction is not conditional,
4281 and we must handle it normally because it is
4282 permitted to branch. Fall through. */
4288 /* There are conditional instructions after this one.
4289 If this instruction modifies the flags, then we can
4290 not predict what the next executed instruction will
4291 be. Fortunately, this instruction is architecturally
4292 forbidden to branch; we know it will fall through.
4293 Start by skipping past it. */
4294 pc
+= thumb_insn_size (inst1
);
4295 itstate
= thumb_advance_itstate (itstate
);
4297 /* Set a breakpoint on the following instruction. */
4298 gdb_assert ((itstate
& 0x0f) != 0);
4299 arm_insert_single_step_breakpoint (gdbarch
, aspace
,
4300 MAKE_THUMB_ADDR (pc
));
4301 cond_negated
= (itstate
>> 4) & 1;
4303 /* Skip all following instructions with the same
4304 condition. If there is a later instruction in the IT
4305 block with the opposite condition, set the other
4306 breakpoint there. If not, then set a breakpoint on
4307 the instruction after the IT block. */
4310 inst1
= read_memory_unsigned_integer (pc
, 2,
4311 byte_order_for_code
);
4312 pc
+= thumb_insn_size (inst1
);
4313 itstate
= thumb_advance_itstate (itstate
);
4315 while (itstate
!= 0 && ((itstate
>> 4) & 1) == cond_negated
);
4317 return MAKE_THUMB_ADDR (pc
);
4321 else if (itstate
& 0x0f)
4323 /* We are in a conditional block. Check the condition. */
4324 int cond
= itstate
>> 4;
4326 if (! condition_true (cond
, status
))
4327 /* Advance to the next instruction. All the 32-bit
4328 instructions share a common prefix. */
4329 return MAKE_THUMB_ADDR (pc
+ thumb_insn_size (inst1
));
4331 /* Otherwise, handle the instruction normally. */
4334 if ((inst1
& 0xff00) == 0xbd00) /* pop {rlist, pc} */
4338 /* Fetch the saved PC from the stack. It's stored above
4339 all of the other registers. */
4340 offset
= bitcount (bits (inst1
, 0, 7)) * INT_REGISTER_SIZE
;
4341 sp
= get_frame_register_unsigned (frame
, ARM_SP_REGNUM
);
4342 nextpc
= read_memory_unsigned_integer (sp
+ offset
, 4, byte_order
);
4344 else if ((inst1
& 0xf000) == 0xd000) /* conditional branch */
4346 unsigned long cond
= bits (inst1
, 8, 11);
4347 if (cond
== 0x0f) /* 0x0f = SWI */
4349 struct gdbarch_tdep
*tdep
;
4350 tdep
= gdbarch_tdep (gdbarch
);
4352 if (tdep
->syscall_next_pc
!= NULL
)
4353 nextpc
= tdep
->syscall_next_pc (frame
);
4356 else if (cond
!= 0x0f && condition_true (cond
, status
))
4357 nextpc
= pc_val
+ (sbits (inst1
, 0, 7) << 1);
4359 else if ((inst1
& 0xf800) == 0xe000) /* unconditional branch */
4361 nextpc
= pc_val
+ (sbits (inst1
, 0, 10) << 1);
4363 else if (thumb_insn_size (inst1
) == 4) /* 32-bit instruction */
4365 unsigned short inst2
;
4366 inst2
= read_memory_unsigned_integer (pc
+ 2, 2, byte_order_for_code
);
4368 /* Default to the next instruction. */
4370 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4372 if ((inst1
& 0xf800) == 0xf000 && (inst2
& 0x8000) == 0x8000)
4374 /* Branches and miscellaneous control instructions. */
4376 if ((inst2
& 0x1000) != 0 || (inst2
& 0xd001) == 0xc000)
4379 int j1
, j2
, imm1
, imm2
;
4381 imm1
= sbits (inst1
, 0, 10);
4382 imm2
= bits (inst2
, 0, 10);
4383 j1
= bit (inst2
, 13);
4384 j2
= bit (inst2
, 11);
4386 offset
= ((imm1
<< 12) + (imm2
<< 1));
4387 offset
^= ((!j2
) << 22) | ((!j1
) << 23);
4389 nextpc
= pc_val
+ offset
;
4390 /* For BLX make sure to clear the low bits. */
4391 if (bit (inst2
, 12) == 0)
4392 nextpc
= nextpc
& 0xfffffffc;
4394 else if (inst1
== 0xf3de && (inst2
& 0xff00) == 0x3f00)
4396 /* SUBS PC, LR, #imm8. */
4397 nextpc
= get_frame_register_unsigned (frame
, ARM_LR_REGNUM
);
4398 nextpc
-= inst2
& 0x00ff;
4400 else if ((inst2
& 0xd000) == 0x8000 && (inst1
& 0x0380) != 0x0380)
4402 /* Conditional branch. */
4403 if (condition_true (bits (inst1
, 6, 9), status
))
4405 int sign
, j1
, j2
, imm1
, imm2
;
4407 sign
= sbits (inst1
, 10, 10);
4408 imm1
= bits (inst1
, 0, 5);
4409 imm2
= bits (inst2
, 0, 10);
4410 j1
= bit (inst2
, 13);
4411 j2
= bit (inst2
, 11);
4413 offset
= (sign
<< 20) + (j2
<< 19) + (j1
<< 18);
4414 offset
+= (imm1
<< 12) + (imm2
<< 1);
4416 nextpc
= pc_val
+ offset
;
4420 else if ((inst1
& 0xfe50) == 0xe810)
4422 /* Load multiple or RFE. */
4423 int rn
, offset
, load_pc
= 1;
4425 rn
= bits (inst1
, 0, 3);
4426 if (bit (inst1
, 7) && !bit (inst1
, 8))
4429 if (!bit (inst2
, 15))
4431 offset
= bitcount (inst2
) * 4 - 4;
4433 else if (!bit (inst1
, 7) && bit (inst1
, 8))
4436 if (!bit (inst2
, 15))
4440 else if (bit (inst1
, 7) && bit (inst1
, 8))
4445 else if (!bit (inst1
, 7) && !bit (inst1
, 8))
4455 CORE_ADDR addr
= get_frame_register_unsigned (frame
, rn
);
4456 nextpc
= get_frame_memory_unsigned (frame
, addr
+ offset
, 4);
4459 else if ((inst1
& 0xffef) == 0xea4f && (inst2
& 0xfff0) == 0x0f00)
4461 /* MOV PC or MOVS PC. */
4462 nextpc
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4463 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4465 else if ((inst1
& 0xff70) == 0xf850 && (inst2
& 0xf000) == 0xf000)
4469 int rn
, load_pc
= 1;
4471 rn
= bits (inst1
, 0, 3);
4472 base
= get_frame_register_unsigned (frame
, rn
);
4473 if (rn
== ARM_PC_REGNUM
)
4475 base
= (base
+ 4) & ~(CORE_ADDR
) 0x3;
4477 base
+= bits (inst2
, 0, 11);
4479 base
-= bits (inst2
, 0, 11);
4481 else if (bit (inst1
, 7))
4482 base
+= bits (inst2
, 0, 11);
4483 else if (bit (inst2
, 11))
4485 if (bit (inst2
, 10))
4488 base
+= bits (inst2
, 0, 7);
4490 base
-= bits (inst2
, 0, 7);
4493 else if ((inst2
& 0x0fc0) == 0x0000)
4495 int shift
= bits (inst2
, 4, 5), rm
= bits (inst2
, 0, 3);
4496 base
+= get_frame_register_unsigned (frame
, rm
) << shift
;
4503 nextpc
= get_frame_memory_unsigned (frame
, base
, 4);
4505 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf000)
4508 CORE_ADDR tbl_reg
, table
, offset
, length
;
4510 tbl_reg
= bits (inst1
, 0, 3);
4511 if (tbl_reg
== 0x0f)
4512 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
4514 table
= get_frame_register_unsigned (frame
, tbl_reg
);
4516 offset
= get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4517 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 1);
4518 nextpc
= pc_val
+ length
;
4520 else if ((inst1
& 0xfff0) == 0xe8d0 && (inst2
& 0xfff0) == 0xf010)
4523 CORE_ADDR tbl_reg
, table
, offset
, length
;
4525 tbl_reg
= bits (inst1
, 0, 3);
4526 if (tbl_reg
== 0x0f)
4527 table
= pc
+ 4; /* Regcache copy of PC isn't right yet. */
4529 table
= get_frame_register_unsigned (frame
, tbl_reg
);
4531 offset
= 2 * get_frame_register_unsigned (frame
, bits (inst2
, 0, 3));
4532 length
= 2 * get_frame_memory_unsigned (frame
, table
+ offset
, 2);
4533 nextpc
= pc_val
+ length
;
4536 else if ((inst1
& 0xff00) == 0x4700) /* bx REG, blx REG */
4538 if (bits (inst1
, 3, 6) == 0x0f)
4541 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
4543 else if ((inst1
& 0xff87) == 0x4687) /* mov pc, REG */
4545 if (bits (inst1
, 3, 6) == 0x0f)
4548 nextpc
= get_frame_register_unsigned (frame
, bits (inst1
, 3, 6));
4550 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4552 else if ((inst1
& 0xf500) == 0xb100)
4555 int imm
= (bit (inst1
, 9) << 6) + (bits (inst1
, 3, 7) << 1);
4556 ULONGEST reg
= get_frame_register_unsigned (frame
, bits (inst1
, 0, 2));
4558 if (bit (inst1
, 11) && reg
!= 0)
4559 nextpc
= pc_val
+ imm
;
4560 else if (!bit (inst1
, 11) && reg
== 0)
4561 nextpc
= pc_val
+ imm
;
4566 /* Get the raw next address. PC is the current program counter, in
4567 FRAME, which is assumed to be executing in ARM mode.
4569 The value returned has the execution state of the next instruction
4570 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4571 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4575 arm_get_next_pc_raw (struct frame_info
*frame
, CORE_ADDR pc
)
4577 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4578 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
4579 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4580 unsigned long pc_val
;
4581 unsigned long this_instr
;
4582 unsigned long status
;
4585 pc_val
= (unsigned long) pc
;
4586 this_instr
= read_memory_unsigned_integer (pc
, 4, byte_order_for_code
);
4588 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4589 nextpc
= (CORE_ADDR
) (pc_val
+ 4); /* Default case */
4591 if (bits (this_instr
, 28, 31) == INST_NV
)
4592 switch (bits (this_instr
, 24, 27))
4597 /* Branch with Link and change to Thumb. */
4598 nextpc
= BranchDest (pc
, this_instr
);
4599 nextpc
|= bit (this_instr
, 24) << 1;
4600 nextpc
= MAKE_THUMB_ADDR (nextpc
);
4606 /* Coprocessor register transfer. */
4607 if (bits (this_instr
, 12, 15) == 15)
4608 error (_("Invalid update to pc in instruction"));
4611 else if (condition_true (bits (this_instr
, 28, 31), status
))
4613 switch (bits (this_instr
, 24, 27))
4616 case 0x1: /* data processing */
4620 unsigned long operand1
, operand2
, result
= 0;
4624 if (bits (this_instr
, 12, 15) != 15)
4627 if (bits (this_instr
, 22, 25) == 0
4628 && bits (this_instr
, 4, 7) == 9) /* multiply */
4629 error (_("Invalid update to pc in instruction"));
4631 /* BX <reg>, BLX <reg> */
4632 if (bits (this_instr
, 4, 27) == 0x12fff1
4633 || bits (this_instr
, 4, 27) == 0x12fff3)
4635 rn
= bits (this_instr
, 0, 3);
4636 nextpc
= ((rn
== ARM_PC_REGNUM
)
4638 : get_frame_register_unsigned (frame
, rn
));
4643 /* Multiply into PC. */
4644 c
= (status
& FLAG_C
) ? 1 : 0;
4645 rn
= bits (this_instr
, 16, 19);
4646 operand1
= ((rn
== ARM_PC_REGNUM
)
4648 : get_frame_register_unsigned (frame
, rn
));
4650 if (bit (this_instr
, 25))
4652 unsigned long immval
= bits (this_instr
, 0, 7);
4653 unsigned long rotate
= 2 * bits (this_instr
, 8, 11);
4654 operand2
= ((immval
>> rotate
) | (immval
<< (32 - rotate
)))
4657 else /* operand 2 is a shifted register. */
4658 operand2
= shifted_reg_val (frame
, this_instr
, c
,
4661 switch (bits (this_instr
, 21, 24))
4664 result
= operand1
& operand2
;
4668 result
= operand1
^ operand2
;
4672 result
= operand1
- operand2
;
4676 result
= operand2
- operand1
;
4680 result
= operand1
+ operand2
;
4684 result
= operand1
+ operand2
+ c
;
4688 result
= operand1
- operand2
+ c
;
4692 result
= operand2
- operand1
+ c
;
4698 case 0xb: /* tst, teq, cmp, cmn */
4699 result
= (unsigned long) nextpc
;
4703 result
= operand1
| operand2
;
4707 /* Always step into a function. */
4712 result
= operand1
& ~operand2
;
4720 /* In 26-bit APCS the bottom two bits of the result are
4721 ignored, and we always end up in ARM state. */
4723 nextpc
= arm_addr_bits_remove (gdbarch
, result
);
4731 case 0x5: /* data transfer */
4734 if (bit (this_instr
, 20))
4737 if (bits (this_instr
, 12, 15) == 15)
4743 if (bit (this_instr
, 22))
4744 error (_("Invalid update to pc in instruction"));
4746 /* byte write to PC */
4747 rn
= bits (this_instr
, 16, 19);
4748 base
= ((rn
== ARM_PC_REGNUM
)
4750 : get_frame_register_unsigned (frame
, rn
));
4752 if (bit (this_instr
, 24))
4755 int c
= (status
& FLAG_C
) ? 1 : 0;
4756 unsigned long offset
=
4757 (bit (this_instr
, 25)
4758 ? shifted_reg_val (frame
, this_instr
, c
, pc_val
, status
)
4759 : bits (this_instr
, 0, 11));
4761 if (bit (this_instr
, 23))
4767 (CORE_ADDR
) read_memory_unsigned_integer ((CORE_ADDR
) base
,
4774 case 0x9: /* block transfer */
4775 if (bit (this_instr
, 20))
4778 if (bit (this_instr
, 15))
4782 unsigned long rn_val
4783 = get_frame_register_unsigned (frame
,
4784 bits (this_instr
, 16, 19));
4786 if (bit (this_instr
, 23))
4789 unsigned long reglist
= bits (this_instr
, 0, 14);
4790 offset
= bitcount (reglist
) * 4;
4791 if (bit (this_instr
, 24)) /* pre */
4794 else if (bit (this_instr
, 24))
4798 (CORE_ADDR
) read_memory_unsigned_integer ((CORE_ADDR
)
4805 case 0xb: /* branch & link */
4806 case 0xa: /* branch */
4808 nextpc
= BranchDest (pc
, this_instr
);
4814 case 0xe: /* coproc ops */
4818 struct gdbarch_tdep
*tdep
;
4819 tdep
= gdbarch_tdep (gdbarch
);
4821 if (tdep
->syscall_next_pc
!= NULL
)
4822 nextpc
= tdep
->syscall_next_pc (frame
);
4828 fprintf_filtered (gdb_stderr
, _("Bad bit-field extraction\n"));
4836 /* Determine next PC after current instruction executes. Will call either
4837 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4838 loop is detected. */
4841 arm_get_next_pc (struct frame_info
*frame
, CORE_ADDR pc
)
4845 if (arm_frame_is_thumb (frame
))
4847 nextpc
= thumb_get_next_pc_raw (frame
, pc
);
4848 if (nextpc
== MAKE_THUMB_ADDR (pc
))
4849 error (_("Infinite loop detected"));
4853 nextpc
= arm_get_next_pc_raw (frame
, pc
);
4855 error (_("Infinite loop detected"));
4861 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4862 of the appropriate mode (as encoded in the PC value), even if this
4863 differs from what would be expected according to the symbol tables. */
4866 arm_insert_single_step_breakpoint (struct gdbarch
*gdbarch
,
4867 struct address_space
*aspace
,
4870 struct cleanup
*old_chain
4871 = make_cleanup_restore_integer (&arm_override_mode
);
4873 arm_override_mode
= IS_THUMB_ADDR (pc
);
4874 pc
= gdbarch_addr_bits_remove (gdbarch
, pc
);
4876 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
4878 do_cleanups (old_chain
);
4881 /* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
4882 instruction and ending with a STREX{,B,H,D} instruction. If such a sequence
4883 is found, attempt to step through it. A breakpoint is placed at the end of
4887 thumb_deal_with_atomic_sequence_raw (struct frame_info
*frame
)
4889 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
4890 struct address_space
*aspace
= get_frame_address_space (frame
);
4891 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
4892 CORE_ADDR pc
= get_frame_pc (frame
);
4893 CORE_ADDR breaks
[2] = {-1, -1};
4895 unsigned short insn1
, insn2
;
4898 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
4899 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
4900 ULONGEST status
, itstate
;
4902 /* We currently do not support atomic sequences within an IT block. */
4903 status
= get_frame_register_unsigned (frame
, ARM_PS_REGNUM
);
4904 itstate
= ((status
>> 8) & 0xfc) | ((status
>> 25) & 0x3);
4908 /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction. */
4909 insn1
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4911 if (thumb_insn_size (insn1
) != 4)
4914 insn2
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4916 if (!((insn1
& 0xfff0) == 0xe850
4917 || ((insn1
& 0xfff0) == 0xe8d0 && (insn2
& 0x00c0) == 0x0040)))
4920 /* Assume that no atomic sequence is longer than "atomic_sequence_length"
4922 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
4924 insn1
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4927 if (thumb_insn_size (insn1
) != 4)
4929 /* Assume that there is at most one conditional branch in the
4930 atomic sequence. If a conditional branch is found, put a
4931 breakpoint in its destination address. */
4932 if ((insn1
& 0xf000) == 0xd000 && bits (insn1
, 8, 11) != 0x0f)
4934 if (last_breakpoint
> 0)
4935 return 0; /* More than one conditional branch found,
4936 fallback to the standard code. */
4938 breaks
[1] = loc
+ 2 + (sbits (insn1
, 0, 7) << 1);
4942 /* We do not support atomic sequences that use any *other*
4943 instructions but conditional branches to change the PC.
4944 Fall back to standard code to avoid losing control of
4946 else if (thumb_instruction_changes_pc (insn1
))
4951 insn2
= read_memory_unsigned_integer (loc
, 2, byte_order_for_code
);
4954 /* Assume that there is at most one conditional branch in the
4955 atomic sequence. If a conditional branch is found, put a
4956 breakpoint in its destination address. */
4957 if ((insn1
& 0xf800) == 0xf000
4958 && (insn2
& 0xd000) == 0x8000
4959 && (insn1
& 0x0380) != 0x0380)
4961 int sign
, j1
, j2
, imm1
, imm2
;
4962 unsigned int offset
;
4964 sign
= sbits (insn1
, 10, 10);
4965 imm1
= bits (insn1
, 0, 5);
4966 imm2
= bits (insn2
, 0, 10);
4967 j1
= bit (insn2
, 13);
4968 j2
= bit (insn2
, 11);
4970 offset
= (sign
<< 20) + (j2
<< 19) + (j1
<< 18);
4971 offset
+= (imm1
<< 12) + (imm2
<< 1);
4973 if (last_breakpoint
> 0)
4974 return 0; /* More than one conditional branch found,
4975 fallback to the standard code. */
4977 breaks
[1] = loc
+ offset
;
4981 /* We do not support atomic sequences that use any *other*
4982 instructions but conditional branches to change the PC.
4983 Fall back to standard code to avoid losing control of
4985 else if (thumb2_instruction_changes_pc (insn1
, insn2
))
4988 /* If we find a strex{,b,h,d}, we're done. */
4989 if ((insn1
& 0xfff0) == 0xe840
4990 || ((insn1
& 0xfff0) == 0xe8c0 && (insn2
& 0x00c0) == 0x0040))
4995 /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
4996 if (insn_count
== atomic_sequence_length
)
4999 /* Insert a breakpoint right after the end of the atomic sequence. */
5002 /* Check for duplicated breakpoints. Check also for a breakpoint
5003 placed (branch instruction's destination) anywhere in sequence. */
5005 && (breaks
[1] == breaks
[0]
5006 || (breaks
[1] >= pc
&& breaks
[1] < loc
)))
5007 last_breakpoint
= 0;
5009 /* Effectively inserts the breakpoints. */
5010 for (index
= 0; index
<= last_breakpoint
; index
++)
5011 arm_insert_single_step_breakpoint (gdbarch
, aspace
,
5012 MAKE_THUMB_ADDR (breaks
[index
]));
5018 arm_deal_with_atomic_sequence_raw (struct frame_info
*frame
)
5020 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
5021 struct address_space
*aspace
= get_frame_address_space (frame
);
5022 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
5023 CORE_ADDR pc
= get_frame_pc (frame
);
5024 CORE_ADDR breaks
[2] = {-1, -1};
5029 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
5030 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
5032 /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
5033 Note that we do not currently support conditionally executed atomic
5035 insn
= read_memory_unsigned_integer (loc
, 4, byte_order_for_code
);
5037 if ((insn
& 0xff9000f0) != 0xe1900090)
5040 /* Assume that no atomic sequence is longer than "atomic_sequence_length"
5042 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
5044 insn
= read_memory_unsigned_integer (loc
, 4, byte_order_for_code
);
5047 /* Assume that there is at most one conditional branch in the atomic
5048 sequence. If a conditional branch is found, put a breakpoint in
5049 its destination address. */
5050 if (bits (insn
, 24, 27) == 0xa)
5052 if (last_breakpoint
> 0)
5053 return 0; /* More than one conditional branch found, fallback
5054 to the standard single-step code. */
5056 breaks
[1] = BranchDest (loc
- 4, insn
);
5060 /* We do not support atomic sequences that use any *other* instructions
5061 but conditional branches to change the PC. Fall back to standard
5062 code to avoid losing control of execution. */
5063 else if (arm_instruction_changes_pc (insn
))
5066 /* If we find a strex{,b,h,d}, we're done. */
5067 if ((insn
& 0xff9000f0) == 0xe1800090)
5071 /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
5072 if (insn_count
== atomic_sequence_length
)
5075 /* Insert a breakpoint right after the end of the atomic sequence. */
5078 /* Check for duplicated breakpoints. Check also for a breakpoint
5079 placed (branch instruction's destination) anywhere in sequence. */
5081 && (breaks
[1] == breaks
[0]
5082 || (breaks
[1] >= pc
&& breaks
[1] < loc
)))
5083 last_breakpoint
= 0;
5085 /* Effectively inserts the breakpoints. */
5086 for (index
= 0; index
<= last_breakpoint
; index
++)
5087 arm_insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
/* Dispatch atomic-sequence handling to the mode-specific worker,
   depending on whether FRAME is executing Thumb or ARM code.  */

static int
arm_deal_with_atomic_sequence (struct frame_info *frame)
{
  if (arm_frame_is_thumb (frame))
    return thumb_deal_with_atomic_sequence_raw (frame);
  else
    return arm_deal_with_atomic_sequence_raw (frame);
}
5101 /* single_step() is called just before we want to resume the inferior,
5102 if we want to single-step it but there is no hardware or kernel
5103 single-step support. We find the target of the coming instruction
5104 and breakpoint it. */
5107 arm_software_single_step (struct frame_info
*frame
)
5109 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
5110 struct address_space
*aspace
= get_frame_address_space (frame
);
5113 if (arm_deal_with_atomic_sequence (frame
))
5116 next_pc
= arm_get_next_pc (frame
, get_frame_pc (frame
));
5117 arm_insert_single_step_breakpoint (gdbarch
, aspace
, next_pc
);
5122 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
5123 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
5124 NULL if an error occurs. BUF is freed. */
5127 extend_buffer_earlier (gdb_byte
*buf
, CORE_ADDR endaddr
,
5128 int old_len
, int new_len
)
5130 gdb_byte
*new_buf
, *middle
;
5131 int bytes_to_read
= new_len
- old_len
;
5133 new_buf
= xmalloc (new_len
);
5134 memcpy (new_buf
+ bytes_to_read
, buf
, old_len
);
5136 if (target_read_memory (endaddr
- new_len
, new_buf
, bytes_to_read
) != 0)
5144 /* An IT block is at most the 2-byte IT instruction followed by
5145 four 4-byte instructions. The furthest back we must search to
5146 find an IT block that affects the current instruction is thus
5147 2 + 3 * 4 == 14 bytes. */
5148 #define MAX_IT_BLOCK_PREFIX 14
5150 /* Use a quick scan if there are more than this many bytes of
5152 #define IT_SCAN_THRESHOLD 32
5154 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
5155 A breakpoint in an IT block may not be hit, depending on the
5158 arm_adjust_breakpoint_address (struct gdbarch
*gdbarch
, CORE_ADDR bpaddr
)
5162 CORE_ADDR boundary
, func_start
;
5163 int buf_len
, buf2_len
;
5164 enum bfd_endian order
= gdbarch_byte_order_for_code (gdbarch
);
5165 int i
, any
, last_it
, last_it_count
;
5167 /* If we are using BKPT breakpoints, none of this is necessary. */
5168 if (gdbarch_tdep (gdbarch
)->thumb2_breakpoint
== NULL
)
5171 /* ARM mode does not have this problem. */
5172 if (!arm_pc_is_thumb (gdbarch
, bpaddr
))
5175 /* We are setting a breakpoint in Thumb code that could potentially
5176 contain an IT block. The first step is to find how much Thumb
5177 code there is; we do not need to read outside of known Thumb
5179 map_type
= arm_find_mapping_symbol (bpaddr
, &boundary
);
5181 /* Thumb-2 code must have mapping symbols to have a chance. */
5184 bpaddr
= gdbarch_addr_bits_remove (gdbarch
, bpaddr
);
5186 if (find_pc_partial_function (bpaddr
, NULL
, &func_start
, NULL
)
5187 && func_start
> boundary
)
5188 boundary
= func_start
;
5190 /* Search for a candidate IT instruction. We have to do some fancy
5191 footwork to distinguish a real IT instruction from the second
5192 half of a 32-bit instruction, but there is no need for that if
5193 there's no candidate. */
5194 buf_len
= min (bpaddr
- boundary
, MAX_IT_BLOCK_PREFIX
);
5196 /* No room for an IT instruction. */
5199 buf
= xmalloc (buf_len
);
5200 if (target_read_memory (bpaddr
- buf_len
, buf
, buf_len
) != 0)
5203 for (i
= 0; i
< buf_len
; i
+= 2)
5205 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5206 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
5218 /* OK, the code bytes before this instruction contain at least one
5219 halfword which resembles an IT instruction. We know that it's
5220 Thumb code, but there are still two possibilities. Either the
5221 halfword really is an IT instruction, or it is the second half of
5222 a 32-bit Thumb instruction. The only way we can tell is to
5223 scan forwards from a known instruction boundary. */
5224 if (bpaddr
- boundary
> IT_SCAN_THRESHOLD
)
5228 /* There's a lot of code before this instruction. Start with an
5229 optimistic search; it's easy to recognize halfwords that can
5230 not be the start of a 32-bit instruction, and use that to
5231 lock on to the instruction boundaries. */
5232 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, IT_SCAN_THRESHOLD
);
5235 buf_len
= IT_SCAN_THRESHOLD
;
5238 for (i
= 0; i
< buf_len
- sizeof (buf
) && ! definite
; i
+= 2)
5240 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5241 if (thumb_insn_size (inst1
) == 2)
5248 /* At this point, if DEFINITE, BUF[I] is the first place we
5249 are sure that we know the instruction boundaries, and it is far
5250 enough from BPADDR that we could not miss an IT instruction
5251 affecting BPADDR. If ! DEFINITE, give up - start from a
5255 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
,
5259 buf_len
= bpaddr
- boundary
;
5265 buf
= extend_buffer_earlier (buf
, bpaddr
, buf_len
, bpaddr
- boundary
);
5268 buf_len
= bpaddr
- boundary
;
5272 /* Scan forwards. Find the last IT instruction before BPADDR. */
5277 unsigned short inst1
= extract_unsigned_integer (&buf
[i
], 2, order
);
5279 if ((inst1
& 0xff00) == 0xbf00 && (inst1
& 0x000f) != 0)
5284 else if (inst1
& 0x0002)
5286 else if (inst1
& 0x0004)
5291 i
+= thumb_insn_size (inst1
);
5297 /* There wasn't really an IT instruction after all. */
5300 if (last_it_count
< 1)
5301 /* It was too far away. */
5304 /* This really is a trouble spot. Move the breakpoint to the IT
5306 return bpaddr
- buf_len
+ last_it
;
5309 /* ARM displaced stepping support.
5311 Generally ARM displaced stepping works as follows:
5313 1. When an instruction is to be single-stepped, it is first decoded by
5314 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5315 Depending on the type of instruction, it is then copied to a scratch
5316 location, possibly in a modified form. The copy_* set of functions
5317 performs such modification, as necessary. A breakpoint is placed after
5318 the modified instruction in the scratch space to return control to GDB.
5319 Note in particular that instructions which modify the PC will no longer
5320 do so after modification.
5322 2. The instruction is single-stepped, by setting the PC to the scratch
5323 location address, and resuming. Control returns to GDB when the
5326 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5327 function used for the current instruction. This function's job is to
5328 put the CPU/memory state back to what it would have been if the
5329 instruction had been executed unmodified in its original location. */
5331 /* NOP instruction (mov r0, r0). */
5332 #define ARM_NOP 0xe1a00000
5333 #define THUMB_NOP 0x4600
5335 /* Helper for register reads for displaced stepping. In particular, this
5336 returns the PC as it would be seen by the instruction at its original
5340 displaced_read_reg (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5344 CORE_ADDR from
= dsc
->insn_addr
;
5346 if (regno
== ARM_PC_REGNUM
)
5348 /* Compute pipeline offset:
5349 - When executing an ARM instruction, PC reads as the address of the
5350 current instruction plus 8.
5351 - When executing a Thumb instruction, PC reads as the address of the
5352 current instruction plus 4. */
5359 if (debug_displaced
)
5360 fprintf_unfiltered (gdb_stdlog
, "displaced: read pc value %.8lx\n",
5361 (unsigned long) from
);
5362 return (ULONGEST
) from
;
5366 regcache_cooked_read_unsigned (regs
, regno
, &ret
);
5367 if (debug_displaced
)
5368 fprintf_unfiltered (gdb_stdlog
, "displaced: read r%d value %.8lx\n",
5369 regno
, (unsigned long) ret
);
5375 displaced_in_arm_mode (struct regcache
*regs
)
5378 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
5380 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
5382 return (ps
& t_bit
) == 0;
5385 /* Write to the PC as from a branch instruction. */
5388 branch_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5392 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5393 architecture versions < 6. */
5394 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
5395 val
& ~(ULONGEST
) 0x3);
5397 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
5398 val
& ~(ULONGEST
) 0x1);
5401 /* Write to the PC as from a branch-exchange instruction. */
5404 bx_write_pc (struct regcache
*regs
, ULONGEST val
)
5407 ULONGEST t_bit
= arm_psr_thumb_bit (get_regcache_arch (regs
));
5409 regcache_cooked_read_unsigned (regs
, ARM_PS_REGNUM
, &ps
);
5413 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
| t_bit
);
5414 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffe);
5416 else if ((val
& 2) == 0)
5418 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
5419 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
);
5423 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5424 mode, align dest to 4 bytes). */
5425 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5426 regcache_cooked_write_unsigned (regs
, ARM_PS_REGNUM
, ps
& ~t_bit
);
5427 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
, val
& 0xfffffffc);
5431 /* Write to the PC as if from a load instruction. */
5434 load_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5437 if (DISPLACED_STEPPING_ARCH_VERSION
>= 5)
5438 bx_write_pc (regs
, val
);
5440 branch_write_pc (regs
, dsc
, val
);
5443 /* Write to the PC as if from an ALU instruction. */
5446 alu_write_pc (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5449 if (DISPLACED_STEPPING_ARCH_VERSION
>= 7 && !dsc
->is_thumb
)
5450 bx_write_pc (regs
, val
);
5452 branch_write_pc (regs
, dsc
, val
);
5455 /* Helper for writing to registers for displaced stepping. Writing to the PC
5456 has a varying effects depending on the instruction which does the write:
5457 this is controlled by the WRITE_PC argument. */
5460 displaced_write_reg (struct regcache
*regs
, struct displaced_step_closure
*dsc
,
5461 int regno
, ULONGEST val
, enum pc_write_style write_pc
)
5463 if (regno
== ARM_PC_REGNUM
)
5465 if (debug_displaced
)
5466 fprintf_unfiltered (gdb_stdlog
, "displaced: writing pc %.8lx\n",
5467 (unsigned long) val
);
5470 case BRANCH_WRITE_PC
:
5471 branch_write_pc (regs
, dsc
, val
);
5475 bx_write_pc (regs
, val
);
5479 load_write_pc (regs
, dsc
, val
);
5483 alu_write_pc (regs
, dsc
, val
);
5486 case CANNOT_WRITE_PC
:
5487 warning (_("Instruction wrote to PC in an unexpected way when "
5488 "single-stepping"));
5492 internal_error (__FILE__
, __LINE__
,
5493 _("Invalid argument to displaced_write_reg"));
5496 dsc
->wrote_to_pc
= 1;
5500 if (debug_displaced
)
5501 fprintf_unfiltered (gdb_stdlog
, "displaced: writing r%d value %.8lx\n",
5502 regno
, (unsigned long) val
);
5503 regcache_cooked_write_unsigned (regs
, regno
, val
);
/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function
   returns return 1 if any of these fields in INSN reference the PC
   (also 0b1111, r15), else it returns 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t remaining = bitmask;
  uint32_t probe = 1;

  while (remaining != 0)
    {
      uint32_t field;

      /* Advance PROBE to the lowest bit still set in REMAINING.  */
      for (; probe && (remaining & probe) == 0; probe <<= 1)
	;

      if (!probe)
	return 0;

      /* Each register field spans four bits starting at PROBE.  */
      field = probe * 0xf;

      /* All-ones in the field means r15 (the PC).  */
      if ((insn & field) == field)
	return 1;

      remaining &= ~field;
    }

  return 0;
}
5539 /* The simplest copy function. Many instructions have the same effect no
5540 matter what address they are executed at: in those cases, use this. */
5543 arm_copy_unmodified (struct gdbarch
*gdbarch
, uint32_t insn
,
5544 const char *iname
, struct displaced_step_closure
*dsc
)
5546 if (debug_displaced
)
5547 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.8lx, "
5548 "opcode/class '%s' unmodified\n", (unsigned long) insn
,
5551 dsc
->modinsn
[0] = insn
;
5557 thumb_copy_unmodified_32bit (struct gdbarch
*gdbarch
, uint16_t insn1
,
5558 uint16_t insn2
, const char *iname
,
5559 struct displaced_step_closure
*dsc
)
5561 if (debug_displaced
)
5562 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.4x %.4x, "
5563 "opcode/class '%s' unmodified\n", insn1
, insn2
,
5566 dsc
->modinsn
[0] = insn1
;
5567 dsc
->modinsn
[1] = insn2
;
5573 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
5576 thumb_copy_unmodified_16bit (struct gdbarch
*gdbarch
, unsigned int insn
,
5578 struct displaced_step_closure
*dsc
)
5580 if (debug_displaced
)
5581 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.4x, "
5582 "opcode/class '%s' unmodified\n", insn
,
5585 dsc
->modinsn
[0] = insn
;
5590 /* Preload instructions with immediate offset. */
5593 cleanup_preload (struct gdbarch
*gdbarch
,
5594 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5596 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5597 if (!dsc
->u
.preload
.immed
)
5598 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
5602 install_preload (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5603 struct displaced_step_closure
*dsc
, unsigned int rn
)
5606 /* Preload instructions:
5608 {pli/pld} [rn, #+/-imm]
5610 {pli/pld} [r0, #+/-imm]. */
5612 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5613 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5614 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5615 dsc
->u
.preload
.immed
= 1;
5617 dsc
->cleanup
= &cleanup_preload
;
5621 arm_copy_preload (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
5622 struct displaced_step_closure
*dsc
)
5624 unsigned int rn
= bits (insn
, 16, 19);
5626 if (!insn_references_pc (insn
, 0x000f0000ul
))
5627 return arm_copy_unmodified (gdbarch
, insn
, "preload", dsc
);
5629 if (debug_displaced
)
5630 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
5631 (unsigned long) insn
);
5633 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
5635 install_preload (gdbarch
, regs
, dsc
, rn
);
5641 thumb2_copy_preload (struct gdbarch
*gdbarch
, uint16_t insn1
, uint16_t insn2
,
5642 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5644 unsigned int rn
= bits (insn1
, 0, 3);
5645 unsigned int u_bit
= bit (insn1
, 7);
5646 int imm12
= bits (insn2
, 0, 11);
5649 if (rn
!= ARM_PC_REGNUM
)
5650 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "preload", dsc
);
5652 /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
5653 PLD (literal) Encoding T1. */
5654 if (debug_displaced
)
5655 fprintf_unfiltered (gdb_stdlog
,
5656 "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
5657 (unsigned int) dsc
->insn_addr
, u_bit
? '+' : '-',
5663 /* Rewrite instruction {pli/pld} PC imm12 into:
5664 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
5668 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
5670 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5671 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5673 pc_val
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
5675 displaced_write_reg (regs
, dsc
, 0, pc_val
, CANNOT_WRITE_PC
);
5676 displaced_write_reg (regs
, dsc
, 1, imm12
, CANNOT_WRITE_PC
);
5677 dsc
->u
.preload
.immed
= 0;
5679 /* {pli/pld} [r0, r1] */
5680 dsc
->modinsn
[0] = insn1
& 0xfff0;
5681 dsc
->modinsn
[1] = 0xf001;
5684 dsc
->cleanup
= &cleanup_preload
;
5688 /* Preload instructions with register offset. */
5691 install_preload_reg(struct gdbarch
*gdbarch
, struct regcache
*regs
,
5692 struct displaced_step_closure
*dsc
, unsigned int rn
,
5695 ULONGEST rn_val
, rm_val
;
5697 /* Preload register-offset instructions:
5699 {pli/pld} [rn, rm {, shift}]
5701 {pli/pld} [r0, r1 {, shift}]. */
5703 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5704 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
5705 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5706 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
5707 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5708 displaced_write_reg (regs
, dsc
, 1, rm_val
, CANNOT_WRITE_PC
);
5709 dsc
->u
.preload
.immed
= 0;
5711 dsc
->cleanup
= &cleanup_preload
;
5715 arm_copy_preload_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
5716 struct regcache
*regs
,
5717 struct displaced_step_closure
*dsc
)
5719 unsigned int rn
= bits (insn
, 16, 19);
5720 unsigned int rm
= bits (insn
, 0, 3);
5723 if (!insn_references_pc (insn
, 0x000f000ful
))
5724 return arm_copy_unmodified (gdbarch
, insn
, "preload reg", dsc
);
5726 if (debug_displaced
)
5727 fprintf_unfiltered (gdb_stdlog
, "displaced: copying preload insn %.8lx\n",
5728 (unsigned long) insn
);
5730 dsc
->modinsn
[0] = (insn
& 0xfff0fff0) | 0x1;
5732 install_preload_reg (gdbarch
, regs
, dsc
, rn
, rm
);
5736 /* Copy/cleanup coprocessor load and store instructions. */
5739 cleanup_copro_load_store (struct gdbarch
*gdbarch
,
5740 struct regcache
*regs
,
5741 struct displaced_step_closure
*dsc
)
5743 ULONGEST rn_val
= displaced_read_reg (regs
, dsc
, 0);
5745 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
5747 if (dsc
->u
.ldst
.writeback
)
5748 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, LOAD_WRITE_PC
);
5752 install_copro_load_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5753 struct displaced_step_closure
*dsc
,
5754 int writeback
, unsigned int rn
)
5758 /* Coprocessor load/store instructions:
5760 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5762 {stc/stc2} [r0, #+/-imm].
5764 ldc/ldc2 are handled identically. */
5766 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
5767 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
5768 /* PC should be 4-byte aligned. */
5769 rn_val
= rn_val
& 0xfffffffc;
5770 displaced_write_reg (regs
, dsc
, 0, rn_val
, CANNOT_WRITE_PC
);
5772 dsc
->u
.ldst
.writeback
= writeback
;
5773 dsc
->u
.ldst
.rn
= rn
;
5775 dsc
->cleanup
= &cleanup_copro_load_store
;
5779 arm_copy_copro_load_store (struct gdbarch
*gdbarch
, uint32_t insn
,
5780 struct regcache
*regs
,
5781 struct displaced_step_closure
*dsc
)
5783 unsigned int rn
= bits (insn
, 16, 19);
5785 if (!insn_references_pc (insn
, 0x000f0000ul
))
5786 return arm_copy_unmodified (gdbarch
, insn
, "copro load/store", dsc
);
5788 if (debug_displaced
)
5789 fprintf_unfiltered (gdb_stdlog
, "displaced: copying coprocessor "
5790 "load/store insn %.8lx\n", (unsigned long) insn
);
5792 dsc
->modinsn
[0] = insn
& 0xfff0ffff;
5794 install_copro_load_store (gdbarch
, regs
, dsc
, bit (insn
, 25), rn
);
5800 thumb2_copy_copro_load_store (struct gdbarch
*gdbarch
, uint16_t insn1
,
5801 uint16_t insn2
, struct regcache
*regs
,
5802 struct displaced_step_closure
*dsc
)
5804 unsigned int rn
= bits (insn1
, 0, 3);
5806 if (rn
!= ARM_PC_REGNUM
)
5807 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
5808 "copro load/store", dsc
);
5810 if (debug_displaced
)
5811 fprintf_unfiltered (gdb_stdlog
, "displaced: copying coprocessor "
5812 "load/store insn %.4x%.4x\n", insn1
, insn2
);
5814 dsc
->modinsn
[0] = insn1
& 0xfff0;
5815 dsc
->modinsn
[1] = insn2
;
5818 /* This function is called for copying instruction LDC/LDC2/VLDR, which
5819 doesn't support writeback, so pass 0. */
5820 install_copro_load_store (gdbarch
, regs
, dsc
, 0, rn
);
5825 /* Clean up branch instructions (actually perform the branch, by setting
5829 cleanup_branch (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5830 struct displaced_step_closure
*dsc
)
5832 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
5833 int branch_taken
= condition_true (dsc
->u
.branch
.cond
, status
);
5834 enum pc_write_style write_pc
= dsc
->u
.branch
.exchange
5835 ? BX_WRITE_PC
: BRANCH_WRITE_PC
;
5840 if (dsc
->u
.branch
.link
)
5842 /* The value of LR should be the next insn of current one. In order
5843 not to confuse logic hanlding later insn `bx lr', if current insn mode
5844 is Thumb, the bit 0 of LR value should be set to 1. */
5845 ULONGEST next_insn_addr
= dsc
->insn_addr
+ dsc
->insn_size
;
5848 next_insn_addr
|= 0x1;
5850 displaced_write_reg (regs
, dsc
, ARM_LR_REGNUM
, next_insn_addr
,
5854 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, dsc
->u
.branch
.dest
, write_pc
);
5857 /* Copy B/BL/BLX instructions with immediate destinations. */
5860 install_b_bl_blx (struct gdbarch
*gdbarch
, struct regcache
*regs
,
5861 struct displaced_step_closure
*dsc
,
5862 unsigned int cond
, int exchange
, int link
, long offset
)
5864 /* Implement "BL<cond> <label>" as:
5866 Preparation: cond <- instruction condition
5867 Insn: mov r0, r0 (nop)
5868 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5870 B<cond> similar, but don't set r14 in cleanup. */
5872 dsc
->u
.branch
.cond
= cond
;
5873 dsc
->u
.branch
.link
= link
;
5874 dsc
->u
.branch
.exchange
= exchange
;
5876 dsc
->u
.branch
.dest
= dsc
->insn_addr
;
5877 if (link
&& exchange
)
5878 /* For BLX, offset is computed from the Align (PC, 4). */
5879 dsc
->u
.branch
.dest
= dsc
->u
.branch
.dest
& 0xfffffffc;
5882 dsc
->u
.branch
.dest
+= 4 + offset
;
5884 dsc
->u
.branch
.dest
+= 8 + offset
;
5886 dsc
->cleanup
= &cleanup_branch
;
5889 arm_copy_b_bl_blx (struct gdbarch
*gdbarch
, uint32_t insn
,
5890 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
5892 unsigned int cond
= bits (insn
, 28, 31);
5893 int exchange
= (cond
== 0xf);
5894 int link
= exchange
|| bit (insn
, 24);
5897 if (debug_displaced
)
5898 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s immediate insn "
5899 "%.8lx\n", (exchange
) ? "blx" : (link
) ? "bl" : "b",
5900 (unsigned long) insn
);
5902 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5903 then arrange the switch into Thumb mode. */
5904 offset
= (bits (insn
, 0, 23) << 2) | (bit (insn
, 24) << 1) | 1;
5906 offset
= bits (insn
, 0, 23) << 2;
5908 if (bit (offset
, 25))
5909 offset
= offset
| ~0x3ffffff;
5911 dsc
->modinsn
[0] = ARM_NOP
;
5913 install_b_bl_blx (gdbarch
, regs
, dsc
, cond
, exchange
, link
, offset
);
5918 thumb2_copy_b_bl_blx (struct gdbarch
*gdbarch
, uint16_t insn1
,
5919 uint16_t insn2
, struct regcache
*regs
,
5920 struct displaced_step_closure
*dsc
)
5922 int link
= bit (insn2
, 14);
5923 int exchange
= link
&& !bit (insn2
, 12);
5926 int j1
= bit (insn2
, 13);
5927 int j2
= bit (insn2
, 11);
5928 int s
= sbits (insn1
, 10, 10);
5929 int i1
= !(j1
^ bit (insn1
, 10));
5930 int i2
= !(j2
^ bit (insn1
, 10));
5932 if (!link
&& !exchange
) /* B */
5934 offset
= (bits (insn2
, 0, 10) << 1);
5935 if (bit (insn2
, 12)) /* Encoding T4 */
5937 offset
|= (bits (insn1
, 0, 9) << 12)
5943 else /* Encoding T3 */
5945 offset
|= (bits (insn1
, 0, 5) << 12)
5949 cond
= bits (insn1
, 6, 9);
5954 offset
= (bits (insn1
, 0, 9) << 12);
5955 offset
|= ((i2
<< 22) | (i1
<< 23) | (s
<< 24));
5956 offset
|= exchange
?
5957 (bits (insn2
, 1, 10) << 2) : (bits (insn2
, 0, 10) << 1);
5960 if (debug_displaced
)
5961 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s insn "
5962 "%.4x %.4x with offset %.8lx\n",
5963 link
? (exchange
) ? "blx" : "bl" : "b",
5964 insn1
, insn2
, offset
);
5966 dsc
->modinsn
[0] = THUMB_NOP
;
5968 install_b_bl_blx (gdbarch
, regs
, dsc
, cond
, exchange
, link
, offset
);
5972 /* Copy B Thumb instructions. */
5974 thumb_copy_b (struct gdbarch
*gdbarch
, unsigned short insn
,
5975 struct displaced_step_closure
*dsc
)
5977 unsigned int cond
= 0;
5979 unsigned short bit_12_15
= bits (insn
, 12, 15);
5980 CORE_ADDR from
= dsc
->insn_addr
;
5982 if (bit_12_15
== 0xd)
5984 /* offset = SignExtend (imm8:0, 32) */
5985 offset
= sbits ((insn
<< 1), 0, 8);
5986 cond
= bits (insn
, 8, 11);
5988 else if (bit_12_15
== 0xe) /* Encoding T2 */
5990 offset
= sbits ((insn
<< 1), 0, 11);
5994 if (debug_displaced
)
5995 fprintf_unfiltered (gdb_stdlog
,
5996 "displaced: copying b immediate insn %.4x "
5997 "with offset %d\n", insn
, offset
);
5999 dsc
->u
.branch
.cond
= cond
;
6000 dsc
->u
.branch
.link
= 0;
6001 dsc
->u
.branch
.exchange
= 0;
6002 dsc
->u
.branch
.dest
= from
+ 4 + offset
;
6004 dsc
->modinsn
[0] = THUMB_NOP
;
6006 dsc
->cleanup
= &cleanup_branch
;
6011 /* Copy BX/BLX with register-specified destinations. */
6014 install_bx_blx_reg (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6015 struct displaced_step_closure
*dsc
, int link
,
6016 unsigned int cond
, unsigned int rm
)
6018 /* Implement {BX,BLX}<cond> <reg>" as:
6020 Preparation: cond <- instruction condition
6021 Insn: mov r0, r0 (nop)
6022 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
6024 Don't set r14 in cleanup for BX. */
6026 dsc
->u
.branch
.dest
= displaced_read_reg (regs
, dsc
, rm
);
6028 dsc
->u
.branch
.cond
= cond
;
6029 dsc
->u
.branch
.link
= link
;
6031 dsc
->u
.branch
.exchange
= 1;
6033 dsc
->cleanup
= &cleanup_branch
;
6037 arm_copy_bx_blx_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
6038 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6040 unsigned int cond
= bits (insn
, 28, 31);
6043 int link
= bit (insn
, 5);
6044 unsigned int rm
= bits (insn
, 0, 3);
6046 if (debug_displaced
)
6047 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.8lx",
6048 (unsigned long) insn
);
6050 dsc
->modinsn
[0] = ARM_NOP
;
6052 install_bx_blx_reg (gdbarch
, regs
, dsc
, link
, cond
, rm
);
6057 thumb_copy_bx_blx_reg (struct gdbarch
*gdbarch
, uint16_t insn
,
6058 struct regcache
*regs
,
6059 struct displaced_step_closure
*dsc
)
6061 int link
= bit (insn
, 7);
6062 unsigned int rm
= bits (insn
, 3, 6);
6064 if (debug_displaced
)
6065 fprintf_unfiltered (gdb_stdlog
, "displaced: copying insn %.4x",
6066 (unsigned short) insn
);
6068 dsc
->modinsn
[0] = THUMB_NOP
;
6070 install_bx_blx_reg (gdbarch
, regs
, dsc
, link
, INST_AL
, rm
);
6076 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
6079 cleanup_alu_imm (struct gdbarch
*gdbarch
,
6080 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6082 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
, 0);
6083 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
6084 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
6085 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
6089 arm_copy_alu_imm (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
6090 struct displaced_step_closure
*dsc
)
6092 unsigned int rn
= bits (insn
, 16, 19);
6093 unsigned int rd
= bits (insn
, 12, 15);
6094 unsigned int op
= bits (insn
, 21, 24);
6095 int is_mov
= (op
== 0xd);
6096 ULONGEST rd_val
, rn_val
;
6098 if (!insn_references_pc (insn
, 0x000ff000ul
))
6099 return arm_copy_unmodified (gdbarch
, insn
, "ALU immediate", dsc
);
6101 if (debug_displaced
)
6102 fprintf_unfiltered (gdb_stdlog
, "displaced: copying immediate %s insn "
6103 "%.8lx\n", is_mov
? "move" : "ALU",
6104 (unsigned long) insn
);
6106 /* Instruction is of form:
6108 <op><cond> rd, [rn,] #imm
6112 Preparation: tmp1, tmp2 <- r0, r1;
6114 Insn: <op><cond> r0, r1, #imm
6115 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6118 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6119 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6120 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6121 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6122 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6123 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6127 dsc
->modinsn
[0] = insn
& 0xfff00fff;
6129 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x10000;
6131 dsc
->cleanup
= &cleanup_alu_imm
;
6137 thumb2_copy_alu_imm (struct gdbarch
*gdbarch
, uint16_t insn1
,
6138 uint16_t insn2
, struct regcache
*regs
,
6139 struct displaced_step_closure
*dsc
)
6141 unsigned int op
= bits (insn1
, 5, 8);
6142 unsigned int rn
, rm
, rd
;
6143 ULONGEST rd_val
, rn_val
;
6145 rn
= bits (insn1
, 0, 3); /* Rn */
6146 rm
= bits (insn2
, 0, 3); /* Rm */
6147 rd
= bits (insn2
, 8, 11); /* Rd */
6149 /* This routine is only called for instruction MOV. */
6150 gdb_assert (op
== 0x2 && rn
== 0xf);
6152 if (rm
!= ARM_PC_REGNUM
&& rd
!= ARM_PC_REGNUM
)
6153 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "ALU imm", dsc
);
6155 if (debug_displaced
)
6156 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.4x%.4x\n",
6157 "ALU", insn1
, insn2
);
6159 /* Instruction is of form:
6161 <op><cond> rd, [rn,] #imm
6165 Preparation: tmp1, tmp2 <- r0, r1;
6167 Insn: <op><cond> r0, r1, #imm
6168 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6171 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6172 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6173 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6174 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6175 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6176 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6179 dsc
->modinsn
[0] = insn1
;
6180 dsc
->modinsn
[1] = ((insn2
& 0xf0f0) | 0x1);
6183 dsc
->cleanup
= &cleanup_alu_imm
;
6188 /* Copy/cleanup arithmetic/logic insns with register RHS. */
6191 cleanup_alu_reg (struct gdbarch
*gdbarch
,
6192 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6197 rd_val
= displaced_read_reg (regs
, dsc
, 0);
6199 for (i
= 0; i
< 3; i
++)
6200 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
6202 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
6206 install_alu_reg (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6207 struct displaced_step_closure
*dsc
,
6208 unsigned int rd
, unsigned int rn
, unsigned int rm
)
6210 ULONGEST rd_val
, rn_val
, rm_val
;
6212 /* Instruction is of form:
6214 <op><cond> rd, [rn,] rm [, <shift>]
6218 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
6219 r0, r1, r2 <- rd, rn, rm
6220 Insn: <op><cond> r0, r1, r2 [, <shift>]
6221 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
6224 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6225 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6226 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6227 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6228 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6229 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6230 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6231 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6232 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
6235 dsc
->cleanup
= &cleanup_alu_reg
;
6239 arm_copy_alu_reg (struct gdbarch
*gdbarch
, uint32_t insn
, struct regcache
*regs
,
6240 struct displaced_step_closure
*dsc
)
6242 unsigned int op
= bits (insn
, 21, 24);
6243 int is_mov
= (op
== 0xd);
6245 if (!insn_references_pc (insn
, 0x000ff00ful
))
6246 return arm_copy_unmodified (gdbarch
, insn
, "ALU reg", dsc
);
6248 if (debug_displaced
)
6249 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.8lx\n",
6250 is_mov
? "move" : "ALU", (unsigned long) insn
);
6253 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x2;
6255 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x10002;
6257 install_alu_reg (gdbarch
, regs
, dsc
, bits (insn
, 12, 15), bits (insn
, 16, 19),
/* NOTE(review): this block arrived through a garbled extraction — the
   assignment that initializes `rm' (declared below, passed to
   install_alu_reg) is missing, along with the function's storage class,
   return type and braces.  Do not attempt to compile as-is; recover the
   exact text from upstream GDB before editing.  */
6263 thumb_copy_alu_reg (struct gdbarch
*gdbarch
, uint16_t insn
,
6264 struct regcache
*regs
,
6265 struct displaced_step_closure
*dsc
)
/* Locals: rd/rn decoded from the 16-bit high-register ALU encoding; rm is
   declared here but its initialization was lost in extraction.  */
6267 unsigned rn
, rm
, rd
;
6269 rd
= bits (insn
, 3, 6);
6270 rn
= (bit (insn
, 7) << 3) | bits (insn
, 0, 2);
/* Fast path: if neither decoded register is the PC, the insn can run
   out of line unmodified.  */
6273 if (rd
!= ARM_PC_REGNUM
&& rn
!= ARM_PC_REGNUM
)
6274 return thumb_copy_unmodified_16bit (gdbarch
, insn
, "ALU reg", dsc
);
6276 if (debug_displaced
)
6277 fprintf_unfiltered (gdb_stdlog
, "displaced: copying reg %s insn %.4x\n",
6278 "ALU", (unsigned short) insn
);
/* Rewrite the low byte so the copied insn operates on scratch regs.  */
6280 dsc
->modinsn
[0] = ((insn
& 0xff00) | 0x08);
/* install_alu_reg saves r0-r2 and registers cleanup_alu_reg.  */
6282 install_alu_reg (gdbarch
, regs
, dsc
, rd
, rn
, rm
);
6287 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6290 cleanup_alu_shifted_reg (struct gdbarch
*gdbarch
,
6291 struct regcache
*regs
,
6292 struct displaced_step_closure
*dsc
)
6294 ULONGEST rd_val
= displaced_read_reg (regs
, dsc
, 0);
6297 for (i
= 0; i
< 4; i
++)
6298 displaced_write_reg (regs
, dsc
, i
, dsc
->tmp
[i
], CANNOT_WRITE_PC
);
6300 displaced_write_reg (regs
, dsc
, dsc
->rd
, rd_val
, ALU_WRITE_PC
);
6304 install_alu_shifted_reg (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6305 struct displaced_step_closure
*dsc
,
6306 unsigned int rd
, unsigned int rn
, unsigned int rm
,
6310 ULONGEST rd_val
, rn_val
, rm_val
, rs_val
;
6312 /* Instruction is of form:
6314 <op><cond> rd, [rn,] rm, <shift> rs
6318 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
6319 r0, r1, r2, r3 <- rd, rn, rm, rs
6320 Insn: <op><cond> r0, r1, r2, <shift> r3
6322 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
6326 for (i
= 0; i
< 4; i
++)
6327 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
6329 rd_val
= displaced_read_reg (regs
, dsc
, rd
);
6330 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6331 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6332 rs_val
= displaced_read_reg (regs
, dsc
, rs
);
6333 displaced_write_reg (regs
, dsc
, 0, rd_val
, CANNOT_WRITE_PC
);
6334 displaced_write_reg (regs
, dsc
, 1, rn_val
, CANNOT_WRITE_PC
);
6335 displaced_write_reg (regs
, dsc
, 2, rm_val
, CANNOT_WRITE_PC
);
6336 displaced_write_reg (regs
, dsc
, 3, rs_val
, CANNOT_WRITE_PC
);
6338 dsc
->cleanup
= &cleanup_alu_shifted_reg
;
6342 arm_copy_alu_shifted_reg (struct gdbarch
*gdbarch
, uint32_t insn
,
6343 struct regcache
*regs
,
6344 struct displaced_step_closure
*dsc
)
6346 unsigned int op
= bits (insn
, 21, 24);
6347 int is_mov
= (op
== 0xd);
6348 unsigned int rd
, rn
, rm
, rs
;
6350 if (!insn_references_pc (insn
, 0x000fff0ful
))
6351 return arm_copy_unmodified (gdbarch
, insn
, "ALU shifted reg", dsc
);
6353 if (debug_displaced
)
6354 fprintf_unfiltered (gdb_stdlog
, "displaced: copying shifted reg %s insn "
6355 "%.8lx\n", is_mov
? "move" : "ALU",
6356 (unsigned long) insn
);
6358 rn
= bits (insn
, 16, 19);
6359 rm
= bits (insn
, 0, 3);
6360 rs
= bits (insn
, 8, 11);
6361 rd
= bits (insn
, 12, 15);
6364 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x302;
6366 dsc
->modinsn
[0] = (insn
& 0xfff000f0) | 0x10302;
6368 install_alu_shifted_reg (gdbarch
, regs
, dsc
, rd
, rn
, rm
, rs
);
6373 /* Clean up load instructions. */
6376 cleanup_load (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6377 struct displaced_step_closure
*dsc
)
6379 ULONGEST rt_val
, rt_val2
= 0, rn_val
;
6381 rt_val
= displaced_read_reg (regs
, dsc
, 0);
6382 if (dsc
->u
.ldst
.xfersize
== 8)
6383 rt_val2
= displaced_read_reg (regs
, dsc
, 1);
6384 rn_val
= displaced_read_reg (regs
, dsc
, 2);
6386 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
6387 if (dsc
->u
.ldst
.xfersize
> 4)
6388 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
6389 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
6390 if (!dsc
->u
.ldst
.immed
)
6391 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
6393 /* Handle register writeback. */
6394 if (dsc
->u
.ldst
.writeback
)
6395 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
6396 /* Put result in right place. */
6397 displaced_write_reg (regs
, dsc
, dsc
->rd
, rt_val
, LOAD_WRITE_PC
);
6398 if (dsc
->u
.ldst
.xfersize
== 8)
6399 displaced_write_reg (regs
, dsc
, dsc
->rd
+ 1, rt_val2
, LOAD_WRITE_PC
);
6402 /* Clean up store instructions. */
6405 cleanup_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6406 struct displaced_step_closure
*dsc
)
6408 ULONGEST rn_val
= displaced_read_reg (regs
, dsc
, 2);
6410 displaced_write_reg (regs
, dsc
, 0, dsc
->tmp
[0], CANNOT_WRITE_PC
);
6411 if (dsc
->u
.ldst
.xfersize
> 4)
6412 displaced_write_reg (regs
, dsc
, 1, dsc
->tmp
[1], CANNOT_WRITE_PC
);
6413 displaced_write_reg (regs
, dsc
, 2, dsc
->tmp
[2], CANNOT_WRITE_PC
);
6414 if (!dsc
->u
.ldst
.immed
)
6415 displaced_write_reg (regs
, dsc
, 3, dsc
->tmp
[3], CANNOT_WRITE_PC
);
6416 if (!dsc
->u
.ldst
.restore_r4
)
6417 displaced_write_reg (regs
, dsc
, 4, dsc
->tmp
[4], CANNOT_WRITE_PC
);
6420 if (dsc
->u
.ldst
.writeback
)
6421 displaced_write_reg (regs
, dsc
, dsc
->u
.ldst
.rn
, rn_val
, CANNOT_WRITE_PC
);
6424 /* Copy "extra" load/store instructions. These are halfword/doubleword
6425 transfers, which have a different encoding to byte/word transfers. */
6428 arm_copy_extra_ld_st (struct gdbarch
*gdbarch
, uint32_t insn
, int unpriveleged
,
6429 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
6431 unsigned int op1
= bits (insn
, 20, 24);
6432 unsigned int op2
= bits (insn
, 5, 6);
6433 unsigned int rt
= bits (insn
, 12, 15);
6434 unsigned int rn
= bits (insn
, 16, 19);
6435 unsigned int rm
= bits (insn
, 0, 3);
6436 char load
[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6437 char bytesize
[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6438 int immed
= (op1
& 0x4) != 0;
6440 ULONGEST rt_val
, rt_val2
= 0, rn_val
, rm_val
= 0;
6442 if (!insn_references_pc (insn
, 0x000ff00ful
))
6443 return arm_copy_unmodified (gdbarch
, insn
, "extra load/store", dsc
);
6445 if (debug_displaced
)
6446 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %sextra load/store "
6447 "insn %.8lx\n", unpriveleged
? "unpriveleged " : "",
6448 (unsigned long) insn
);
6450 opcode
= ((op2
<< 2) | (op1
& 0x1) | ((op1
& 0x4) >> 1)) - 4;
6453 internal_error (__FILE__
, __LINE__
,
6454 _("copy_extra_ld_st: instruction decode error"));
6456 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6457 dsc
->tmp
[1] = displaced_read_reg (regs
, dsc
, 1);
6458 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6460 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
6462 rt_val
= displaced_read_reg (regs
, dsc
, rt
);
6463 if (bytesize
[opcode
] == 8)
6464 rt_val2
= displaced_read_reg (regs
, dsc
, rt
+ 1);
6465 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6467 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6469 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
6470 if (bytesize
[opcode
] == 8)
6471 displaced_write_reg (regs
, dsc
, 1, rt_val2
, CANNOT_WRITE_PC
);
6472 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
6474 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
6477 dsc
->u
.ldst
.xfersize
= bytesize
[opcode
];
6478 dsc
->u
.ldst
.rn
= rn
;
6479 dsc
->u
.ldst
.immed
= immed
;
6480 dsc
->u
.ldst
.writeback
= bit (insn
, 24) == 0 || bit (insn
, 21) != 0;
6481 dsc
->u
.ldst
.restore_r4
= 0;
6484 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6486 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6487 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
6489 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6491 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6492 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
6494 dsc
->cleanup
= load
[opcode
] ? &cleanup_load
: &cleanup_store
;
6499 /* Copy byte/half word/word loads and stores. */
6502 install_load_store (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6503 struct displaced_step_closure
*dsc
, int load
,
6504 int immed
, int writeback
, int size
, int usermode
,
6505 int rt
, int rm
, int rn
)
6507 ULONGEST rt_val
, rn_val
, rm_val
= 0;
6509 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6510 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6512 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
6514 dsc
->tmp
[4] = displaced_read_reg (regs
, dsc
, 4);
6516 rt_val
= displaced_read_reg (regs
, dsc
, rt
);
6517 rn_val
= displaced_read_reg (regs
, dsc
, rn
);
6519 rm_val
= displaced_read_reg (regs
, dsc
, rm
);
6521 displaced_write_reg (regs
, dsc
, 0, rt_val
, CANNOT_WRITE_PC
);
6522 displaced_write_reg (regs
, dsc
, 2, rn_val
, CANNOT_WRITE_PC
);
6524 displaced_write_reg (regs
, dsc
, 3, rm_val
, CANNOT_WRITE_PC
);
6526 dsc
->u
.ldst
.xfersize
= size
;
6527 dsc
->u
.ldst
.rn
= rn
;
6528 dsc
->u
.ldst
.immed
= immed
;
6529 dsc
->u
.ldst
.writeback
= writeback
;
6531 /* To write PC we can do:
6533 Before this sequence of instructions:
6534 r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
6535 r2 is the Rn value got from dispalced_read_reg.
6537 Insn1: push {pc} Write address of STR instruction + offset on stack
6538 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
6539 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
6540 = addr(Insn1) + offset - addr(Insn3) - 8
6542 Insn4: add r4, r4, #8 r4 = offset - 8
6543 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
6545 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
6547 Otherwise we don't know what value to write for PC, since the offset is
6548 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
6549 of this can be found in Section "Saving from r15" in
6550 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
6552 dsc
->cleanup
= load
? &cleanup_load
: &cleanup_store
;
6557 thumb2_copy_load_literal (struct gdbarch
*gdbarch
, uint16_t insn1
,
6558 uint16_t insn2
, struct regcache
*regs
,
6559 struct displaced_step_closure
*dsc
, int size
)
6561 unsigned int u_bit
= bit (insn1
, 7);
6562 unsigned int rt
= bits (insn2
, 12, 15);
6563 int imm12
= bits (insn2
, 0, 11);
6566 if (debug_displaced
)
6567 fprintf_unfiltered (gdb_stdlog
,
6568 "displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
6569 (unsigned int) dsc
->insn_addr
, rt
, u_bit
? '+' : '-',
6575 /* Rewrite instruction LDR Rt imm12 into:
6577 Prepare: tmp[0] <- r0, tmp[1] <- r2, tmp[2] <- r3, r2 <- pc, r3 <- imm12
6581 Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[1], r3 <- tmp[2]. */
6584 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
6585 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
6586 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
6588 pc_val
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
6590 pc_val
= pc_val
& 0xfffffffc;
6592 displaced_write_reg (regs
, dsc
, 2, pc_val
, CANNOT_WRITE_PC
);
6593 displaced_write_reg (regs
, dsc
, 3, imm12
, CANNOT_WRITE_PC
);
6597 dsc
->u
.ldst
.xfersize
= size
;
6598 dsc
->u
.ldst
.immed
= 0;
6599 dsc
->u
.ldst
.writeback
= 0;
6600 dsc
->u
.ldst
.restore_r4
= 0;
6602 /* LDR R0, R2, R3 */
6603 dsc
->modinsn
[0] = 0xf852;
6604 dsc
->modinsn
[1] = 0x3;
6607 dsc
->cleanup
= &cleanup_load
;
6613 thumb2_copy_load_reg_imm (struct gdbarch
*gdbarch
, uint16_t insn1
,
6614 uint16_t insn2
, struct regcache
*regs
,
6615 struct displaced_step_closure
*dsc
,
6616 int writeback
, int immed
)
6618 unsigned int rt
= bits (insn2
, 12, 15);
6619 unsigned int rn
= bits (insn1
, 0, 3);
6620 unsigned int rm
= bits (insn2
, 0, 3); /* Only valid if !immed. */
6621 /* In LDR (register), there is also a register Rm, which is not allowed to
6622 be PC, so we don't have to check it. */
6624 if (rt
!= ARM_PC_REGNUM
&& rn
!= ARM_PC_REGNUM
)
6625 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "load",
6628 if (debug_displaced
)
6629 fprintf_unfiltered (gdb_stdlog
,
6630 "displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
6631 rt
, rn
, insn1
, insn2
);
6633 install_load_store (gdbarch
, regs
, dsc
, 1, immed
, writeback
, 4,
6636 dsc
->u
.ldst
.restore_r4
= 0;
6639 /* ldr[b]<cond> rt, [rn, #imm], etc.
6641 ldr[b]<cond> r0, [r2, #imm]. */
6643 dsc
->modinsn
[0] = (insn1
& 0xfff0) | 0x2;
6644 dsc
->modinsn
[1] = insn2
& 0x0fff;
6647 /* ldr[b]<cond> rt, [rn, rm], etc.
6649 ldr[b]<cond> r0, [r2, r3]. */
6651 dsc
->modinsn
[0] = (insn1
& 0xfff0) | 0x2;
6652 dsc
->modinsn
[1] = (insn2
& 0x0ff0) | 0x3;
6662 arm_copy_ldr_str_ldrb_strb (struct gdbarch
*gdbarch
, uint32_t insn
,
6663 struct regcache
*regs
,
6664 struct displaced_step_closure
*dsc
,
6665 int load
, int size
, int usermode
)
6667 int immed
= !bit (insn
, 25);
6668 int writeback
= (bit (insn
, 24) == 0 || bit (insn
, 21) != 0);
6669 unsigned int rt
= bits (insn
, 12, 15);
6670 unsigned int rn
= bits (insn
, 16, 19);
6671 unsigned int rm
= bits (insn
, 0, 3); /* Only valid if !immed. */
6673 if (!insn_references_pc (insn
, 0x000ff00ful
))
6674 return arm_copy_unmodified (gdbarch
, insn
, "load/store", dsc
);
6676 if (debug_displaced
)
6677 fprintf_unfiltered (gdb_stdlog
,
6678 "displaced: copying %s%s r%d [r%d] insn %.8lx\n",
6679 load
? (size
== 1 ? "ldrb" : "ldr")
6680 : (size
== 1 ? "strb" : "str"), usermode
? "t" : "",
6682 (unsigned long) insn
);
6684 install_load_store (gdbarch
, regs
, dsc
, load
, immed
, writeback
, size
,
6685 usermode
, rt
, rm
, rn
);
6687 if (load
|| rt
!= ARM_PC_REGNUM
)
6689 dsc
->u
.ldst
.restore_r4
= 0;
6692 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
6694 {ldr,str}[b]<cond> r0, [r2, #imm]. */
6695 dsc
->modinsn
[0] = (insn
& 0xfff00fff) | 0x20000;
6697 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
6699 {ldr,str}[b]<cond> r0, [r2, r3]. */
6700 dsc
->modinsn
[0] = (insn
& 0xfff00ff0) | 0x20003;
6704 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
6705 dsc
->u
.ldst
.restore_r4
= 1;
6706 dsc
->modinsn
[0] = 0xe92d8000; /* push {pc} */
6707 dsc
->modinsn
[1] = 0xe8bd0010; /* pop {r4} */
6708 dsc
->modinsn
[2] = 0xe044400f; /* sub r4, r4, pc. */
6709 dsc
->modinsn
[3] = 0xe2844008; /* add r4, r4, #8. */
6710 dsc
->modinsn
[4] = 0xe0800004; /* add r0, r0, r4. */
6714 dsc
->modinsn
[5] = (insn
& 0xfff00fff) | 0x20000;
6716 dsc
->modinsn
[5] = (insn
& 0xfff00ff0) | 0x20003;
6721 dsc
->cleanup
= load
? &cleanup_load
: &cleanup_store
;
6726 /* Cleanup LDM instructions with fully-populated register list. This is an
6727 unfortunate corner case: it's impossible to implement correctly by modifying
6728 the instruction. The issue is as follows: we have an instruction,
6732 which we must rewrite to avoid loading PC. A possible solution would be to
6733 do the load in two halves, something like (with suitable cleanup
6737 ldm[id][ab] r8!, {r0-r7}
6739 ldm[id][ab] r8, {r7-r14}
6742 but at present there's no suitable place for <temp>, since the scratch space
6743 is overwritten before the cleanup routine is called. For now, we simply
6744 emulate the instruction. */
6747 cleanup_block_load_all (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6748 struct displaced_step_closure
*dsc
)
6750 int inc
= dsc
->u
.block
.increment
;
6751 int bump_before
= dsc
->u
.block
.before
? (inc
? 4 : -4) : 0;
6752 int bump_after
= dsc
->u
.block
.before
? 0 : (inc
? 4 : -4);
6753 uint32_t regmask
= dsc
->u
.block
.regmask
;
6754 int regno
= inc
? 0 : 15;
6755 CORE_ADDR xfer_addr
= dsc
->u
.block
.xfer_addr
;
6756 int exception_return
= dsc
->u
.block
.load
&& dsc
->u
.block
.user
6757 && (regmask
& 0x8000) != 0;
6758 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6759 int do_transfer
= condition_true (dsc
->u
.block
.cond
, status
);
6760 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6765 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6766 sensible we can do here. Complain loudly. */
6767 if (exception_return
)
6768 error (_("Cannot single-step exception return"));
6770 /* We don't handle any stores here for now. */
6771 gdb_assert (dsc
->u
.block
.load
!= 0);
6773 if (debug_displaced
)
6774 fprintf_unfiltered (gdb_stdlog
, "displaced: emulating block transfer: "
6775 "%s %s %s\n", dsc
->u
.block
.load
? "ldm" : "stm",
6776 dsc
->u
.block
.increment
? "inc" : "dec",
6777 dsc
->u
.block
.before
? "before" : "after");
6784 while (regno
<= ARM_PC_REGNUM
&& (regmask
& (1 << regno
)) == 0)
6787 while (regno
>= 0 && (regmask
& (1 << regno
)) == 0)
6790 xfer_addr
+= bump_before
;
6792 memword
= read_memory_unsigned_integer (xfer_addr
, 4, byte_order
);
6793 displaced_write_reg (regs
, dsc
, regno
, memword
, LOAD_WRITE_PC
);
6795 xfer_addr
+= bump_after
;
6797 regmask
&= ~(1 << regno
);
6800 if (dsc
->u
.block
.writeback
)
6801 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, xfer_addr
,
/* NOTE(review): this block arrived through a garbled extraction — the
   function's storage class/return type, braces, the declarations of
   `pc_val' and `offset', and crucially the address adjustments under the
   two `if (...before)' tests are missing.  The adjustments determine where
   the displaced STM stored the PC for each addressing mode and cannot be
   safely guessed; recover the exact text from upstream GDB.  */
6805 /* Clean up an STM which included the PC in the register list. */
6808 cleanup_block_store_pc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
6809 struct displaced_step_closure
*dsc
)
/* Evaluate the original condition to learn whether the store ran.  */
6811 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6812 int store_executed
= condition_true (dsc
->u
.block
.cond
, status
);
6813 CORE_ADDR pc_stored_at
, transferred_regs
= bitcount (dsc
->u
.block
.regmask
);
6814 CORE_ADDR stm_insn_addr
;
6817 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
6819 /* If condition code fails, there's nothing else to do. */
6820 if (!store_executed
)
/* Compute where the displaced STM wrote the PC: highest address for
   incrementing modes, base address region for decrementing modes.  */
6823 if (dsc
->u
.block
.increment
)
6825 pc_stored_at
= dsc
->u
.block
.xfer_addr
+ 4 * transferred_regs
;
6827 if (dsc
->u
.block
.before
)
6832 pc_stored_at
= dsc
->u
.block
.xfer_addr
;
6834 if (dsc
->u
.block
.before
)
/* Read back the stored PC, derive the architecture's store offset from
   the scratch copy's address, and patch the proper original value in.  */
6838 pc_val
= read_memory_unsigned_integer (pc_stored_at
, 4, byte_order
);
6839 stm_insn_addr
= dsc
->scratch_base
;
6840 offset
= pc_val
- stm_insn_addr
;
6842 if (debug_displaced
)
6843 fprintf_unfiltered (gdb_stdlog
, "displaced: detected PC offset %.8lx for "
6844 "STM instruction\n", offset
);
6846 /* Rewrite the stored PC to the proper value for the non-displaced original
6848 write_memory_unsigned_integer (pc_stored_at
, 4, byte_order
,
6849 dsc
->insn_addr
+ offset
);
6852 /* Clean up an LDM which includes the PC in the register list. We clumped all
6853 the registers in the transferred list into a contiguous range r0...rX (to
6854 avoid loading PC directly and losing control of the debugged program), so we
6855 must undo that here. */
6858 cleanup_block_load_pc (struct gdbarch
*gdbarch
,
6859 struct regcache
*regs
,
6860 struct displaced_step_closure
*dsc
)
6862 uint32_t status
= displaced_read_reg (regs
, dsc
, ARM_PS_REGNUM
);
6863 int load_executed
= condition_true (dsc
->u
.block
.cond
, status
), i
;
6864 unsigned int mask
= dsc
->u
.block
.regmask
, write_reg
= ARM_PC_REGNUM
;
6865 unsigned int regs_loaded
= bitcount (mask
);
6866 unsigned int num_to_shuffle
= regs_loaded
, clobbered
;
6868 /* The method employed here will fail if the register list is fully populated
6869 (we need to avoid loading PC directly). */
6870 gdb_assert (num_to_shuffle
< 16);
6875 clobbered
= (1 << num_to_shuffle
) - 1;
6877 while (num_to_shuffle
> 0)
6879 if ((mask
& (1 << write_reg
)) != 0)
6881 unsigned int read_reg
= num_to_shuffle
- 1;
6883 if (read_reg
!= write_reg
)
6885 ULONGEST rval
= displaced_read_reg (regs
, dsc
, read_reg
);
6886 displaced_write_reg (regs
, dsc
, write_reg
, rval
, LOAD_WRITE_PC
);
6887 if (debug_displaced
)
6888 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: move "
6889 "loaded register r%d to r%d\n"), read_reg
,
6892 else if (debug_displaced
)
6893 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: register "
6894 "r%d already in the right place\n"),
6897 clobbered
&= ~(1 << write_reg
);
6905 /* Restore any registers we scribbled over. */
6906 for (write_reg
= 0; clobbered
!= 0; write_reg
++)
6908 if ((clobbered
& (1 << write_reg
)) != 0)
6910 displaced_write_reg (regs
, dsc
, write_reg
, dsc
->tmp
[write_reg
],
6912 if (debug_displaced
)
6913 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM: restored "
6914 "clobbered register r%d\n"), write_reg
);
6915 clobbered
&= ~(1 << write_reg
);
6919 /* Perform register writeback manually. */
6920 if (dsc
->u
.block
.writeback
)
6922 ULONGEST new_rn_val
= dsc
->u
.block
.xfer_addr
;
6924 if (dsc
->u
.block
.increment
)
6925 new_rn_val
+= regs_loaded
* 4;
6927 new_rn_val
-= regs_loaded
* 4;
6929 displaced_write_reg (regs
, dsc
, dsc
->u
.block
.rn
, new_rn_val
,
6934 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6935 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6938 arm_copy_block_xfer (struct gdbarch
*gdbarch
, uint32_t insn
,
6939 struct regcache
*regs
,
6940 struct displaced_step_closure
*dsc
)
6942 int load
= bit (insn
, 20);
6943 int user
= bit (insn
, 22);
6944 int increment
= bit (insn
, 23);
6945 int before
= bit (insn
, 24);
6946 int writeback
= bit (insn
, 21);
6947 int rn
= bits (insn
, 16, 19);
6949 /* Block transfers which don't mention PC can be run directly
6951 if (rn
!= ARM_PC_REGNUM
&& (insn
& 0x8000) == 0)
6952 return arm_copy_unmodified (gdbarch
, insn
, "ldm/stm", dsc
);
6954 if (rn
== ARM_PC_REGNUM
)
6956 warning (_("displaced: Unpredictable LDM or STM with "
6957 "base register r15"));
6958 return arm_copy_unmodified (gdbarch
, insn
, "unpredictable ldm/stm", dsc
);
6961 if (debug_displaced
)
6962 fprintf_unfiltered (gdb_stdlog
, "displaced: copying block transfer insn "
6963 "%.8lx\n", (unsigned long) insn
);
6965 dsc
->u
.block
.xfer_addr
= displaced_read_reg (regs
, dsc
, rn
);
6966 dsc
->u
.block
.rn
= rn
;
6968 dsc
->u
.block
.load
= load
;
6969 dsc
->u
.block
.user
= user
;
6970 dsc
->u
.block
.increment
= increment
;
6971 dsc
->u
.block
.before
= before
;
6972 dsc
->u
.block
.writeback
= writeback
;
6973 dsc
->u
.block
.cond
= bits (insn
, 28, 31);
6975 dsc
->u
.block
.regmask
= insn
& 0xffff;
6979 if ((insn
& 0xffff) == 0xffff)
6981 /* LDM with a fully-populated register list. This case is
6982 particularly tricky. Implement for now by fully emulating the
6983 instruction (which might not behave perfectly in all cases, but
6984 these instructions should be rare enough for that not to matter
6986 dsc
->modinsn
[0] = ARM_NOP
;
6988 dsc
->cleanup
= &cleanup_block_load_all
;
6992 /* LDM of a list of registers which includes PC. Implement by
6993 rewriting the list of registers to be transferred into a
6994 contiguous chunk r0...rX before doing the transfer, then shuffling
6995 registers into the correct places in the cleanup routine. */
6996 unsigned int regmask
= insn
& 0xffff;
6997 unsigned int num_in_list
= bitcount (regmask
), new_regmask
, bit
= 1;
6998 unsigned int to
= 0, from
= 0, i
, new_rn
;
7000 for (i
= 0; i
< num_in_list
; i
++)
7001 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
7003 /* Writeback makes things complicated. We need to avoid clobbering
7004 the base register with one of the registers in our modified
7005 register list, but just using a different register can't work in
7008 ldm r14!, {r0-r13,pc}
7010 which would need to be rewritten as:
7014 but that can't work, because there's no free register for N.
7016 Solve this by turning off the writeback bit, and emulating
7017 writeback manually in the cleanup routine. */
7022 new_regmask
= (1 << num_in_list
) - 1;
7024 if (debug_displaced
)
7025 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM r%d%s, "
7026 "{..., pc}: original reg list %.4x, modified "
7027 "list %.4x\n"), rn
, writeback
? "!" : "",
7028 (int) insn
& 0xffff, new_regmask
);
7030 dsc
->modinsn
[0] = (insn
& ~0xffff) | (new_regmask
& 0xffff);
7032 dsc
->cleanup
= &cleanup_block_load_pc
;
7037 /* STM of a list of registers which includes PC. Run the instruction
7038 as-is, but out of line: this will store the wrong value for the PC,
7039 so we must manually fix up the memory in the cleanup routine.
7040 Doing things this way has the advantage that we can auto-detect
7041 the offset of the PC write (which is architecture-dependent) in
7042 the cleanup routine. */
7043 dsc
->modinsn
[0] = insn
;
7045 dsc
->cleanup
= &cleanup_block_store_pc
;
7052 thumb2_copy_block_xfer (struct gdbarch
*gdbarch
, uint16_t insn1
, uint16_t insn2
,
7053 struct regcache
*regs
,
7054 struct displaced_step_closure
*dsc
)
7056 int rn
= bits (insn1
, 0, 3);
7057 int load
= bit (insn1
, 4);
7058 int writeback
= bit (insn1
, 5);
7060 /* Block transfers which don't mention PC can be run directly
7062 if (rn
!= ARM_PC_REGNUM
&& (insn2
& 0x8000) == 0)
7063 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
, "ldm/stm", dsc
);
7065 if (rn
== ARM_PC_REGNUM
)
7067 warning (_("displaced: Unpredictable LDM or STM with "
7068 "base register r15"));
7069 return thumb_copy_unmodified_32bit (gdbarch
, insn1
, insn2
,
7070 "unpredictable ldm/stm", dsc
);
7073 if (debug_displaced
)
7074 fprintf_unfiltered (gdb_stdlog
, "displaced: copying block transfer insn "
7075 "%.4x%.4x\n", insn1
, insn2
);
7077 /* Clear bit 13, since it should be always zero. */
7078 dsc
->u
.block
.regmask
= (insn2
& 0xdfff);
7079 dsc
->u
.block
.rn
= rn
;
7081 dsc
->u
.block
.load
= load
;
7082 dsc
->u
.block
.user
= 0;
7083 dsc
->u
.block
.increment
= bit (insn1
, 7);
7084 dsc
->u
.block
.before
= bit (insn1
, 8);
7085 dsc
->u
.block
.writeback
= writeback
;
7086 dsc
->u
.block
.cond
= INST_AL
;
7087 dsc
->u
.block
.xfer_addr
= displaced_read_reg (regs
, dsc
, rn
);
7091 if (dsc
->u
.block
.regmask
== 0xffff)
7093 /* This branch is impossible to happen. */
7098 unsigned int regmask
= dsc
->u
.block
.regmask
;
7099 unsigned int num_in_list
= bitcount (regmask
), new_regmask
, bit
= 1;
7100 unsigned int to
= 0, from
= 0, i
, new_rn
;
7102 for (i
= 0; i
< num_in_list
; i
++)
7103 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
7108 new_regmask
= (1 << num_in_list
) - 1;
7110 if (debug_displaced
)
7111 fprintf_unfiltered (gdb_stdlog
, _("displaced: LDM r%d%s, "
7112 "{..., pc}: original reg list %.4x, modified "
7113 "list %.4x\n"), rn
, writeback
? "!" : "",
7114 (int) dsc
->u
.block
.regmask
, new_regmask
);
7116 dsc
->modinsn
[0] = insn1
;
7117 dsc
->modinsn
[1] = (new_regmask
& 0xffff);
7120 dsc
->cleanup
= &cleanup_block_load_pc
;
7125 dsc
->modinsn
[0] = insn1
;
7126 dsc
->modinsn
[1] = insn2
;
7128 dsc
->cleanup
= &cleanup_block_store_pc
;
7133 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
7134 for Linux, where some SVC instructions must be treated specially. */
7137 cleanup_svc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7138 struct displaced_step_closure
*dsc
)
7140 CORE_ADDR resume_addr
= dsc
->insn_addr
+ dsc
->insn_size
;
7142 if (debug_displaced
)
7143 fprintf_unfiltered (gdb_stdlog
, "displaced: cleanup for svc, resume at "
7144 "%.8lx\n", (unsigned long) resume_addr
);
7146 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, resume_addr
, BRANCH_WRITE_PC
);
7150 /* Common copy routine for svc instruciton. */
7153 install_svc (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7154 struct displaced_step_closure
*dsc
)
7156 /* Preparation: none.
7157 Insn: unmodified svc.
7158 Cleanup: pc <- insn_addr + insn_size. */
7160 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
7162 dsc
->wrote_to_pc
= 1;
7164 /* Allow OS-specific code to override SVC handling. */
7165 if (dsc
->u
.svc
.copy_svc_os
)
7166 return dsc
->u
.svc
.copy_svc_os (gdbarch
, regs
, dsc
);
7169 dsc
->cleanup
= &cleanup_svc
;
7175 arm_copy_svc (struct gdbarch
*gdbarch
, uint32_t insn
,
7176 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
7179 if (debug_displaced
)
7180 fprintf_unfiltered (gdb_stdlog
, "displaced: copying svc insn %.8lx\n",
7181 (unsigned long) insn
);
7183 dsc
->modinsn
[0] = insn
;
7185 return install_svc (gdbarch
, regs
, dsc
);
7189 thumb_copy_svc (struct gdbarch
*gdbarch
, uint16_t insn
,
7190 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
7193 if (debug_displaced
)
7194 fprintf_unfiltered (gdb_stdlog
, "displaced: copying svc insn %.4x\n",
7197 dsc
->modinsn
[0] = insn
;
7199 return install_svc (gdbarch
, regs
, dsc
);
7202 /* Copy undefined instructions. */
7205 arm_copy_undef (struct gdbarch
*gdbarch
, uint32_t insn
,
7206 struct displaced_step_closure
*dsc
)
7208 if (debug_displaced
)
7209 fprintf_unfiltered (gdb_stdlog
,
7210 "displaced: copying undefined insn %.8lx\n",
7211 (unsigned long) insn
);
7213 dsc
->modinsn
[0] = insn
;
7219 thumb_32bit_copy_undef (struct gdbarch
*gdbarch
, uint16_t insn1
, uint16_t insn2
,
7220 struct displaced_step_closure
*dsc
)
7223 if (debug_displaced
)
7224 fprintf_unfiltered (gdb_stdlog
, "displaced: copying undefined insn "
7225 "%.4x %.4x\n", (unsigned short) insn1
,
7226 (unsigned short) insn2
);
7228 dsc
->modinsn
[0] = insn1
;
7229 dsc
->modinsn
[1] = insn2
;
7235 /* Copy unpredictable instructions. */
7238 arm_copy_unpred (struct gdbarch
*gdbarch
, uint32_t insn
,
7239 struct displaced_step_closure
*dsc
)
7241 if (debug_displaced
)
7242 fprintf_unfiltered (gdb_stdlog
, "displaced: copying unpredictable insn "
7243 "%.8lx\n", (unsigned long) insn
);
7245 dsc
->modinsn
[0] = insn
;
/* The decode_* functions are instruction decoding helpers.  They mostly follow
   the presentation in the ARM ARM.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
                              struct regcache *regs,
                              struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
                                dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      if (rn != 0xf)
        return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
        return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
        return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
        return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
        /* pld/pldw reg.  */
        return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
        return arm_copy_unpred (gdbarch, insn, dsc);
      default:
        return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
/* Decode the ARM "unconditional" instruction space (condition field 0b1111).  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
                          struct regcache *regs,
                          struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
        {
        case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
          /* stc/stc2.  */
          return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

        case 0x2:
          return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

        default:
          return arm_copy_undef (gdbarch, insn, dsc);
        }

    case 0x9:
      {
        int rn_f = (bits (insn, 16, 19) == 0xf);
        switch ((insn & 0xe00000) >> 21)
          {
          case 0x1: case 0x3:
            /* ldc/ldc2 imm (undefined for rn == pc).  */
            return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
                        : arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

          case 0x2:
            return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

          case 0x4: case 0x5: case 0x6: case 0x7:
            /* ldc/ldc2 lit (undefined for rn != pc).  */
            return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
                        : arm_copy_undef (gdbarch, insn, dsc);

          default:
            return arm_copy_undef (gdbarch, insn, dsc);
          }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
        /* ldc/ldc2 lit.  */
        return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
        return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
        return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
        return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
        return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
        return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
/* Decode miscellaneous instructions in dp/misc encoding space.  */

static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
                          struct regcache *regs,
                          struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);
  unsigned int op1 = bits (insn, 16, 19);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
        return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
        return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
        return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
        /* Not really supported.  */
        return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
        return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
        return arm_copy_bx_blx_reg (gdbarch, insn,
                                    regs, dsc);  /* blx register.  */
      else
        return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
        return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
        /* Not really supported.  */
        return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      /* fallthrough: remaining op values are undefined.  */

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
/* Decode the ARM data-processing / miscellaneous instruction space.  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
                    struct regcache *regs,
                    struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:  /* movw.  */
        return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:  /* movt.  */
        return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
        return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
        return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
        return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
        return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
        return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
        return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
        return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
        return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
        /* 2nd arg means "unprivileged".  */
        return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
                                     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Decode ARM load/store word and unsigned byte instructions.  A is bit 25
   (register vs immediate offset), B is bit 4; the final three arguments to
   arm_copy_ldr_str_ldrb_strb are load/store flag, transfer size, and
   user-mode ("t"-suffixed) flag.  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
                             struct regcache *regs,
                             struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
           || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
           || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
           || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
           || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
           || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
           || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
           || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
/* Decode the ARM media instruction space.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
                  struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
                                  "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
        {
          if (bits (insn, 12, 15) == 0xf)
            return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
          else
            return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
        }
      else
        return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
        return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
        return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
        {
          if (bits (insn, 0, 3) == 0xf)
            return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
          else
            return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
        }
      else
        return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
        return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
        return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Dispatch branch/branch-link vs. load/store-multiple on bit 25.  */

static int
arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
                        struct regcache *regs,
                        struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
  else
    return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
}
/* Decode ARM VFP/Neon extension-register load/store instructions.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
                          struct regcache *regs,
                          struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
         zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
/* Decode shifted register instructions.  */

static int
thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
                            uint16_t insn2,  struct regcache *regs,
                            struct displaced_step_closure *dsc)
{
  /* PC is only allowed to be used in instruction MOV.  */

  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn = bits (insn1, 0, 3);

  if (op == 0x2 && rn == 0xf) /* MOV */
    return thumb2_copy_alu_imm (gdbarch,  insn1, insn2, regs, dsc);
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                        "dp (shift reg)", dsc);
}
/* Decode extension register load/store.  Exactly the same as
   arm_decode_ext_reg_ld_st.  */

static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
                             uint16_t insn2,  struct regcache *regs,
                             struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                          "vfp/neon vmov", dsc);

    case 0x08: case 0x0c: /* 01x00 */
    case 0x0a: case 0x0e: /* 01x10 */
    case 0x12: case 0x16: /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                          "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d: /* 01x01 */
    case 0x0b: case 0x0f: /* 01x11 */
    case 0x13: case 0x17: /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                          "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                          "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7691 arm_decode_svc_copro (struct gdbarch
*gdbarch
, uint32_t insn
, CORE_ADDR to
,
7692 struct regcache
*regs
, struct displaced_step_closure
*dsc
)
7694 unsigned int op1
= bits (insn
, 20, 25);
7695 int op
= bit (insn
, 4);
7696 unsigned int coproc
= bits (insn
, 8, 11);
7697 unsigned int rn
= bits (insn
, 16, 19);
7699 if ((op1
& 0x20) == 0x00 && (op1
& 0x3a) != 0x00 && (coproc
& 0xe) == 0xa)
7700 return arm_decode_ext_reg_ld_st (gdbarch
, insn
, regs
, dsc
);
7701 else if ((op1
& 0x21) == 0x00 && (op1
& 0x3a) != 0x00
7702 && (coproc
& 0xe) != 0xa)
7704 return arm_copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
7705 else if ((op1
& 0x21) == 0x01 && (op1
& 0x3a) != 0x00
7706 && (coproc
& 0xe) != 0xa)
7707 /* ldc/ldc2 imm/lit. */
7708 return arm_copy_copro_load_store (gdbarch
, insn
, regs
, dsc
);
7709 else if ((op1
& 0x3e) == 0x00)
7710 return arm_copy_undef (gdbarch
, insn
, dsc
);
7711 else if ((op1
& 0x3e) == 0x04 && (coproc
& 0xe) == 0xa)
7712 return arm_copy_unmodified (gdbarch
, insn
, "neon 64bit xfer", dsc
);
7713 else if (op1
== 0x04 && (coproc
& 0xe) != 0xa)
7714 return arm_copy_unmodified (gdbarch
, insn
, "mcrr/mcrr2", dsc
);
7715 else if (op1
== 0x05 && (coproc
& 0xe) != 0xa)
7716 return arm_copy_unmodified (gdbarch
, insn
, "mrrc/mrrc2", dsc
);
7717 else if ((op1
& 0x30) == 0x20 && !op
)
7719 if ((coproc
& 0xe) == 0xa)
7720 return arm_copy_unmodified (gdbarch
, insn
, "vfp dataproc", dsc
);
7722 return arm_copy_unmodified (gdbarch
, insn
, "cdp/cdp2", dsc
);
7724 else if ((op1
& 0x30) == 0x20 && op
)
7725 return arm_copy_unmodified (gdbarch
, insn
, "neon 8/16/32 bit xfer", dsc
);
7726 else if ((op1
& 0x31) == 0x20 && op
&& (coproc
& 0xe) != 0xa)
7727 return arm_copy_unmodified (gdbarch
, insn
, "mcr/mcr2", dsc
);
7728 else if ((op1
& 0x31) == 0x21 && op
&& (coproc
& 0xe) != 0xa)
7729 return arm_copy_unmodified (gdbarch
, insn
, "mrc/mrc2", dsc
);
7730 else if ((op1
& 0x30) == 0x30)
7731 return arm_copy_svc (gdbarch
, insn
, regs
, dsc
);
7733 return arm_copy_undef (gdbarch
, insn
, dsc
); /* Possibly unreachable. */
/* Decode the Thumb-2 coprocessor/SIMD instruction space.  */

static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
                         uint16_t insn2, struct regcache *regs,
                         struct displaced_step_closure *dsc)
{
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int op1 = bits (insn1, 4, 9);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);
  unsigned int rn = bits (insn1, 0, 3);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
        return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                            "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
                                            dsc);
      else if (bit_5_8 == 0) /* UNDEFINED.  */
        return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
        {
          /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
          if ((coproc & 0xe) == 0xa)
            return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
                                                dsc);
          else /* coproc is not 101x.  */
            {
              if (bit_4 == 0) /* STC/STC2.  */
                return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                    "stc/stc2", dsc);
              else /* LDC/LDC2 {literal, immediate}.  */
                return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
                                                     regs, dsc);
            }
        }
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  return 0;
}
7780 install_pc_relative (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7781 struct displaced_step_closure
*dsc
, int rd
)
7787 Preparation: Rd <- PC
7793 int val
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
7794 displaced_write_reg (regs
, dsc
, rd
, val
, CANNOT_WRITE_PC
);
7798 thumb_copy_pc_relative_16bit (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7799 struct displaced_step_closure
*dsc
,
7800 int rd
, unsigned int imm
)
7803 /* Encoding T2: ADDS Rd, #imm */
7804 dsc
->modinsn
[0] = (0x3000 | (rd
<< 8) | imm
);
7806 install_pc_relative (gdbarch
, regs
, dsc
, rd
);
7812 thumb_decode_pc_relative_16bit (struct gdbarch
*gdbarch
, uint16_t insn
,
7813 struct regcache
*regs
,
7814 struct displaced_step_closure
*dsc
)
7816 unsigned int rd
= bits (insn
, 8, 10);
7817 unsigned int imm8
= bits (insn
, 0, 7);
7819 if (debug_displaced
)
7820 fprintf_unfiltered (gdb_stdlog
,
7821 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
7824 return thumb_copy_pc_relative_16bit (gdbarch
, regs
, dsc
, rd
, imm8
);
7828 thumb_copy_pc_relative_32bit (struct gdbarch
*gdbarch
, uint16_t insn1
,
7829 uint16_t insn2
, struct regcache
*regs
,
7830 struct displaced_step_closure
*dsc
)
7832 unsigned int rd
= bits (insn2
, 8, 11);
7833 /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
7834 extract raw immediate encoding rather than computing immediate. When
7835 generating ADD or SUB instruction, we can simply perform OR operation to
7836 set immediate into ADD. */
7837 unsigned int imm_3_8
= insn2
& 0x70ff;
7838 unsigned int imm_i
= insn1
& 0x0400; /* Clear all bits except bit 10. */
7840 if (debug_displaced
)
7841 fprintf_unfiltered (gdb_stdlog
,
7842 "displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
7843 rd
, imm_i
, imm_3_8
, insn1
, insn2
);
7845 if (bit (insn1
, 7)) /* Encoding T2 */
7847 /* Encoding T3: SUB Rd, Rd, #imm */
7848 dsc
->modinsn
[0] = (0xf1a0 | rd
| imm_i
);
7849 dsc
->modinsn
[1] = ((rd
<< 8) | imm_3_8
);
7851 else /* Encoding T3 */
7853 /* Encoding T3: ADD Rd, Rd, #imm */
7854 dsc
->modinsn
[0] = (0xf100 | rd
| imm_i
);
7855 dsc
->modinsn
[1] = ((rd
<< 8) | imm_3_8
);
7859 install_pc_relative (gdbarch
, regs
, dsc
, rd
);
7865 thumb_copy_16bit_ldr_literal (struct gdbarch
*gdbarch
, unsigned short insn1
,
7866 struct regcache
*regs
,
7867 struct displaced_step_closure
*dsc
)
7869 unsigned int rt
= bits (insn1
, 8, 10);
7871 int imm8
= (bits (insn1
, 0, 7) << 2);
7872 CORE_ADDR from
= dsc
->insn_addr
;
7878 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
7880 Insn: LDR R0, [R2, R3];
7881 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
7883 if (debug_displaced
)
7884 fprintf_unfiltered (gdb_stdlog
,
7885 "displaced: copying thumb ldr r%d [pc #%d]\n"
7888 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 0);
7889 dsc
->tmp
[2] = displaced_read_reg (regs
, dsc
, 2);
7890 dsc
->tmp
[3] = displaced_read_reg (regs
, dsc
, 3);
7891 pc
= displaced_read_reg (regs
, dsc
, ARM_PC_REGNUM
);
7892 /* The assembler calculates the required value of the offset from the
7893 Align(PC,4) value of this instruction to the label. */
7894 pc
= pc
& 0xfffffffc;
7896 displaced_write_reg (regs
, dsc
, 2, pc
, CANNOT_WRITE_PC
);
7897 displaced_write_reg (regs
, dsc
, 3, imm8
, CANNOT_WRITE_PC
);
7900 dsc
->u
.ldst
.xfersize
= 4;
7902 dsc
->u
.ldst
.immed
= 0;
7903 dsc
->u
.ldst
.writeback
= 0;
7904 dsc
->u
.ldst
.restore_r4
= 0;
7906 dsc
->modinsn
[0] = 0x58d0; /* ldr r0, [r2, r3]*/
7908 dsc
->cleanup
= &cleanup_load
;
7913 /* Copy Thumb cbnz/cbz insruction. */
7916 thumb_copy_cbnz_cbz (struct gdbarch
*gdbarch
, uint16_t insn1
,
7917 struct regcache
*regs
,
7918 struct displaced_step_closure
*dsc
)
7920 int non_zero
= bit (insn1
, 11);
7921 unsigned int imm5
= (bit (insn1
, 9) << 6) | (bits (insn1
, 3, 7) << 1);
7922 CORE_ADDR from
= dsc
->insn_addr
;
7923 int rn
= bits (insn1
, 0, 2);
7924 int rn_val
= displaced_read_reg (regs
, dsc
, rn
);
7926 dsc
->u
.branch
.cond
= (rn_val
&& non_zero
) || (!rn_val
&& !non_zero
);
7927 /* CBNZ and CBZ do not affect the condition flags. If condition is true,
7928 set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
7929 condition is false, let it be, cleanup_branch will do nothing. */
7930 if (dsc
->u
.branch
.cond
)
7932 dsc
->u
.branch
.cond
= INST_AL
;
7933 dsc
->u
.branch
.dest
= from
+ 4 + imm5
;
7936 dsc
->u
.branch
.dest
= from
+ 2;
7938 dsc
->u
.branch
.link
= 0;
7939 dsc
->u
.branch
.exchange
= 0;
7941 if (debug_displaced
)
7942 fprintf_unfiltered (gdb_stdlog
, "displaced: copying %s [r%d = 0x%x]"
7943 " insn %.4x to %.8lx\n", non_zero
? "cbnz" : "cbz",
7944 rn
, rn_val
, insn1
, dsc
->u
.branch
.dest
);
7946 dsc
->modinsn
[0] = THUMB_NOP
;
7948 dsc
->cleanup
= &cleanup_branch
;
7952 /* Copy Table Branch Byte/Halfword */
7954 thumb2_copy_table_branch (struct gdbarch
*gdbarch
, uint16_t insn1
,
7955 uint16_t insn2
, struct regcache
*regs
,
7956 struct displaced_step_closure
*dsc
)
7958 ULONGEST rn_val
, rm_val
;
7959 int is_tbh
= bit (insn2
, 4);
7960 CORE_ADDR halfwords
= 0;
7961 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
7963 rn_val
= displaced_read_reg (regs
, dsc
, bits (insn1
, 0, 3));
7964 rm_val
= displaced_read_reg (regs
, dsc
, bits (insn2
, 0, 3));
7970 target_read_memory (rn_val
+ 2 * rm_val
, buf
, 2);
7971 halfwords
= extract_unsigned_integer (buf
, 2, byte_order
);
7977 target_read_memory (rn_val
+ rm_val
, buf
, 1);
7978 halfwords
= extract_unsigned_integer (buf
, 1, byte_order
);
7981 if (debug_displaced
)
7982 fprintf_unfiltered (gdb_stdlog
, "displaced: %s base 0x%x offset 0x%x"
7983 " offset 0x%x\n", is_tbh
? "tbh" : "tbb",
7984 (unsigned int) rn_val
, (unsigned int) rm_val
,
7985 (unsigned int) halfwords
);
7987 dsc
->u
.branch
.cond
= INST_AL
;
7988 dsc
->u
.branch
.link
= 0;
7989 dsc
->u
.branch
.exchange
= 0;
7990 dsc
->u
.branch
.dest
= dsc
->insn_addr
+ 4 + 2 * halfwords
;
7992 dsc
->cleanup
= &cleanup_branch
;
7998 cleanup_pop_pc_16bit_all (struct gdbarch
*gdbarch
, struct regcache
*regs
,
7999 struct displaced_step_closure
*dsc
)
8002 int val
= displaced_read_reg (regs
, dsc
, 7);
8003 displaced_write_reg (regs
, dsc
, ARM_PC_REGNUM
, val
, BX_WRITE_PC
);
8006 val
= displaced_read_reg (regs
, dsc
, 8);
8007 displaced_write_reg (regs
, dsc
, 7, val
, CANNOT_WRITE_PC
);
8010 displaced_write_reg (regs
, dsc
, 8, dsc
->tmp
[0], CANNOT_WRITE_PC
);
8015 thumb_copy_pop_pc_16bit (struct gdbarch
*gdbarch
, unsigned short insn1
,
8016 struct regcache
*regs
,
8017 struct displaced_step_closure
*dsc
)
8019 dsc
->u
.block
.regmask
= insn1
& 0x00ff;
8021 /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
8024 (1) register list is full, that is, r0-r7 are used.
8025 Prepare: tmp[0] <- r8
8027 POP {r0, r1, ...., r6, r7}; remove PC from reglist
8028 MOV r8, r7; Move value of r7 to r8;
8029 POP {r7}; Store PC value into r7.
8031 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
8033 (2) register list is not full, supposing there are N registers in
8034 register list (except PC, 0 <= N <= 7).
8035 Prepare: for each i, 0 - N, tmp[i] <- ri.
8037 POP {r0, r1, ...., rN};
8039 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
8040 from tmp[] properly.
8042 if (debug_displaced
)
8043 fprintf_unfiltered (gdb_stdlog
,
8044 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
8045 dsc
->u
.block
.regmask
, insn1
);
8047 if (dsc
->u
.block
.regmask
== 0xff)
8049 dsc
->tmp
[0] = displaced_read_reg (regs
, dsc
, 8);
8051 dsc
->modinsn
[0] = (insn1
& 0xfeff); /* POP {r0,r1,...,r6, r7} */
8052 dsc
->modinsn
[1] = 0x46b8; /* MOV r8, r7 */
8053 dsc
->modinsn
[2] = 0xbc80; /* POP {r7} */
8056 dsc
->cleanup
= &cleanup_pop_pc_16bit_all
;
8060 unsigned int num_in_list
= bitcount (dsc
->u
.block
.regmask
);
8061 unsigned int new_regmask
, bit
= 1;
8062 unsigned int to
= 0, from
= 0, i
, new_rn
;
8064 for (i
= 0; i
< num_in_list
+ 1; i
++)
8065 dsc
->tmp
[i
] = displaced_read_reg (regs
, dsc
, i
);
8067 new_regmask
= (1 << (num_in_list
+ 1)) - 1;
8069 if (debug_displaced
)
8070 fprintf_unfiltered (gdb_stdlog
, _("displaced: POP "
8071 "{..., pc}: original reg list %.4x,"
8072 " modified list %.4x\n"),
8073 (int) dsc
->u
.block
.regmask
, new_regmask
);
8075 dsc
->u
.block
.regmask
|= 0x8000;
8076 dsc
->u
.block
.writeback
= 0;
8077 dsc
->u
.block
.cond
= INST_AL
;
8079 dsc
->modinsn
[0] = (insn1
& ~0x1ff) | (new_regmask
& 0xff);
8081 dsc
->cleanup
= &cleanup_block_load_pc
;
/* Top-level dispatch for displaced-stepping a 16-bit Thumb instruction.
   Any decode failure is a fatal internal error.  */

static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
                                    struct regcache *regs,
                                    struct displaced_step_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions.  */
  switch (op_bit_12_15)
    {
      /* Shift (imme), add, subtract, move and compare.  */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
                                         "shift/add/sub/mov/cmp",
                                         dsc);
      break;
    case 4:
      switch (op_bit_10_11)
        {
        case 0: /* Data-processing */
          err = thumb_copy_unmodified_16bit (gdbarch, insn1,
                                             "data-processing",
                                             dsc);
          break;
        case 1: /* Special data instructions and branch and exchange.  */
          {
            unsigned short op = bits (insn1, 7, 9);
            if (op == 6 || op == 7) /* BX or BLX */
              err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
            else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
              err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
            else
              err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
                                                 dsc);
          }
          break;
        default: /* LDR (literal) */
          err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
        }
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address */
        err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
        err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
        switch (bits (insn1, 8, 11))
          {
          case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
            err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
            break;
          case 12: case 13: /* POP */
            if (bit (insn1, 8)) /* PC is in register list.  */
              err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
            else
              err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
            break;
          case 15: /* If-Then, and hints */
            if (bits (insn1, 0, 3))
              /* If-Then makes up to four following instructions conditional.
                 IT instruction itself is not conditional, so handle it as a
                 common unmodified instruction.  */
              err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
                                                 dsc);
            else
              err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
            break;
          default:
            err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
          }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
        err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
        err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
        err = thumb_copy_b (gdbarch, insn1, dsc);
      else
        err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
                    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
/* Decode 32-bit Thumb loads and memory hints (PLD/PLI, LDRB/LDRH/LDR and
   their literal, register, immediate and unprivileged forms).  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
                                 uint16_t insn1, uint16_t insn2,
                                 struct regcache *regs,
                                 struct displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);
  int rn = bits (insn1, 0, 3);
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
        {
          if (rn == 0xf)
            /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
            return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
          else
            return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                "pli/pld", dsc);
        }
      else
        {
          if (rn == 0xf) /* LDRB/LDRSB (literal) */
            return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
                                             1);
          else
            return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                "ldrb{reg, immediate}/ldrbt",
                                                dsc);
        }
      break;

    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
        return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                            "pld/unalloc memhint", dsc);
      else
        {
          if (rn == 0xf) /* LDRH/LDRSH (literal) */
            return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
                                             2);
          else
            return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                "ldrh/ldrht", dsc);
        }
      break;

    case 2: /* Load word */
      {
        int insn2_bit_8_11 = bits (insn2, 8, 11);

        if (rn == 0xf) /* LDR (literal) */
          return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
        else if (op1 == 0x1) /* Encoding T3 */
          return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
                                           0, 1);
        else /* op1 == 0x0 */
          {
            if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
              /* LDR (immediate) */
              return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
                                               dsc, bit (insn2, 8), 1);
            else if (insn2_bit_8_11 == 0xe) /* LDRT */
              return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                  "ldrt", dsc);
            else
              /* LDR (register) */
              return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
                                               dsc, 0, 0);
          }
        break;
      }
    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      break;
    }
  return 0;
}
/* Decode and dispatch a 32-bit Thumb-2 instruction (halfwords INSN1,
   INSN2) for displaced stepping.  Raises an internal error if the
   instruction cannot be decoded.  */

static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
	switch (bits (insn1, 9, 10))
	  {
	  case 0:
	    if (bit (insn1, 6))
	      {
		/* Load/store {dual, execlusive}, table branch.  */
		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
		    && bits (insn2, 5, 7) == 0)
		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
						  dsc);
		else
		  /* PC is not allowed to use in load/store {dual, exclusive}
		     instructions.  */
		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						     "load/store dual/ex",
						     dsc);
	      }
	    else /* load/store multiple */
	      {
		switch (bits (insn1, 7, 8))
		  {
		  case 0: case 3: /* SRS, RFE */
		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						       "srs/rfe", dsc);
		    break;
		  case 1: case 2: /* LDM/STM/PUSH/POP */
		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2,
						  regs, dsc);
		    break;
		  }
	      }
	    break;

	  case 1:
	    /* Data-processing (shift register).  */
	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
					      dsc);
	    break;
	  default: /* Coprocessor instructions.  */
	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	    break;
	  }
	break;
      }

    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
	{
	  if (bit (insn2, 14)  /* BLX/BL */
	      || bit (insn2, 12) /* Unconditional branch */
	      || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
	  else
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "misc ctrl", dsc);
	}
      else
	{
	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
	    {
	      int op = bits (insn1, 4, 8);
	      int rn = bits (insn1, 0, 3);

	      /* ADR / ADDW / SUBW with Rn == PC are PC-relative.  */
	      if ((op == 0 || op == 0xa) && rn == 0xf)
		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
						    regs, dsc);
	      else
		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						   "dp/pb", dsc);
	    }
	  else /* Data processing (modified immeidate) */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "dp/mi", dsc);
	}
      break;

    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
	{
	case 0:
	  if (bit (insn1, 4))
	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
						   regs, dsc);
	  else /* NEON Load/Store and Store single data item */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "neon elt/struct load/store",
					       dsc);
	  break;
	case 1: /* op1 = 3, bits (9, 10) == 1 */
	  switch (bits (insn1, 7, 8))
	    {
	    case 0: case 1: /* Data processing (register) */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "dp(reg)", dsc);
	      break;
	    case 2: /* Multiply and absolute difference */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "mul/mua/diff", dsc);
	      break;
	    case 3: /* Long multiply and divide */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "lmul/lmua", dsc);
	      break;
	    }
	  break;
	default: /* Coprocessor instructions */
	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	  break;
	}
      break;

    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));
}
8400 thumb_process_displaced_insn (struct gdbarch
*gdbarch
, CORE_ADDR from
,
8401 CORE_ADDR to
, struct regcache
*regs
,
8402 struct displaced_step_closure
*dsc
)
8404 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
8406 = read_memory_unsigned_integer (from
, 2, byte_order_for_code
);
8408 if (debug_displaced
)
8409 fprintf_unfiltered (gdb_stdlog
, "displaced: process thumb insn %.4x "
8410 "at %.8lx\n", insn1
, (unsigned long) from
);
8413 dsc
->insn_size
= thumb_insn_size (insn1
);
8414 if (thumb_insn_size (insn1
) == 4)
8417 = read_memory_unsigned_integer (from
+ 2, 2, byte_order_for_code
);
8418 thumb_process_displaced_32bit_insn (gdbarch
, insn1
, insn2
, regs
, dsc
);
8421 thumb_process_displaced_16bit_insn (gdbarch
, insn1
, regs
, dsc
);
8425 arm_process_displaced_insn (struct gdbarch
*gdbarch
, CORE_ADDR from
,
8426 CORE_ADDR to
, struct regcache
*regs
,
8427 struct displaced_step_closure
*dsc
)
8430 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
8433 /* Most displaced instructions use a 1-instruction scratch space, so set this
8434 here and override below if/when necessary. */
8436 dsc
->insn_addr
= from
;
8437 dsc
->scratch_base
= to
;
8438 dsc
->cleanup
= NULL
;
8439 dsc
->wrote_to_pc
= 0;
8441 if (!displaced_in_arm_mode (regs
))
8442 return thumb_process_displaced_insn (gdbarch
, from
, to
, regs
, dsc
);
8446 insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
8447 if (debug_displaced
)
8448 fprintf_unfiltered (gdb_stdlog
, "displaced: stepping insn %.8lx "
8449 "at %.8lx\n", (unsigned long) insn
,
8450 (unsigned long) from
);
8452 if ((insn
& 0xf0000000) == 0xf0000000)
8453 err
= arm_decode_unconditional (gdbarch
, insn
, regs
, dsc
);
8454 else switch (((insn
& 0x10) >> 4) | ((insn
& 0xe000000) >> 24))
8456 case 0x0: case 0x1: case 0x2: case 0x3:
8457 err
= arm_decode_dp_misc (gdbarch
, insn
, regs
, dsc
);
8460 case 0x4: case 0x5: case 0x6:
8461 err
= arm_decode_ld_st_word_ubyte (gdbarch
, insn
, regs
, dsc
);
8465 err
= arm_decode_media (gdbarch
, insn
, dsc
);
8468 case 0x8: case 0x9: case 0xa: case 0xb:
8469 err
= arm_decode_b_bl_ldmstm (gdbarch
, insn
, regs
, dsc
);
8472 case 0xc: case 0xd: case 0xe: case 0xf:
8473 err
= arm_decode_svc_copro (gdbarch
, insn
, to
, regs
, dsc
);
8478 internal_error (__FILE__
, __LINE__
,
8479 _("arm_process_displaced_insn: Instruction decode error"));
8482 /* Actually set up the scratch space for a displaced instruction. */
8485 arm_displaced_init_closure (struct gdbarch
*gdbarch
, CORE_ADDR from
,
8486 CORE_ADDR to
, struct displaced_step_closure
*dsc
)
8488 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
8489 unsigned int i
, len
, offset
;
8490 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
8491 int size
= dsc
->is_thumb
? 2 : 4;
8492 const unsigned char *bkp_insn
;
8495 /* Poke modified instruction(s). */
8496 for (i
= 0; i
< dsc
->numinsns
; i
++)
8498 if (debug_displaced
)
8500 fprintf_unfiltered (gdb_stdlog
, "displaced: writing insn ");
8502 fprintf_unfiltered (gdb_stdlog
, "%.8lx",
8505 fprintf_unfiltered (gdb_stdlog
, "%.4x",
8506 (unsigned short)dsc
->modinsn
[i
]);
8508 fprintf_unfiltered (gdb_stdlog
, " at %.8lx\n",
8509 (unsigned long) to
+ offset
);
8512 write_memory_unsigned_integer (to
+ offset
, size
,
8513 byte_order_for_code
,
8518 /* Choose the correct breakpoint instruction. */
8521 bkp_insn
= tdep
->thumb_breakpoint
;
8522 len
= tdep
->thumb_breakpoint_size
;
8526 bkp_insn
= tdep
->arm_breakpoint
;
8527 len
= tdep
->arm_breakpoint_size
;
8530 /* Put breakpoint afterwards. */
8531 write_memory (to
+ offset
, bkp_insn
, len
);
8533 if (debug_displaced
)
8534 fprintf_unfiltered (gdb_stdlog
, "displaced: copy %s->%s: ",
8535 paddress (gdbarch
, from
), paddress (gdbarch
, to
));
8538 /* Entry point for copying an instruction into scratch space for displaced
8541 struct displaced_step_closure
*
8542 arm_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
8543 CORE_ADDR from
, CORE_ADDR to
,
8544 struct regcache
*regs
)
8546 struct displaced_step_closure
*dsc
8547 = xmalloc (sizeof (struct displaced_step_closure
));
8548 arm_process_displaced_insn (gdbarch
, from
, to
, regs
, dsc
);
8549 arm_displaced_init_closure (gdbarch
, from
, to
, dsc
);
8554 /* Entry point for cleaning things up after a displaced instruction has been
8558 arm_displaced_step_fixup (struct gdbarch
*gdbarch
,
8559 struct displaced_step_closure
*dsc
,
8560 CORE_ADDR from
, CORE_ADDR to
,
8561 struct regcache
*regs
)
8564 dsc
->cleanup (gdbarch
, regs
, dsc
);
8566 if (!dsc
->wrote_to_pc
)
8567 regcache_cooked_write_unsigned (regs
, ARM_PC_REGNUM
,
8568 dsc
->insn_addr
+ dsc
->insn_size
);
8572 #include "bfd-in2.h"
8573 #include "libcoff.h"
8576 gdb_print_insn_arm (bfd_vma memaddr
, disassemble_info
*info
)
8578 struct gdbarch
*gdbarch
= info
->application_data
;
8580 if (arm_pc_is_thumb (gdbarch
, memaddr
))
8582 static asymbol
*asym
;
8583 static combined_entry_type ce
;
8584 static struct coff_symbol_struct csym
;
8585 static struct bfd fake_bfd
;
8586 static bfd_target fake_target
;
8588 if (csym
.native
== NULL
)
8590 /* Create a fake symbol vector containing a Thumb symbol.
8591 This is solely so that the code in print_insn_little_arm()
8592 and print_insn_big_arm() in opcodes/arm-dis.c will detect
8593 the presence of a Thumb symbol and switch to decoding
8594 Thumb instructions. */
8596 fake_target
.flavour
= bfd_target_coff_flavour
;
8597 fake_bfd
.xvec
= &fake_target
;
8598 ce
.u
.syment
.n_sclass
= C_THUMBEXTFUNC
;
8600 csym
.symbol
.the_bfd
= &fake_bfd
;
8601 csym
.symbol
.name
= "fake";
8602 asym
= (asymbol
*) & csym
;
8605 memaddr
= UNMAKE_THUMB_ADDR (memaddr
);
8606 info
->symbols
= &asym
;
8609 info
->symbols
= NULL
;
8611 if (info
->endian
== BFD_ENDIAN_BIG
)
8612 return print_insn_big_arm (memaddr
, info
);
8614 return print_insn_little_arm (memaddr
, info
);
8617 /* The following define instruction sequences that will cause ARM
8618 cpu's to take an undefined instruction trap. These are used to
8619 signal a breakpoint to GDB.
8621 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8622 modes. A different instruction is required for each mode. The ARM
8623 cpu's can also be big or little endian. Thus four different
8624 instructions are needed to support all cases.
8626 Note: ARMv4 defines several new instructions that will take the
8627 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8628 not in fact add the new instructions. The new undefined
8629 instructions in ARMv4 are all instructions that had no defined
8630 behaviour in earlier chips. There is no guarantee that they will
8631 raise an exception, but may be treated as NOP's. In practice, it
8632 may only safe to rely on instructions matching:
8634 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8635 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8636 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8638 Even this may only true if the condition predicate is true. The
8639 following use a condition predicate of ALWAYS so it is always TRUE.
8641 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8642 and NetBSD all use a software interrupt rather than an undefined
8643 instruction to force a trap. This can be handled by by the
8644 abi-specific code during establishment of the gdbarch vector. */
/* Default breakpoint instruction encodings, one per mode (ARM/Thumb)
   and endianness pairing.  The ARM encodings are permanently-undefined
   instructions; the Thumb encoding is the BKPT-like 0xbebe pair.  */
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8656 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8657 the program counter value to determine whether a 16-bit or 32-bit
8658 breakpoint should be used. It returns a pointer to a string of
8659 bytes that encode a breakpoint instruction, stores the length of
8660 the string to *lenptr, and adjusts the program counter (if
8661 necessary) to point to the actual memory location where the
8662 breakpoint should be inserted. */
8664 static const unsigned char *
8665 arm_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
, int *lenptr
)
8667 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
8668 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
8670 if (arm_pc_is_thumb (gdbarch
, *pcptr
))
8672 *pcptr
= UNMAKE_THUMB_ADDR (*pcptr
);
8674 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
8675 check whether we are replacing a 32-bit instruction. */
8676 if (tdep
->thumb2_breakpoint
!= NULL
)
8679 if (target_read_memory (*pcptr
, buf
, 2) == 0)
8681 unsigned short inst1
;
8682 inst1
= extract_unsigned_integer (buf
, 2, byte_order_for_code
);
8683 if (thumb_insn_size (inst1
) == 4)
8685 *lenptr
= tdep
->thumb2_breakpoint_size
;
8686 return tdep
->thumb2_breakpoint
;
8691 *lenptr
= tdep
->thumb_breakpoint_size
;
8692 return tdep
->thumb_breakpoint
;
8696 *lenptr
= tdep
->arm_breakpoint_size
;
8697 return tdep
->arm_breakpoint
;
8702 arm_remote_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
8705 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
8707 arm_breakpoint_from_pc (gdbarch
, pcptr
, kindptr
);
8709 if (arm_pc_is_thumb (gdbarch
, *pcptr
) && *kindptr
== 4)
8710 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8711 that this is not confused with a 32-bit ARM breakpoint. */
8715 /* Extract from an array REGBUF containing the (raw) register state a
8716 function return value of type TYPE, and copy that, in virtual
8717 format, into VALBUF. */
8720 arm_extract_return_value (struct type
*type
, struct regcache
*regs
,
8723 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
8724 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
8726 if (TYPE_CODE_FLT
== TYPE_CODE (type
))
8728 switch (gdbarch_tdep (gdbarch
)->fp_model
)
8732 /* The value is in register F0 in internal format. We need to
8733 extract the raw value and then convert it to the desired
8735 bfd_byte tmpbuf
[FP_REGISTER_SIZE
];
8737 regcache_cooked_read (regs
, ARM_F0_REGNUM
, tmpbuf
);
8738 convert_from_extended (floatformat_from_type (type
), tmpbuf
,
8739 valbuf
, gdbarch_byte_order (gdbarch
));
8743 case ARM_FLOAT_SOFT_FPA
:
8744 case ARM_FLOAT_SOFT_VFP
:
8745 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8746 not using the VFP ABI code. */
8748 regcache_cooked_read (regs
, ARM_A1_REGNUM
, valbuf
);
8749 if (TYPE_LENGTH (type
) > 4)
8750 regcache_cooked_read (regs
, ARM_A1_REGNUM
+ 1,
8751 valbuf
+ INT_REGISTER_SIZE
);
8755 internal_error (__FILE__
, __LINE__
,
8756 _("arm_extract_return_value: "
8757 "Floating point model not supported"));
8761 else if (TYPE_CODE (type
) == TYPE_CODE_INT
8762 || TYPE_CODE (type
) == TYPE_CODE_CHAR
8763 || TYPE_CODE (type
) == TYPE_CODE_BOOL
8764 || TYPE_CODE (type
) == TYPE_CODE_PTR
8765 || TYPE_CODE (type
) == TYPE_CODE_REF
8766 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
8768 /* If the type is a plain integer, then the access is
8769 straight-forward. Otherwise we have to play around a bit
8771 int len
= TYPE_LENGTH (type
);
8772 int regno
= ARM_A1_REGNUM
;
8777 /* By using store_unsigned_integer we avoid having to do
8778 anything special for small big-endian values. */
8779 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
8780 store_unsigned_integer (valbuf
,
8781 (len
> INT_REGISTER_SIZE
8782 ? INT_REGISTER_SIZE
: len
),
8784 len
-= INT_REGISTER_SIZE
;
8785 valbuf
+= INT_REGISTER_SIZE
;
8790 /* For a structure or union the behaviour is as if the value had
8791 been stored to word-aligned memory and then loaded into
8792 registers with 32-bit load instruction(s). */
8793 int len
= TYPE_LENGTH (type
);
8794 int regno
= ARM_A1_REGNUM
;
8795 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
8799 regcache_cooked_read (regs
, regno
++, tmpbuf
);
8800 memcpy (valbuf
, tmpbuf
,
8801 len
> INT_REGISTER_SIZE
? INT_REGISTER_SIZE
: len
);
8802 len
-= INT_REGISTER_SIZE
;
8803 valbuf
+= INT_REGISTER_SIZE
;
8809 /* Will a function return an aggregate type in memory or in a
8810 register? Return 0 if an aggregate type can be returned in a
8811 register, 1 if it must be returned in memory. */
8814 arm_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
8817 enum type_code code
;
8819 CHECK_TYPEDEF (type
);
8821 /* In the ARM ABI, "integer" like aggregate types are returned in
8822 registers. For an aggregate type to be integer like, its size
8823 must be less than or equal to INT_REGISTER_SIZE and the
8824 offset of each addressable subfield must be zero. Note that bit
8825 fields are not addressable, and all addressable subfields of
8826 unions always start at offset zero.
8828 This function is based on the behaviour of GCC 2.95.1.
8829 See: gcc/arm.c: arm_return_in_memory() for details.
8831 Note: All versions of GCC before GCC 2.95.2 do not set up the
8832 parameters correctly for a function returning the following
8833 structure: struct { float f;}; This should be returned in memory,
8834 not a register. Richard Earnshaw sent me a patch, but I do not
8835 know of any way to detect if a function like the above has been
8836 compiled with the correct calling convention. */
8838 /* All aggregate types that won't fit in a register must be returned
8840 if (TYPE_LENGTH (type
) > INT_REGISTER_SIZE
)
8845 /* The AAPCS says all aggregates not larger than a word are returned
8847 if (gdbarch_tdep (gdbarch
)->arm_abi
!= ARM_ABI_APCS
)
8850 /* The only aggregate types that can be returned in a register are
8851 structs and unions. Arrays must be returned in memory. */
8852 code
= TYPE_CODE (type
);
8853 if ((TYPE_CODE_STRUCT
!= code
) && (TYPE_CODE_UNION
!= code
))
8858 /* Assume all other aggregate types can be returned in a register.
8859 Run a check for structures, unions and arrays. */
8862 if ((TYPE_CODE_STRUCT
== code
) || (TYPE_CODE_UNION
== code
))
8865 /* Need to check if this struct/union is "integer" like. For
8866 this to be true, its size must be less than or equal to
8867 INT_REGISTER_SIZE and the offset of each addressable
8868 subfield must be zero. Note that bit fields are not
8869 addressable, and unions always start at offset zero. If any
8870 of the subfields is a floating point type, the struct/union
8871 cannot be an integer type. */
8873 /* For each field in the object, check:
8874 1) Is it FP? --> yes, nRc = 1;
8875 2) Is it addressable (bitpos != 0) and
8876 not packed (bitsize == 0)?
8880 for (i
= 0; i
< TYPE_NFIELDS (type
); i
++)
8882 enum type_code field_type_code
;
8883 field_type_code
= TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type
,
8886 /* Is it a floating point type field? */
8887 if (field_type_code
== TYPE_CODE_FLT
)
8893 /* If bitpos != 0, then we have to care about it. */
8894 if (TYPE_FIELD_BITPOS (type
, i
) != 0)
8896 /* Bitfields are not addressable. If the field bitsize is
8897 zero, then the field is not packed. Hence it cannot be
8898 a bitfield or any other packed type. */
8899 if (TYPE_FIELD_BITSIZE (type
, i
) == 0)
8911 /* Write into appropriate registers a function return value of type
8912 TYPE, given in virtual format. */
8915 arm_store_return_value (struct type
*type
, struct regcache
*regs
,
8916 const gdb_byte
*valbuf
)
8918 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
8919 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
8921 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
8923 char buf
[MAX_REGISTER_SIZE
];
8925 switch (gdbarch_tdep (gdbarch
)->fp_model
)
8929 convert_to_extended (floatformat_from_type (type
), buf
, valbuf
,
8930 gdbarch_byte_order (gdbarch
));
8931 regcache_cooked_write (regs
, ARM_F0_REGNUM
, buf
);
8934 case ARM_FLOAT_SOFT_FPA
:
8935 case ARM_FLOAT_SOFT_VFP
:
8936 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8937 not using the VFP ABI code. */
8939 regcache_cooked_write (regs
, ARM_A1_REGNUM
, valbuf
);
8940 if (TYPE_LENGTH (type
) > 4)
8941 regcache_cooked_write (regs
, ARM_A1_REGNUM
+ 1,
8942 valbuf
+ INT_REGISTER_SIZE
);
8946 internal_error (__FILE__
, __LINE__
,
8947 _("arm_store_return_value: Floating "
8948 "point model not supported"));
8952 else if (TYPE_CODE (type
) == TYPE_CODE_INT
8953 || TYPE_CODE (type
) == TYPE_CODE_CHAR
8954 || TYPE_CODE (type
) == TYPE_CODE_BOOL
8955 || TYPE_CODE (type
) == TYPE_CODE_PTR
8956 || TYPE_CODE (type
) == TYPE_CODE_REF
8957 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
8959 if (TYPE_LENGTH (type
) <= 4)
8961 /* Values of one word or less are zero/sign-extended and
8963 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
8964 LONGEST val
= unpack_long (type
, valbuf
);
8966 store_signed_integer (tmpbuf
, INT_REGISTER_SIZE
, byte_order
, val
);
8967 regcache_cooked_write (regs
, ARM_A1_REGNUM
, tmpbuf
);
8971 /* Integral values greater than one word are stored in consecutive
8972 registers starting with r0. This will always be a multiple of
8973 the regiser size. */
8974 int len
= TYPE_LENGTH (type
);
8975 int regno
= ARM_A1_REGNUM
;
8979 regcache_cooked_write (regs
, regno
++, valbuf
);
8980 len
-= INT_REGISTER_SIZE
;
8981 valbuf
+= INT_REGISTER_SIZE
;
8987 /* For a structure or union the behaviour is as if the value had
8988 been stored to word-aligned memory and then loaded into
8989 registers with 32-bit load instruction(s). */
8990 int len
= TYPE_LENGTH (type
);
8991 int regno
= ARM_A1_REGNUM
;
8992 bfd_byte tmpbuf
[INT_REGISTER_SIZE
];
8996 memcpy (tmpbuf
, valbuf
,
8997 len
> INT_REGISTER_SIZE
? INT_REGISTER_SIZE
: len
);
8998 regcache_cooked_write (regs
, regno
++, tmpbuf
);
8999 len
-= INT_REGISTER_SIZE
;
9000 valbuf
+= INT_REGISTER_SIZE
;
9006 /* Handle function return values. */
9008 static enum return_value_convention
9009 arm_return_value (struct gdbarch
*gdbarch
, struct type
*func_type
,
9010 struct type
*valtype
, struct regcache
*regcache
,
9011 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
9013 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
9014 enum arm_vfp_cprc_base_type vfp_base_type
;
9017 if (arm_vfp_abi_for_function (gdbarch
, func_type
)
9018 && arm_vfp_call_candidate (valtype
, &vfp_base_type
, &vfp_base_count
))
9020 int reg_char
= arm_vfp_cprc_reg_char (vfp_base_type
);
9021 int unit_length
= arm_vfp_cprc_unit_length (vfp_base_type
);
9023 for (i
= 0; i
< vfp_base_count
; i
++)
9025 if (reg_char
== 'q')
9028 arm_neon_quad_write (gdbarch
, regcache
, i
,
9029 writebuf
+ i
* unit_length
);
9032 arm_neon_quad_read (gdbarch
, regcache
, i
,
9033 readbuf
+ i
* unit_length
);
9040 sprintf (name_buf
, "%c%d", reg_char
, i
);
9041 regnum
= user_reg_map_name_to_regnum (gdbarch
, name_buf
,
9044 regcache_cooked_write (regcache
, regnum
,
9045 writebuf
+ i
* unit_length
);
9047 regcache_cooked_read (regcache
, regnum
,
9048 readbuf
+ i
* unit_length
);
9051 return RETURN_VALUE_REGISTER_CONVENTION
;
9054 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
9055 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
9056 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
9058 if (tdep
->struct_return
== pcc_struct_return
9059 || arm_return_in_memory (gdbarch
, valtype
))
9060 return RETURN_VALUE_STRUCT_CONVENTION
;
9063 /* AAPCS returns complex types longer than a register in memory. */
9064 if (tdep
->arm_abi
!= ARM_ABI_APCS
9065 && TYPE_CODE (valtype
) == TYPE_CODE_COMPLEX
9066 && TYPE_LENGTH (valtype
) > INT_REGISTER_SIZE
)
9067 return RETURN_VALUE_STRUCT_CONVENTION
;
9070 arm_store_return_value (valtype
, regcache
, writebuf
);
9073 arm_extract_return_value (valtype
, regcache
, readbuf
);
9075 return RETURN_VALUE_REGISTER_CONVENTION
;
9080 arm_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
9082 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
9083 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
9084 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
9086 char buf
[INT_REGISTER_SIZE
];
9088 jb_addr
= get_frame_register_unsigned (frame
, ARM_A1_REGNUM
);
9090 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
9094 *pc
= extract_unsigned_integer (buf
, INT_REGISTER_SIZE
, byte_order
);
9098 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
9099 return the target PC. Otherwise return 0. */
9102 arm_skip_stub (struct frame_info
*frame
, CORE_ADDR pc
)
9106 CORE_ADDR start_addr
;
9108 /* Find the starting address and name of the function containing the PC. */
9109 if (find_pc_partial_function (pc
, &name
, &start_addr
, NULL
) == 0)
9112 /* If PC is in a Thumb call or return stub, return the address of the
9113 target PC, which is in a register. The thunk functions are called
9114 _call_via_xx, where x is the register name. The possible names
9115 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
9116 functions, named __ARM_call_via_r[0-7]. */
9117 if (strncmp (name
, "_call_via_", 10) == 0
9118 || strncmp (name
, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
9120 /* Use the name suffix to determine which register contains the
9122 static char *table
[15] =
9123 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
9124 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
9127 int offset
= strlen (name
) - 2;
9129 for (regno
= 0; regno
<= 14; regno
++)
9130 if (strcmp (&name
[offset
], table
[regno
]) == 0)
9131 return get_frame_register_unsigned (frame
, regno
);
9134 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
9135 non-interworking calls to foo. We could decode the stubs
9136 to find the target but it's easier to use the symbol table. */
9137 namelen
= strlen (name
);
9138 if (name
[0] == '_' && name
[1] == '_'
9139 && ((namelen
> 2 + strlen ("_from_thumb")
9140 && strncmp (name
+ namelen
- strlen ("_from_thumb"), "_from_thumb",
9141 strlen ("_from_thumb")) == 0)
9142 || (namelen
> 2 + strlen ("_from_arm")
9143 && strncmp (name
+ namelen
- strlen ("_from_arm"), "_from_arm",
9144 strlen ("_from_arm")) == 0)))
9147 int target_len
= namelen
- 2;
9148 struct minimal_symbol
*minsym
;
9149 struct objfile
*objfile
;
9150 struct obj_section
*sec
;
9152 if (name
[namelen
- 1] == 'b')
9153 target_len
-= strlen ("_from_thumb");
9155 target_len
-= strlen ("_from_arm");
9157 target_name
= alloca (target_len
+ 1);
9158 memcpy (target_name
, name
+ 2, target_len
);
9159 target_name
[target_len
] = '\0';
9161 sec
= find_pc_section (pc
);
9162 objfile
= (sec
== NULL
) ? NULL
: sec
->objfile
;
9163 minsym
= lookup_minimal_symbol (target_name
, NULL
, objfile
);
9165 return SYMBOL_VALUE_ADDRESS (minsym
);
9170 return 0; /* not a stub */
9174 set_arm_command (char *args
, int from_tty
)
9176 printf_unfiltered (_("\
9177 \"set arm\" must be followed by an apporpriate subcommand.\n"));
9178 help_list (setarmcmdlist
, "set arm ", all_commands
, gdb_stdout
);
9182 show_arm_command (char *args
, int from_tty
)
9184 cmd_show_list (showarmcmdlist
, from_tty
, "");
9188 arm_update_current_architecture (void)
9190 struct gdbarch_info info
;
9192 /* If the current architecture is not ARM, we have nothing to do. */
9193 if (gdbarch_bfd_arch_info (target_gdbarch
)->arch
!= bfd_arch_arm
)
9196 /* Update the architecture. */
9197 gdbarch_info_init (&info
);
9199 if (!gdbarch_update_p (info
))
9200 internal_error (__FILE__
, __LINE__
, _("could not update architecture"));
9204 set_fp_model_sfunc (char *args
, int from_tty
,
9205 struct cmd_list_element
*c
)
9207 enum arm_float_model fp_model
;
9209 for (fp_model
= ARM_FLOAT_AUTO
; fp_model
!= ARM_FLOAT_LAST
; fp_model
++)
9210 if (strcmp (current_fp_model
, fp_model_strings
[fp_model
]) == 0)
9212 arm_fp_model
= fp_model
;
9216 if (fp_model
== ARM_FLOAT_LAST
)
9217 internal_error (__FILE__
, __LINE__
, _("Invalid fp model accepted: %s."),
9220 arm_update_current_architecture ();
9224 show_fp_model (struct ui_file
*file
, int from_tty
,
9225 struct cmd_list_element
*c
, const char *value
)
9227 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9229 if (arm_fp_model
== ARM_FLOAT_AUTO
9230 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
9231 fprintf_filtered (file
, _("\
9232 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
9233 fp_model_strings
[tdep
->fp_model
]);
9235 fprintf_filtered (file
, _("\
9236 The current ARM floating point model is \"%s\".\n"),
9237 fp_model_strings
[arm_fp_model
]);
9241 arm_set_abi (char *args
, int from_tty
,
9242 struct cmd_list_element
*c
)
9244 enum arm_abi_kind arm_abi
;
9246 for (arm_abi
= ARM_ABI_AUTO
; arm_abi
!= ARM_ABI_LAST
; arm_abi
++)
9247 if (strcmp (arm_abi_string
, arm_abi_strings
[arm_abi
]) == 0)
9249 arm_abi_global
= arm_abi
;
9253 if (arm_abi
== ARM_ABI_LAST
)
9254 internal_error (__FILE__
, __LINE__
, _("Invalid ABI accepted: %s."),
9257 arm_update_current_architecture ();
9261 arm_show_abi (struct ui_file
*file
, int from_tty
,
9262 struct cmd_list_element
*c
, const char *value
)
9264 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9266 if (arm_abi_global
== ARM_ABI_AUTO
9267 && gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_arm
)
9268 fprintf_filtered (file
, _("\
9269 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
9270 arm_abi_strings
[tdep
->arm_abi
]);
9272 fprintf_filtered (file
, _("The current ARM ABI is \"%s\".\n"),
9277 arm_show_fallback_mode (struct ui_file
*file
, int from_tty
,
9278 struct cmd_list_element
*c
, const char *value
)
9280 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9282 fprintf_filtered (file
,
9283 _("The current execution mode assumed "
9284 "(when symbols are unavailable) is \"%s\".\n"),
9285 arm_fallback_mode_string
);
9289 arm_show_force_mode (struct ui_file
*file
, int from_tty
,
9290 struct cmd_list_element
*c
, const char *value
)
9292 struct gdbarch_tdep
*tdep
= gdbarch_tdep (target_gdbarch
);
9294 fprintf_filtered (file
,
9295 _("The current execution mode assumed "
9296 "(even when symbols are available) is \"%s\".\n"),
9297 arm_force_mode_string
);
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  set_disassembly_style ();
}
9312 /* Return the ARM register name corresponding to register I. */
9314 arm_register_name (struct gdbarch
*gdbarch
, int i
)
9316 const int num_regs
= gdbarch_num_regs (gdbarch
);
9318 if (gdbarch_tdep (gdbarch
)->have_vfp_pseudos
9319 && i
>= num_regs
&& i
< num_regs
+ 32)
9321 static const char *const vfp_pseudo_names
[] = {
9322 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9323 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9324 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9325 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9328 return vfp_pseudo_names
[i
- num_regs
];
9331 if (gdbarch_tdep (gdbarch
)->have_neon_pseudos
9332 && i
>= num_regs
+ 32 && i
< num_regs
+ 32 + 16)
9334 static const char *const neon_pseudo_names
[] = {
9335 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9336 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9339 return neon_pseudo_names
[i
- num_regs
- 32];
9342 if (i
>= ARRAY_SIZE (arm_register_names
))
9343 /* These registers are only supported on targets which supply
9344 an XML description. */
9347 return arm_register_names
[i
];
9351 set_disassembly_style (void)
9355 /* Find the style that the user wants. */
9356 for (current
= 0; current
< num_disassembly_options
; current
++)
9357 if (disassembly_style
== valid_disassembly_styles
[current
])
9359 gdb_assert (current
< num_disassembly_options
);
9361 /* Synchronize the disassembler. */
9362 set_arm_regname_option (current
);
9365 /* Test whether the coff symbol specific value corresponds to a Thumb
9369 coff_sym_is_thumb (int val
)
9371 return (val
== C_THUMBEXT
9372 || val
== C_THUMBSTAT
9373 || val
== C_THUMBEXTFUNC
9374 || val
== C_THUMBSTATFUNC
9375 || val
== C_THUMBLABEL
);
/* arm_coff_make_msymbol_special()
   arm_elf_make_msymbol_special()

   These functions test whether the COFF or ELF symbol corresponds to
   an address in thumb code, and set a "special" bit in a minimal
   symbol to indicate that it does.  */

static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  /* If the ELF symbol's branch type says it targets Thumb code, mark
     the minimal symbol "special" (see MSYMBOL_SET_SPECIAL).  */
  if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
      == ST_BRANCH_TO_THUMB)
    MSYMBOL_SET_SPECIAL (msym);
}
/* Mark MSYM "special" (i.e. Thumb) when the COFF symbol value VAL
   names one of the Thumb storage classes.  */

static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
/* Per-objfile data cleanup hook: free the per-section mapping-symbol
   vectors attached to OBJFILE.  ARG is the struct arm_per_objfile
   registered for the objfile; the structure itself (and the
   section_maps array) live on the objfile obstack and so are not
   freed here, only the vectors they point at.  */

static void
arm_objfile_data_free (struct objfile *objfile, void *arg)
{
  struct arm_per_objfile *data = arg;
  unsigned int i;

  for (i = 0; i < objfile->obfd->section_count; i++)
    VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
}
/* Record the ARM mapping symbol SYM ($a, $t or $d) from OBJFILE in
   the per-objfile, per-section map.  Each section's vector is kept
   sorted by address so that later lookups can binary-search it.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
                           asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  VEC(arm_mapping_symbol_s) **map_p;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  /* Only the three ARM mapping-symbol kinds are of interest.  */
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Allocate the per-objfile map on first use.  */
  data = objfile_data (objfile, arm_objfile_data_key);
  if (data == NULL)
    {
      data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
                             struct arm_per_objfile);
      set_objfile_data (objfile, arm_objfile_data_key, data);
      data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
                                           objfile->obfd->section_count,
                                           VEC(arm_mapping_symbol_s) *);
    }
  map_p = &data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  if (!VEC_empty (arm_mapping_symbol_s, *map_p))
    {
      struct arm_mapping_symbol *prev_map_sym;

      prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
      if (prev_map_sym->value >= sym->value)
        {
          /* Out-of-order symbol: fall back to an ordered insertion
             to keep the vector sorted.  */
          unsigned int idx;

          idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
                                 arm_compare_mapping_symbols);
          VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
          return;
        }
    }

  VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
}
/* Implement the "write_pc" gdbarch method: store PC in the PC
   register and keep the CPSR Thumb (T) bit consistent with the kind
   of code (ARM or Thumb) found at PC.  */

static void
arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);

  /* If necessary, set the T bit.  */
  if (arm_apcs_32)
    {
      ULONGEST val, t_bit;

      regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
      t_bit = arm_psr_thumb_bit (gdbarch);
      if (arm_pc_is_thumb (gdbarch, pc))
        regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
                                        val | t_bit);
      else
        regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
                                        val & ~t_bit);
    }
}
/* Read the contents of a NEON quad register, by reading from two
   double registers.  This is used to implement the quad pseudo
   registers, and for argument passing in case the quad registers are
   missing; vectors are passed in quad registers when using the VFP
   ABI, even if a NEON unit is not present.  REGNUM is the index of
   the quad register, in [0, 15].  */

static enum register_status
arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
                    int regnum, gdb_byte *buf)
{
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;
  enum register_status status;

  /* Quad register qN overlays double registers d(2N) and d(2N+1).
     Look the first one up by name so that any renumbering done by a
     target description is honoured.  */
  sprintf (name_buf, "d%d", regnum << 1);
  double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
                                               strlen (name_buf));

  /* d0 is always the least significant half of q0.  */
  if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
    offset = 8;
  else
    offset = 0;

  status = regcache_raw_read (regcache, double_regnum, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  offset = 8 - offset;
  status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  return REG_VALID;
}
/* Implement the "pseudo_register_read" gdbarch method.  The VFP
   single-precision pseudos s0..s31 (and, with NEON, quad pseudos
   q0..q15) are synthesized from the raw double registers d0..d31.  */

static enum register_status
arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
                 int regnum, gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gd_assert_marker: ;
  gdb_assert (regnum >= num_regs);
  /* Rebase REGNUM to a pseudo-register index: 0..31 are s0..s31,
     32..47 are q0..q15.  */
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
  else
    {
      enum register_status status;

      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
        offset = (regnum & 1) ? 0 : 4;
      else
        offset = (regnum & 1) ? 4 : 0;

      /* Map the containing double register by name, so renumbered
         target descriptions work.  */
      sprintf (name_buf, "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
                                                   strlen (name_buf));

      status = regcache_raw_read (regcache, double_regnum, reg_buf);
      if (status == REG_VALID)
        memcpy (buf, reg_buf + offset, 4);
      return status;
    }
}
/* Store the contents of BUF to a NEON quad register, by writing to
   two double registers.  This is used to implement the quad pseudo
   registers, and for argument passing in case the quad registers are
   missing; vectors are passed in quad registers when using the VFP
   ABI, even if a NEON unit is not present.  REGNUM is the index
   of the quad register, in [0, 15].  */

static void
arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
                     int regnum, const gdb_byte *buf)
{
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  /* Quad register qN overlays double registers d(2N) and d(2N+1);
     look the first up by name (honours target-description
     renumbering).  */
  sprintf (name_buf, "d%d", regnum << 1);
  double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
                                               strlen (name_buf));

  /* d0 is always the least significant half of q0.  */
  if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
    offset = 8;
  else
    offset = 0;

  regcache_raw_write (regcache, double_regnum, buf + offset);
  offset = 8 - offset;
  regcache_raw_write (regcache, double_regnum + 1, buf + offset);
}
/* Implement the "pseudo_register_write" gdbarch method: store BUF to
   pseudo register REGNUM.  Single pseudos (s0..s31) are written via a
   read-modify-write of the containing double register; quad pseudos
   are delegated to arm_neon_quad_write.  */

static void
arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
                  int regnum, const gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gdb_assert (regnum >= num_regs);
  /* Rebase REGNUM to a pseudo index: 0..31 are s0..s31, 32..47 are
     q0..q15.  */
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
  else
    {
      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
        offset = (regnum & 1) ? 0 : 4;
      else
        offset = (regnum & 1) ? 4 : 0;

      sprintf (name_buf, "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
                                                   strlen (name_buf));

      /* Read-modify-write: only 4 of the 8 bytes change.  */
      regcache_raw_read (regcache, double_regnum, reg_buf);
      memcpy (reg_buf + offset, buf, 4);
      regcache_raw_write (regcache, double_regnum, reg_buf);
    }
}
/* Value routine for the ARM user-register aliases: BATON points at
   the raw register number to read from FRAME.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  int regnum = *(const int *) baton;

  return value_of_register (regnum, frame);
}
/* OS ABI sniffer for ARM ELF binaries.  Inspect the ELF header's
   EI_OSABI byte; for ELFOSABI_ARM also scan the file's sections for
   ABI tag notes.  Returns GDB_OSABI_UNKNOWN when nothing specific is
   found, letting the generic ELF sniffer decide.  */

static enum gdb_osabi
arm_elf_osabi_sniffer (bfd *abfd)
{
  unsigned int elfosabi;
  enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;

  elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];

  if (elfosabi == ELFOSABI_ARM)
    /* GNU tools use this value.  Check note sections in this case,
       as well.  */
    bfd_map_over_sections (abfd,
                           generic_elf_osabi_sniff_abi_tag_sections,
                           &osabi);

  /* Anything else will be handled by the generic ELF sniffer.  */
  return osabi;
}
9652 arm_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
9653 struct reggroup
*group
)
9655 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9656 this, FPS register belongs to save_regroup, restore_reggroup, and
9657 all_reggroup, of course. */
9658 if (regnum
== ARM_FPS_REGNUM
)
9659 return (group
== float_reggroup
9660 || group
== save_reggroup
9661 || group
== restore_reggroup
9662 || group
== all_reggroup
);
9664 return default_register_reggroup_p (gdbarch
, regnum
, group
);
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  enum arm_abi_kind arm_abi = arm_abi_global;
  enum arm_float_model fp_model = arm_fp_model;
  struct tdesc_arch_data *tdesc_data = NULL;
  int i, is_m = 0;
  int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
  int have_neon = 0;
  int have_fpa_registers = 1;
  const struct target_desc *tdesc = info.target_desc;

  /* If we have an object to base this architecture on, try to determine
     its ABI.  */

  if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
    {
      int ei_osabi, e_flags;

      switch (bfd_get_flavour (info.abfd))
        {
        case bfd_target_aout_flavour:
          /* Assume it's an old APCS-style ABI.  */
          arm_abi = ARM_ABI_APCS;
          break;

        case bfd_target_coff_flavour:
          /* Assume it's an old APCS-style ABI.  */
          arm_abi = ARM_ABI_APCS;
          break;

        case bfd_target_elf_flavour:
          ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
          e_flags = elf_elfheader (info.abfd)->e_flags;

          if (ei_osabi == ELFOSABI_ARM)
            {
              /* GNU tools used to use this value, but do not for EABI
                 objects.  There's nowhere to tag an EABI version
                 anyway, so assume APCS.  */
              arm_abi = ARM_ABI_APCS;
            }
          else if (ei_osabi == ELFOSABI_NONE)
            {
              int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
              int attr_arch, attr_profile;

              switch (eabi_ver)
                {
                case EF_ARM_EABI_UNKNOWN:
                  /* Assume GNU tools.  */
                  arm_abi = ARM_ABI_APCS;
                  break;

                case EF_ARM_EABI_VER4:
                case EF_ARM_EABI_VER5:
                  arm_abi = ARM_ABI_AAPCS;
                  /* EABI binaries default to VFP float ordering.
                     They may also contain build attributes that can
                     be used to identify if the VFP argument-passing
                     ABI is in use.  */
                  if (fp_model == ARM_FLOAT_AUTO)
                    {
#ifdef HAVE_ELF
                      switch (bfd_elf_get_obj_attr_int (info.abfd,
                                                        OBJ_ATTR_PROC,
                                                        Tag_ABI_VFP_args))
                        {
                        case 0:
                          /* "The user intended FP parameter/result
                             passing to conform to AAPCS, base
                             variant".  */
                          fp_model = ARM_FLOAT_SOFT_VFP;
                          break;
                        case 1:
                          /* "The user intended FP parameter/result
                             passing to conform to AAPCS, VFP
                             variant".  */
                          fp_model = ARM_FLOAT_VFP;
                          break;
                        case 2:
                          /* "The user intended FP parameter/result
                             passing to conform to tool chain-specific
                             conventions" - we don't know any such
                             conventions, so leave it as "auto".  */
                          break;
                        default:
                          /* Attribute value not mentioned in the
                             October 2008 ABI, so leave it as
                             "auto".  */
                          break;
                        }
#else
                      fp_model = ARM_FLOAT_SOFT_VFP;
#endif
                    }
                  break;

                default:
                  /* Leave it as "auto".  */
                  warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
                  break;
                }

#ifdef HAVE_ELF
              /* Detect M-profile programs.  This only works if the
                 executable file includes build attributes; GCC does
                 copy them to the executable, but e.g. RealView does
                 not.  */
              attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
                                                    Tag_CPU_arch);
              attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
                                                       OBJ_ATTR_PROC,
                                                       Tag_CPU_arch_profile);
              /* GCC specifies the profile for v6-M; RealView only
                 specifies the profile for architectures starting with
                 V7 (as opposed to architectures with a tag
                 numerically greater than TAG_CPU_ARCH_V7).  */
              if (!tdesc_has_registers (tdesc)
                  && (attr_arch == TAG_CPU_ARCH_V6_M
                      || attr_arch == TAG_CPU_ARCH_V6S_M
                      || attr_profile == 'M'))
                tdesc = tdesc_arm_with_m;
#endif
            }

          if (fp_model == ARM_FLOAT_AUTO)
            {
              int e_flags = elf_elfheader (info.abfd)->e_flags;

              switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
                {
                case 0:
                  /* Leave it as "auto".  Strictly speaking this case
                     means FPA, but almost nobody uses that now, and
                     many toolchains fail to set the appropriate bits
                     for the floating-point model they use.  */
                  break;
                case EF_ARM_SOFT_FLOAT:
                  fp_model = ARM_FLOAT_SOFT_FPA;
                  break;
                case EF_ARM_VFP_FLOAT:
                  fp_model = ARM_FLOAT_VFP;
                  break;
                case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
                  fp_model = ARM_FLOAT_SOFT_VFP;
                  break;
                }
            }

          if (e_flags & EF_ARM_BE8)
            info.byte_order_for_code = BFD_ENDIAN_LITTLE;

          break;

        default:
          /* Leave it as "auto".  */
          break;
        }
    }

  /* Check any target description for validity.  */
  if (tdesc_has_registers (tdesc))
    {
      /* For most registers we require GDB's default names; but also allow
         the numeric names for sp / lr / pc, as a convenience.  */
      static const char *const arm_sp_names[] = { "r13", "sp", NULL };
      static const char *const arm_lr_names[] = { "r14", "lr", NULL };
      static const char *const arm_pc_names[] = { "r15", "pc", NULL };

      const struct tdesc_feature *feature;
      int valid_p;

      feature = tdesc_find_feature (tdesc,
                                    "org.gnu.gdb.arm.core");
      if (feature == NULL)
        {
          feature = tdesc_find_feature (tdesc,
                                        "org.gnu.gdb.arm.m-profile");
          if (feature == NULL)
            return NULL;
          else
            is_m = 1;
        }

      tdesc_data = tdesc_data_alloc ();

      valid_p = 1;
      for (i = 0; i < ARM_SP_REGNUM; i++)
        valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
                                            arm_register_names[i]);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
                                                  ARM_SP_REGNUM,
                                                  arm_sp_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
                                                  ARM_LR_REGNUM,
                                                  arm_lr_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
                                                  ARM_PC_REGNUM,
                                                  arm_pc_names);
      if (is_m)
        valid_p &= tdesc_numbered_register (feature, tdesc_data,
                                            ARM_PS_REGNUM, "xpsr");
      else
        valid_p &= tdesc_numbered_register (feature, tdesc_data,
                                            ARM_PS_REGNUM, "cpsr");

      if (!valid_p)
        {
          tdesc_data_cleanup (tdesc_data);
          return NULL;
        }

      feature = tdesc_find_feature (tdesc,
                                    "org.gnu.gdb.arm.fpa");
      if (feature != NULL)
        {
          valid_p = 1;
          for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
            valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
                                                arm_register_names[i]);
          if (!valid_p)
            {
              tdesc_data_cleanup (tdesc_data);
              return NULL;
            }
        }
      else
        have_fpa_registers = 0;

      feature = tdesc_find_feature (tdesc,
                                    "org.gnu.gdb.xscale.iwmmxt");
      if (feature != NULL)
        {
          static const char *const iwmmxt_names[] = {
            "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
            "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
            "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
            "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
          };

          valid_p = 1;
          for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
            valid_p
              &= tdesc_numbered_register (feature, tdesc_data, i,
                                          iwmmxt_names[i - ARM_WR0_REGNUM]);

          /* Check for the control registers, but do not fail if they
             are missing.  */
          for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
            tdesc_numbered_register (feature, tdesc_data, i,
                                     iwmmxt_names[i - ARM_WR0_REGNUM]);

          for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
            valid_p
              &= tdesc_numbered_register (feature, tdesc_data, i,
                                          iwmmxt_names[i - ARM_WR0_REGNUM]);

          if (!valid_p)
            {
              tdesc_data_cleanup (tdesc_data);
              return NULL;
            }
        }

      /* If we have a VFP unit, check whether the single precision registers
         are present.  If not, then we will synthesize them as pseudo
         registers.  */
      feature = tdesc_find_feature (tdesc,
                                    "org.gnu.gdb.arm.vfp");
      if (feature != NULL)
        {
          static const char *const vfp_double_names[] = {
            "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
            "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
            "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
            "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
          };

          /* Require the double precision registers.  There must be either
             16 or 32.  */
          valid_p = 1;
          for (i = 0; i < 32; i++)
            {
              valid_p &= tdesc_numbered_register (feature, tdesc_data,
                                                  ARM_D0_REGNUM + i,
                                                  vfp_double_names[i]);
              if (!valid_p)
                break;
            }
          if (!valid_p && i == 16)
            valid_p = 1;

          /* Also require FPSCR.  */
          valid_p &= tdesc_numbered_register (feature, tdesc_data,
                                              ARM_FPSCR_REGNUM, "fpscr");
          if (!valid_p)
            {
              tdesc_data_cleanup (tdesc_data);
              return NULL;
            }

          if (tdesc_unnumbered_register (feature, "s0") == 0)
            have_vfp_pseudos = 1;

          have_vfp_registers = 1;

          /* If we have VFP, also check for NEON.  The architecture allows
             NEON without VFP (integer vector operations only), but GDB
             does not support that.  */
          feature = tdesc_find_feature (tdesc,
                                        "org.gnu.gdb.arm.neon");
          if (feature != NULL)
            {
              /* NEON requires 32 double-precision registers.  */
              if (i != 32)
                {
                  tdesc_data_cleanup (tdesc_data);
                  return NULL;
                }

              /* If there are quad registers defined by the stub, use
                 their type; otherwise (normally) provide them with
                 the default type.  */
              if (tdesc_unnumbered_register (feature, "q0") == 0)
                have_neon_pseudos = 1;

              have_neon = 1;
            }
        }
    }

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      if (arm_abi != ARM_ABI_AUTO
          && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
        continue;

      if (fp_model != ARM_FLOAT_AUTO
          && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
        continue;

      /* There are various other properties in tdep that we do not
         need to check here: those derived from a target description,
         since gdbarches with a different target description are
         automatically disqualified.  */

      /* Do check is_m, though, since it might come from the binary.  */
      if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
        continue;

      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
        tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Record additional information about the architecture we are defining.
     These are gdbarch discriminators, like the OSABI.  */
  tdep->arm_abi = arm_abi;
  tdep->fp_model = fp_model;
  tdep->is_m = is_m;
  tdep->have_fpa_registers = have_fpa_registers;
  tdep->have_vfp_registers = have_vfp_registers;
  tdep->have_vfp_pseudos = have_vfp_pseudos;
  tdep->have_neon_pseudos = have_neon_pseudos;
  tdep->have_neon = have_neon;

  /* Breakpoints: choose the byte sequences matching the code
     endianness.  */
  switch (info.byte_order_for_code)
    {
    case BFD_ENDIAN_BIG:
      tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);

      break;

    case BFD_ENDIAN_LITTLE:
      tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);

      break;

    default:
      internal_error (__FILE__, __LINE__,
                      _("arm_gdbarch_init: bad byte order for float format"));
    }

  /* On ARM targets char defaults to unsigned.  */
  set_gdbarch_char_signed (gdbarch, 0);

  /* Note: for displaced stepping, this includes the breakpoint, and one word
     of additional scratch space.  This setting isn't used for anything beside
     displaced stepping at present.  */
  set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;	/* Longjump support not enabled by default.  */

  /* The default, for both APCS and AAPCS, is to return small
     structures in registers.  */
  tdep->struct_return = reg_struct_return;

  set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, arm_frame_align);

  set_gdbarch_write_pc (gdbarch, arm_write_pc);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);

  frame_base_set_default (gdbarch, &arm_normal_base);

  /* Address manipulation.  */
  set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
  set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);

  /* Detect whether PC is in function epilogue.  */
  set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);

  /* Skip trampolines.  */
  set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
  set_gdbarch_remote_breakpoint_from_pc (gdbarch,
                                         arm_remote_breakpoint_from_pc);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
  set_gdbarch_register_type (gdbarch, arm_register_type);
  set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);

  /* This "info float" is FPA-specific.  Use the generic version if we
     do not have FPA.  */
  if (gdbarch_tdep (gdbarch)->have_fpa_registers)
    set_gdbarch_print_float_info (gdbarch, arm_print_float_info);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
  set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);

  set_gdbarch_register_name (gdbarch, arm_register_name);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, arm_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);

  /* Minsymbol frobbing.  */
  set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
  set_gdbarch_coff_make_msymbol_special (gdbarch,
                                         arm_coff_make_msymbol_special);
  set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);

  /* Thumb-2 IT block support.  */
  set_gdbarch_adjust_breakpoint_address (gdbarch,
                                         arm_adjust_breakpoint_address);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
  frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  /* If the ABI is not otherwise marked, assume the old GNU APCS.  EABI
     binaries are always marked.  */
  if (tdep->arm_abi == ARM_ABI_AUTO)
    tdep->arm_abi = ARM_ABI_APCS;

  /* Watchpoints are not steppable.  */
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);

  /* We used to default to FPA for generic ARM, but almost nobody
     uses that now, and we now provide a way for the user to force
     the model.  So default to the most useful variant.  */
  if (tdep->fp_model == ARM_FLOAT_AUTO)
    tdep->fp_model = ARM_FLOAT_SOFT_FPA;

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);

  /* Floating point sizes and format.  */
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
    {
      set_gdbarch_double_format
        (gdbarch, floatformats_ieee_double_littlebyte_bigword);
      set_gdbarch_long_double_format
        (gdbarch, floatformats_ieee_double_littlebyte_bigword);
    }
  else
    {
      set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
      set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
    }

  if (have_vfp_pseudos)
    {
      /* NOTE: These are the only pseudo registers used by
         the ARM target at the moment.  If more are added, a
         little more care in numbering will be needed.  */

      int num_pseudos = 32;
      if (have_neon_pseudos)
        num_pseudos += 16;
      set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
      set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
      set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
    }

  if (tdesc_data)
    {
      set_tdesc_pseudo_register_name (gdbarch, arm_register_name);

      tdesc_use_registers (gdbarch, tdesc, tdesc_data);

      /* Override tdesc_register_type to adjust the types of VFP
         registers for NEON.  */
      set_gdbarch_register_type (gdbarch, arm_register_type);
    }

  /* Add standard register aliases.  We add aliases even for those
     names which are used by the current architecture - it's simpler,
     and does no harm, since nothing ever lists user registers.  */
  for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
    user_reg_add (gdbarch, arm_register_aliases[i].name,
                  value_of_arm_user_reg, &arm_register_aliases[i].regnum);

  return gdbarch;
}
/* Implement the "dump_tdep" gdbarch method: print the ARM-specific
   per-architecture data to FILE (for "maintenance print arch").  */

static void
arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep == NULL)
    return;

  fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
                      (unsigned long) tdep->lowest_pc);
}
10261 extern initialize_file_ftype _initialize_arm_tdep
; /* -Wmissing-prototypes */
10264 _initialize_arm_tdep (void)
10266 struct ui_file
*stb
;
10268 struct cmd_list_element
*new_set
, *new_show
;
10269 const char *setname
;
10270 const char *setdesc
;
10271 const char *const *regnames
;
10273 static char *helptext
;
10274 char regdesc
[1024], *rdptr
= regdesc
;
10275 size_t rest
= sizeof (regdesc
);
10277 gdbarch_register (bfd_arch_arm
, arm_gdbarch_init
, arm_dump_tdep
);
10279 arm_objfile_data_key
10280 = register_objfile_data_with_cleanup (NULL
, arm_objfile_data_free
);
10282 /* Add ourselves to objfile event chain. */
10283 observer_attach_new_objfile (arm_exidx_new_objfile
);
10285 = register_objfile_data_with_cleanup (NULL
, arm_exidx_data_free
);
10287 /* Register an ELF OS ABI sniffer for ARM binaries. */
10288 gdbarch_register_osabi_sniffer (bfd_arch_arm
,
10289 bfd_target_elf_flavour
,
10290 arm_elf_osabi_sniffer
);
10292 /* Initialize the standard target descriptions. */
10293 initialize_tdesc_arm_with_m ();
10294 initialize_tdesc_arm_with_iwmmxt ();
10295 initialize_tdesc_arm_with_vfpv2 ();
10296 initialize_tdesc_arm_with_vfpv3 ();
10297 initialize_tdesc_arm_with_neon ();
10299 /* Get the number of possible sets of register names defined in opcodes. */
10300 num_disassembly_options
= get_arm_regname_num_options ();
10302 /* Add root prefix command for all "set arm"/"show arm" commands. */
10303 add_prefix_cmd ("arm", no_class
, set_arm_command
,
10304 _("Various ARM-specific commands."),
10305 &setarmcmdlist
, "set arm ", 0, &setlist
);
10307 add_prefix_cmd ("arm", no_class
, show_arm_command
,
10308 _("Various ARM-specific commands."),
10309 &showarmcmdlist
, "show arm ", 0, &showlist
);
10311 /* Sync the opcode insn printer with our register viewer. */
10312 parse_arm_disassembler_option ("reg-names-std");
10314 /* Initialize the array that will be passed to
10315 add_setshow_enum_cmd(). */
10316 valid_disassembly_styles
10317 = xmalloc ((num_disassembly_options
+ 1) * sizeof (char *));
10318 for (i
= 0; i
< num_disassembly_options
; i
++)
10320 numregs
= get_arm_regnames (i
, &setname
, &setdesc
, ®names
);
10321 valid_disassembly_styles
[i
] = setname
;
10322 length
= snprintf (rdptr
, rest
, "%s - %s\n", setname
, setdesc
);
10325 /* When we find the default names, tell the disassembler to use
10327 if (!strcmp (setname
, "std"))
10329 disassembly_style
= setname
;
10330 set_arm_regname_option (i
);
10333 /* Mark the end of valid options. */
10334 valid_disassembly_styles
[num_disassembly_options
] = NULL
;
10336 /* Create the help text. */
10337 stb
= mem_fileopen ();
10338 fprintf_unfiltered (stb
, "%s%s%s",
10339 _("The valid values are:\n"),
10341 _("The default is \"std\"."));
10342 helptext
= ui_file_xstrdup (stb
, NULL
);
10343 ui_file_delete (stb
);
10345 add_setshow_enum_cmd("disassembler", no_class
,
10346 valid_disassembly_styles
, &disassembly_style
,
10347 _("Set the disassembly style."),
10348 _("Show the disassembly style."),
10350 set_disassembly_style_sfunc
,
10351 NULL
, /* FIXME: i18n: The disassembly style is
10353 &setarmcmdlist
, &showarmcmdlist
);
10355 add_setshow_boolean_cmd ("apcs32", no_class
, &arm_apcs_32
,
10356 _("Set usage of ARM 32-bit mode."),
10357 _("Show usage of ARM 32-bit mode."),
10358 _("When off, a 26-bit PC will be used."),
10360 NULL
, /* FIXME: i18n: Usage of ARM 32-bit
10362 &setarmcmdlist
, &showarmcmdlist
);
10364 /* Add a command to allow the user to force the FPU model. */
10365 add_setshow_enum_cmd ("fpu", no_class
, fp_model_strings
, ¤t_fp_model
,
10366 _("Set the floating point type."),
10367 _("Show the floating point type."),
10368 _("auto - Determine the FP typefrom the OS-ABI.\n\
10369 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10370 fpa - FPA co-processor (GCC compiled).\n\
10371 softvfp - Software FP with pure-endian doubles.\n\
10372 vfp - VFP co-processor."),
10373 set_fp_model_sfunc
, show_fp_model
,
10374 &setarmcmdlist
, &showarmcmdlist
);
10376 /* Add a command to allow the user to force the ABI. */
10377 add_setshow_enum_cmd ("abi", class_support
, arm_abi_strings
, &arm_abi_string
,
10379 _("Show the ABI."),
10380 NULL
, arm_set_abi
, arm_show_abi
,
10381 &setarmcmdlist
, &showarmcmdlist
);
10383 /* Add two commands to allow the user to force the assumed
10385 add_setshow_enum_cmd ("fallback-mode", class_support
,
10386 arm_mode_strings
, &arm_fallback_mode_string
,
10387 _("Set the mode assumed when symbols are unavailable."),
10388 _("Show the mode assumed when symbols are unavailable."),
10389 NULL
, NULL
, arm_show_fallback_mode
,
10390 &setarmcmdlist
, &showarmcmdlist
);
10391 add_setshow_enum_cmd ("force-mode", class_support
,
10392 arm_mode_strings
, &arm_force_mode_string
,
10393 _("Set the mode assumed even when symbols are available."),
10394 _("Show the mode assumed even when symbols are available."),
10395 NULL
, NULL
, arm_show_force_mode
,
10396 &setarmcmdlist
, &showarmcmdlist
);
10398 /* Debugging flag. */
10399 add_setshow_boolean_cmd ("arm", class_maintenance
, &arm_debug
,
10400 _("Set ARM debugging."),
10401 _("Show ARM debugging."),
10402 _("When on, arm-specific debugging is enabled."),
10404 NULL
, /* FIXME: i18n: "ARM debugging is %s. */
10405 &setdebuglist
, &showdebuglist
);