/* Source: binutils-gdb.git, gdb/arm-tdep.c (snapshot circa 2012-03-01).  */
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988-1989, 1991-1993, 1995-1996, 1998-2012 Free
4 Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include <ctype.h> /* XXX for isupper (). */
22
23 #include "defs.h"
24 #include "frame.h"
25 #include "inferior.h"
26 #include "gdbcmd.h"
27 #include "gdbcore.h"
28 #include "gdb_string.h"
29 #include "dis-asm.h" /* For register styles. */
30 #include "regcache.h"
31 #include "reggroups.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
45 #include "observer.h"
46
47 #include "arm-tdep.h"
48 #include "gdb/sim-arm.h"
49
50 #include "elf-bfd.h"
51 #include "coff/internal.h"
52 #include "elf/arm.h"
53
54 #include "gdb_assert.h"
55 #include "vec.h"
56
57 #include "features/arm-with-m.c"
58 #include "features/arm-with-iwmmxt.c"
59 #include "features/arm-with-vfpv2.c"
60 #include "features/arm-with-vfpv3.c"
61 #include "features/arm-with-neon.c"
62
63 static int arm_debug;
64
65 /* Macros for setting and testing a bit in a minimal symbol that marks
66 it as Thumb function. The MSB of the minimal symbol's "info" field
67 is used for this purpose.
68
69 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
70 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
71
72 #define MSYMBOL_SET_SPECIAL(msym) \
73 MSYMBOL_TARGET_FLAG_1 (msym) = 1
74
75 #define MSYMBOL_IS_SPECIAL(msym) \
76 MSYMBOL_TARGET_FLAG_1 (msym)
77
78 /* Per-objfile data used for mapping symbols. */
79 static const struct objfile_data *arm_objfile_data_key;
80
81 struct arm_mapping_symbol
82 {
83 bfd_vma value;
84 char type;
85 };
86 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
87 DEF_VEC_O(arm_mapping_symbol_s);
88
89 struct arm_per_objfile
90 {
91 VEC(arm_mapping_symbol_s) **section_maps;
92 };
93
/* The list of available "set arm ..." and "show arm ..." commands.  */
static struct cmd_list_element *setarmcmdlist = NULL;
static struct cmd_list_element *showarmcmdlist = NULL;

/* The type of floating-point to use.  Keep this in sync with enum
   arm_float_model, and the help string in _initialize_arm_tdep.  */
static const char *const fp_model_strings[] =
{
  "auto",
  "softfpa",
  "fpa",
  "softvfp",
  "vfp",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
static const char *current_fp_model = "auto";

/* The ABI to use.  Keep this in sync with arm_abi_kind.  */
static const char *const arm_abi_strings[] =
{
  "auto",
  "APCS",
  "AAPCS",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
static const char *arm_abi_string = "auto";

/* The execution mode to assume.  */
static const char *const arm_mode_strings[] =
{
  "auto",
  "arm",
  "thumb",
  NULL
};

/* User-configurable modes consulted by arm_pc_is_thumb: the force
   mode overrides the symbol table entirely, while the fallback mode
   is only used when no other evidence decides ARM vs. Thumb.  */
static const char *arm_fallback_mode_string = "auto";
static const char *arm_force_mode_string = "auto";

/* Internal override of the execution mode.  -1 means no override,
   0 means override to ARM mode, 1 means override to Thumb mode.
   The effect is the same as if arm_force_mode has been set by the
   user (except the internal override has precedence over a user's
   arm_force_mode override).  */
static int arm_override_mode = -1;

/* Number of different reg name sets (options).  */
static int num_disassembly_options;
148
/* The standard register names, and all the valid aliases for them.  Note
   that `fp', `sp' and `pc' are not added in this alias list, because they
   have been added as builtin user registers in
   std-regs.c:_initialize_frame_reg.  */
static const struct
{
  const char *name;	/* Alias as typed by the user.  */
  int regnum;		/* Hardware register number it denotes.  */
} arm_register_aliases[] = {
  /* Basic register numbers.  */
  { "r0", 0 },
  { "r1", 1 },
  { "r2", 2 },
  { "r3", 3 },
  { "r4", 4 },
  { "r5", 5 },
  { "r6", 6 },
  { "r7", 7 },
  { "r8", 8 },
  { "r9", 9 },
  { "r10", 10 },
  { "r11", 11 },
  { "r12", 12 },
  { "r13", 13 },
  { "r14", 14 },
  { "r15", 15 },
  /* Synonyms (argument and variable registers).  */
  { "a1", 0 },
  { "a2", 1 },
  { "a3", 2 },
  { "a4", 3 },
  { "v1", 4 },
  { "v2", 5 },
  { "v3", 6 },
  { "v4", 7 },
  { "v5", 8 },
  { "v6", 9 },
  { "v7", 10 },
  { "v8", 11 },
  /* Other platform-specific names for r9.  */
  { "sb", 9 },
  { "tr", 9 },
  /* Special names.  */
  { "ip", 12 },
  { "lr", 14 },
  /* Names used by GCC (not listed in the ARM EABI).  */
  { "sl", 10 },
  /* A special name from the older ATPCS.  */
  { "wr", 7 },
};
199
/* Default names for the core registers, indexed by GDB register
   number (comments give the register numbers).  */
static const char *const arm_register_names[] =
{"r0",  "r1",  "r2",  "r3",	/*  0  1  2  3 */
 "r4",  "r5",  "r6",  "r7",	/*  4  5  6  7 */
 "r8",  "r9",  "r10", "r11",	/*  8  9 10 11 */
 "r12", "sp",  "lr",  "pc",	/* 12 13 14 15 */
 "f0",  "f1",  "f2",  "f3",	/* 16 17 18 19 */
 "f4",  "f5",  "f6",  "f7",	/* 20 21 22 23 */
 "fps", "cpsr" };		/* 24 25       */
208
/* Valid register name styles.  */
static const char **valid_disassembly_styles;

/* Disassembly style to use.  Default to "std" register names.  */
static const char *disassembly_style;

/* This is used to keep the bfd arch_info in sync with the disassembly
   style.  */
static void set_disassembly_style_sfunc(char *, int,
					struct cmd_list_element *);
static void set_disassembly_style (void);

/* Forward declarations; definitions appear later in the file.  */
static void convert_from_extended (const struct floatformat *, const void *,
				   void *, int);
static void convert_to_extended (const struct floatformat *, void *,
				 const void *, int);

static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
						struct regcache *regcache,
						int regnum, gdb_byte *buf);
static void arm_neon_quad_write (struct gdbarch *gdbarch,
				 struct regcache *regcache,
				 int regnum, const gdb_byte *buf);

/* Size of the Thumb instruction starting with halfword INST1; callers
   in this file compare the result against 4 to detect 32-bit Thumb-2
   encodings.  */
static int thumb_insn_size (unsigned short inst1);
234
/* Results of prologue analysis, cached for use by the frame unwinders.  */
struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

/* Prologue scanner for ARM (non-Thumb) code -- presumably the
   counterpart of thumb_analyze_prologue; defined later in this
   file.  */
static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
				       CORE_ADDR prologue_start,
				       CORE_ADDR prologue_end,
				       struct arm_prologue_cache *cache);

/* Architecture version for displaced stepping.  This effects the behaviour of
   certain instructions, and really should not be hard-wired.  */

#define DISPLACED_STEPPING_ARCH_VERSION		5
264
/* Addresses for calling Thumb functions have the bit 0 set.
   Here are some macros to test, set, or clear bit 0 of addresses.  */
#define IS_THUMB_ADDR(addr)	 ((addr) & 1)
#define MAKE_THUMB_ADDR(addr)	 ((addr) | 1)
#define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)

/* Set to true if the 32-bit mode is in use.  */

int arm_apcs_32 = 1;
274
275 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
276
277 int
278 arm_psr_thumb_bit (struct gdbarch *gdbarch)
279 {
280 if (gdbarch_tdep (gdbarch)->is_m)
281 return XPSR_T;
282 else
283 return CPSR_T;
284 }
285
286 /* Determine if FRAME is executing in Thumb mode. */
287
288 int
289 arm_frame_is_thumb (struct frame_info *frame)
290 {
291 CORE_ADDR cpsr;
292 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
293
294 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
295 directly (from a signal frame or dummy frame) or by interpreting
296 the saved LR (from a prologue or DWARF frame). So consult it and
297 trust the unwinders. */
298 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
299
300 return (cpsr & t_bit) != 0;
301 }
302
303 /* Callback for VEC_lower_bound. */
304
305 static inline int
306 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
307 const struct arm_mapping_symbol *rhs)
308 {
309 return lhs->value < rhs->value;
310 }
311
/* Search for the mapping symbol covering MEMADDR.  If one is found,
   return its type.  Otherwise, return 0.  If START is non-NULL,
   set *START to the location of the mapping symbol.  */

static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_per_objfile *data;
      VEC(arm_mapping_symbol_s) *map;
      /* Mapping symbol values are section-relative, so rebase MEMADDR
	 before the binary search.  */
      struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
					    0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_objfile_data_key);
      if (data != NULL)
	{
	  /* One sorted vector of mapping symbols per BFD section.  */
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_mapping_symbol_s, map))
	    {
	      struct arm_mapping_symbol *map_sym;

	      idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
				     arm_compare_mapping_symbols);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 mapping symbol covers this address.  */
	      if (idx < VEC_length (arm_mapping_symbol_s, map))
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
		  if (map_sym->value == map_key.value)
		    {
		      if (start)
			*start = map_sym->value + obj_section_addr (sec);
		      return map_sym->type;
		    }
		}

	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
		  if (start)
		    *start = map_sym->value + obj_section_addr (sec);
		  return map_sym->type;
		}
	    }
	}
    }

  /* No section found, no per-objfile data, or MEMADDR precedes the
     first mapping symbol in its section.  */
  return 0;
}
370
/* Determine if the program counter specified in MEMADDR is in a Thumb
   function.  This function should be called for addresses unrelated to
   any executing frame; otherwise, prefer arm_frame_is_thumb.

   The checks below form a strict priority chain: address bit 0,
   internal override, user force-mode, M-profile, mapping symbols,
   minimal-symbol "special" bit, user fallback-mode, and finally the
   live CPSR -- keep the order intact when modifying.  */

int
arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
{
  struct obj_section *sec;
  struct minimal_symbol *sym;
  char type;
  struct displaced_step_closure* dsc
    = get_displaced_step_closure_by_addr(memaddr);

  /* If checking the mode of displaced instruction in copy area, the mode
     should be determined by instruction on the original address.  */
  if (dsc)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: check mode of %.8lx instead of %.8lx\n",
			    (unsigned long) dsc->insn_addr,
			    (unsigned long) memaddr);
      memaddr = dsc->insn_addr;
    }

  /* If bit 0 of the address is set, assume this is a Thumb address.  */
  if (IS_THUMB_ADDR (memaddr))
    return 1;

  /* Respect internal mode override if active.  */
  if (arm_override_mode != -1)
    return arm_override_mode;

  /* If the user wants to override the symbol table, let him.  */
  if (strcmp (arm_force_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_force_mode_string, "thumb") == 0)
    return 1;

  /* ARM v6-M and v7-M are always in Thumb mode.  */
  if (gdbarch_tdep (gdbarch)->is_m)
    return 1;

  /* If there are mapping symbols, consult them.  */
  type = arm_find_mapping_symbol (memaddr, NULL);
  if (type)
    return type == 't';

  /* Thumb functions have a "special" bit set in minimal symbols.  */
  sym = lookup_minimal_symbol_by_pc (memaddr);
  if (sym)
    return (MSYMBOL_IS_SPECIAL (sym));

  /* If the user wants to override the fallback mode, let them.  */
  if (strcmp (arm_fallback_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_fallback_mode_string, "thumb") == 0)
    return 1;

  /* If we couldn't find any symbol, but we're talking to a running
     target, then trust the current value of $cpsr.  This lets
     "display/i $pc" always show the correct mode (though if there is
     a symbol table we will not reach here, so it still may not be
     displayed in the mode it will be executed).  */
  if (target_has_registers)
    return arm_frame_is_thumb (get_current_frame ());

  /* Otherwise we're out of luck; we assume ARM.  */
  return 0;
}
441
442 /* Remove useless bits from addresses in a running program. */
443 static CORE_ADDR
444 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
445 {
446 if (arm_apcs_32)
447 return UNMAKE_THUMB_ADDR (val);
448 else
449 return (val & 0x03fffffc);
450 }
451
452 /* When reading symbols, we need to zap the low bit of the address,
453 which may be set to 1 for Thumb functions. */
454 static CORE_ADDR
455 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
456 {
457 return val & ~1;
458 }
459
/* Return 1 if PC is the start of a compiler helper function which
   can be safely ignored during prologue skipping.  IS_THUMB is true
   if the function is known to be a Thumb function due to the way it
   is being called.  */
static int
skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct minimal_symbol *msym;

  /* Only consider the symbol if PC is exactly its entry point.  */
  msym = lookup_minimal_symbol_by_pc (pc);
  if (msym != NULL
      && SYMBOL_VALUE_ADDRESS (msym) == pc
      && SYMBOL_LINKAGE_NAME (msym) != NULL)
    {
      const char *name = SYMBOL_LINKAGE_NAME (msym);

      /* The GNU linker's Thumb call stub to foo is named
	 __foo_from_thumb.  Skipping the stub's two-character "__"
	 prefix lets the prefix comparisons below also match calls
	 made through such a stub.  */
      if (strstr (name, "_from_thumb") != NULL)
	name += 2;

      /* On soft-float targets, __truncdfsf2 is called to convert promoted
	 arguments to their argument types in non-prototyped
	 functions.  */
      if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
	return 1;

      /* Internal functions related to thread-local storage.  */
      if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
	return 1;
    }
  else
    {
      /* If we run against a stripped glibc, we may be unable to identify
	 special functions by name.  Check for one important case,
	 __aeabi_read_tp, by comparing the *code* against the default
	 implementation (this is hand-written ARM assembler in glibc).  */

      if (!is_thumb
	  && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
	     == 0xe3e00a0f /* mov r0, #0xffff0fff */
	  && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
	     == 0xe240f01f) /* sub pc, r0, #31 */
	return 1;
    }

  return 0;
}
513
/* Support routines for instruction parsing.  */

/* submask(x): mask covering bits 0..X inclusive.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* bit(obj,st): bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* bits(obj,st,fn): bits ST..FN of OBJ, zero-extended.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* sbits(obj,st,fn): bits ST..FN of OBJ, sign-extended (bit FN is the
   sign bit).  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* BranchDest(addr,instr): target of an ARM branch at ADDR: the
   pipeline PC (ADDR + 8) plus the 24-bit signed offset scaled by 4.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))

/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)		\
   | (bits ((insn1), 10, 10) << 11)	\
   | (bits ((insn2), 12, 14) << 8)	\
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12) \
   | bits ((insn), 0, 11))
537
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.
   IMM is the 12-bit encoded immediate i:imm3:a:bcdefgh; the top five
   bits select between the four byte-replication patterns and a
   rotated 8-bit constant.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int rot = imm >> 7;
  unsigned int byte = imm & 0xff;

  if (rot >= 8)
    /* Rotated constant: 0x80|imm7 rotated right by ROT (8..31),
       which for an 8-bit value equals a left shift by 32 - ROT.  */
    return (0x80 | (imm & 0x7f)) << (32 - rot);

  /* Byte-replication patterns, selected by bits 9:8 of IMM.  */
  switch (rot >> 1)
    {
    case 0:	/* 00000000 00000000 00000000 abcdefgh */
      return byte;
    case 1:	/* 00000000 abcdefgh 00000000 abcdefgh */
      return byte | (byte << 16);
    case 2:	/* abcdefgh 00000000 abcdefgh 00000000 */
      return (byte << 8) | (byte << 24);
    default:	/* abcdefgh abcdefgh abcdefgh abcdefgh */
      return byte | (byte << 8) | (byte << 16) | (byte << 24);
    }
}
561
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  return ((inst & 0xff00) == 0xbd00	/* pop {rlist, pc} */
	  || (inst & 0xf000) == 0xd000	/* conditional branch */
	  || (inst & 0xf800) == 0xe000	/* unconditional branch */
	  || (inst & 0xff00) == 0x4700	/* bx REG, blx REG */
	  || (inst & 0xff87) == 0x4687	/* mov pc, REG */
	  || (inst & 0xf500) == 0xb100);	/* CBNZ or CBZ.  */
}
588
/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
   might change control flow, 0 otherwise.  INST1 is the first and
   INST2 the second halfword of the encoding.  */

static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  Bits 7 and 8 of INST1 select the
	 addressing mode; a load multiple only changes the PC when
	 PC (bit 15 of the register list in INST2) is loaded.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR PC.  Only some addressing modes of this encoding are
	 defined; the remaining bit patterns fall through to 0.  */
      if (bits (inst1, 0, 3) == 15)
	return 1;
      if (bit (inst1, 7))
	return 1;
      if (bit (inst2, 11))
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)
	return 1;

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  return 0;
}
683
684 /* Analyze a Thumb prologue, looking for a recognizable stack frame
685 and frame pointer. Scan until we encounter a store that could
686 clobber the stack frame unexpectedly, or an unknown instruction.
687 Return the last address which is definitely safe to skip for an
688 initial breakpoint. */
689
690 static CORE_ADDR
691 thumb_analyze_prologue (struct gdbarch *gdbarch,
692 CORE_ADDR start, CORE_ADDR limit,
693 struct arm_prologue_cache *cache)
694 {
695 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
696 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
697 int i;
698 pv_t regs[16];
699 struct pv_area *stack;
700 struct cleanup *back_to;
701 CORE_ADDR offset;
702 CORE_ADDR unrecognized_pc = 0;
703
704 for (i = 0; i < 16; i++)
705 regs[i] = pv_register (i, 0);
706 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
707 back_to = make_cleanup_free_pv_area (stack);
708
709 while (start < limit)
710 {
711 unsigned short insn;
712
713 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
714
715 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
716 {
717 int regno;
718 int mask;
719
720 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
721 break;
722
723 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
724 whether to save LR (R14). */
725 mask = (insn & 0xff) | ((insn & 0x100) << 6);
726
727 /* Calculate offsets of saved R0-R7 and LR. */
728 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
729 if (mask & (1 << regno))
730 {
731 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
732 -4);
733 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
734 }
735 }
736 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
737 sub sp, #simm */
738 {
739 offset = (insn & 0x7f) << 2; /* get scaled offset */
740 if (insn & 0x80) /* Check for SUB. */
741 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
742 -offset);
743 else
744 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
745 offset);
746 }
747 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
748 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
749 (insn & 0xff) << 2);
750 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
751 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
752 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
753 bits (insn, 6, 8));
754 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
755 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
756 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
757 bits (insn, 0, 7));
758 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
759 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
760 && pv_is_constant (regs[bits (insn, 3, 5)]))
761 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
762 regs[bits (insn, 6, 8)]);
763 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
764 && pv_is_constant (regs[bits (insn, 3, 6)]))
765 {
766 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
767 int rm = bits (insn, 3, 6);
768 regs[rd] = pv_add (regs[rd], regs[rm]);
769 }
770 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
771 {
772 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
773 int src_reg = (insn & 0x78) >> 3;
774 regs[dst_reg] = regs[src_reg];
775 }
776 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
777 {
778 /* Handle stores to the stack. Normally pushes are used,
779 but with GCC -mtpcs-frame, there may be other stores
780 in the prologue to create the frame. */
781 int regno = (insn >> 8) & 0x7;
782 pv_t addr;
783
784 offset = (insn & 0xff) << 2;
785 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
786
787 if (pv_area_store_would_trash (stack, addr))
788 break;
789
790 pv_area_store (stack, addr, 4, regs[regno]);
791 }
792 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
793 {
794 int rd = bits (insn, 0, 2);
795 int rn = bits (insn, 3, 5);
796 pv_t addr;
797
798 offset = bits (insn, 6, 10) << 2;
799 addr = pv_add_constant (regs[rn], offset);
800
801 if (pv_area_store_would_trash (stack, addr))
802 break;
803
804 pv_area_store (stack, addr, 4, regs[rd]);
805 }
806 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
807 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
808 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
809 /* Ignore stores of argument registers to the stack. */
810 ;
811 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
812 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
813 /* Ignore block loads from the stack, potentially copying
814 parameters from memory. */
815 ;
816 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
817 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
818 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
819 /* Similarly ignore single loads from the stack. */
820 ;
821 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
822 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
823 /* Skip register copies, i.e. saves to another register
824 instead of the stack. */
825 ;
826 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
827 /* Recognize constant loads; even with small stacks these are necessary
828 on Thumb. */
829 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
830 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
831 {
832 /* Constant pool loads, for the same reason. */
833 unsigned int constant;
834 CORE_ADDR loc;
835
836 loc = start + 4 + bits (insn, 0, 7) * 4;
837 constant = read_memory_unsigned_integer (loc, 4, byte_order);
838 regs[bits (insn, 8, 10)] = pv_constant (constant);
839 }
840 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
841 {
842 unsigned short inst2;
843
844 inst2 = read_memory_unsigned_integer (start + 2, 2,
845 byte_order_for_code);
846
847 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
848 {
849 /* BL, BLX. Allow some special function calls when
850 skipping the prologue; GCC generates these before
851 storing arguments to the stack. */
852 CORE_ADDR nextpc;
853 int j1, j2, imm1, imm2;
854
855 imm1 = sbits (insn, 0, 10);
856 imm2 = bits (inst2, 0, 10);
857 j1 = bit (inst2, 13);
858 j2 = bit (inst2, 11);
859
860 offset = ((imm1 << 12) + (imm2 << 1));
861 offset ^= ((!j2) << 22) | ((!j1) << 23);
862
863 nextpc = start + 4 + offset;
864 /* For BLX make sure to clear the low bits. */
865 if (bit (inst2, 12) == 0)
866 nextpc = nextpc & 0xfffffffc;
867
868 if (!skip_prologue_function (gdbarch, nextpc,
869 bit (inst2, 12) != 0))
870 break;
871 }
872
873 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
874 { registers } */
875 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
876 {
877 pv_t addr = regs[bits (insn, 0, 3)];
878 int regno;
879
880 if (pv_area_store_would_trash (stack, addr))
881 break;
882
883 /* Calculate offsets of saved registers. */
884 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
885 if (inst2 & (1 << regno))
886 {
887 addr = pv_add_constant (addr, -4);
888 pv_area_store (stack, addr, 4, regs[regno]);
889 }
890
891 if (insn & 0x0020)
892 regs[bits (insn, 0, 3)] = addr;
893 }
894
895 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
896 [Rn, #+/-imm]{!} */
897 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
898 {
899 int regno1 = bits (inst2, 12, 15);
900 int regno2 = bits (inst2, 8, 11);
901 pv_t addr = regs[bits (insn, 0, 3)];
902
903 offset = inst2 & 0xff;
904 if (insn & 0x0080)
905 addr = pv_add_constant (addr, offset);
906 else
907 addr = pv_add_constant (addr, -offset);
908
909 if (pv_area_store_would_trash (stack, addr))
910 break;
911
912 pv_area_store (stack, addr, 4, regs[regno1]);
913 pv_area_store (stack, pv_add_constant (addr, 4),
914 4, regs[regno2]);
915
916 if (insn & 0x0020)
917 regs[bits (insn, 0, 3)] = addr;
918 }
919
920 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
921 && (inst2 & 0x0c00) == 0x0c00
922 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
923 {
924 int regno = bits (inst2, 12, 15);
925 pv_t addr = regs[bits (insn, 0, 3)];
926
927 offset = inst2 & 0xff;
928 if (inst2 & 0x0200)
929 addr = pv_add_constant (addr, offset);
930 else
931 addr = pv_add_constant (addr, -offset);
932
933 if (pv_area_store_would_trash (stack, addr))
934 break;
935
936 pv_area_store (stack, addr, 4, regs[regno]);
937
938 if (inst2 & 0x0100)
939 regs[bits (insn, 0, 3)] = addr;
940 }
941
942 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
943 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
944 {
945 int regno = bits (inst2, 12, 15);
946 pv_t addr;
947
948 offset = inst2 & 0xfff;
949 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
950
951 if (pv_area_store_would_trash (stack, addr))
952 break;
953
954 pv_area_store (stack, addr, 4, regs[regno]);
955 }
956
957 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
958 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
959 /* Ignore stores of argument registers to the stack. */
960 ;
961
962 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
963 && (inst2 & 0x0d00) == 0x0c00
964 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
965 /* Ignore stores of argument registers to the stack. */
966 ;
967
968 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
969 { registers } */
970 && (inst2 & 0x8000) == 0x0000
971 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
972 /* Ignore block loads from the stack, potentially copying
973 parameters from memory. */
974 ;
975
976 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
977 [Rn, #+/-imm] */
978 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
979 /* Similarly ignore dual loads from the stack. */
980 ;
981
982 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
983 && (inst2 & 0x0d00) == 0x0c00
984 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
985 /* Similarly ignore single loads from the stack. */
986 ;
987
988 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
989 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
990 /* Similarly ignore single loads from the stack. */
991 ;
992
993 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
994 && (inst2 & 0x8000) == 0x0000)
995 {
996 unsigned int imm = ((bits (insn, 10, 10) << 11)
997 | (bits (inst2, 12, 14) << 8)
998 | bits (inst2, 0, 7));
999
1000 regs[bits (inst2, 8, 11)]
1001 = pv_add_constant (regs[bits (insn, 0, 3)],
1002 thumb_expand_immediate (imm));
1003 }
1004
1005 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1006 && (inst2 & 0x8000) == 0x0000)
1007 {
1008 unsigned int imm = ((bits (insn, 10, 10) << 11)
1009 | (bits (inst2, 12, 14) << 8)
1010 | bits (inst2, 0, 7));
1011
1012 regs[bits (inst2, 8, 11)]
1013 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1014 }
1015
1016 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1017 && (inst2 & 0x8000) == 0x0000)
1018 {
1019 unsigned int imm = ((bits (insn, 10, 10) << 11)
1020 | (bits (inst2, 12, 14) << 8)
1021 | bits (inst2, 0, 7));
1022
1023 regs[bits (inst2, 8, 11)]
1024 = pv_add_constant (regs[bits (insn, 0, 3)],
1025 - (CORE_ADDR) thumb_expand_immediate (imm));
1026 }
1027
1028 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1029 && (inst2 & 0x8000) == 0x0000)
1030 {
1031 unsigned int imm = ((bits (insn, 10, 10) << 11)
1032 | (bits (inst2, 12, 14) << 8)
1033 | bits (inst2, 0, 7));
1034
1035 regs[bits (inst2, 8, 11)]
1036 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1037 }
1038
1039 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1040 {
1041 unsigned int imm = ((bits (insn, 10, 10) << 11)
1042 | (bits (inst2, 12, 14) << 8)
1043 | bits (inst2, 0, 7));
1044
1045 regs[bits (inst2, 8, 11)]
1046 = pv_constant (thumb_expand_immediate (imm));
1047 }
1048
1049 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1050 {
1051 unsigned int imm
1052 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1053
1054 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1055 }
1056
1057 else if (insn == 0xea5f /* mov.w Rd,Rm */
1058 && (inst2 & 0xf0f0) == 0)
1059 {
1060 int dst_reg = (inst2 & 0x0f00) >> 8;
1061 int src_reg = inst2 & 0xf;
1062 regs[dst_reg] = regs[src_reg];
1063 }
1064
1065 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1066 {
1067 /* Constant pool loads. */
1068 unsigned int constant;
1069 CORE_ADDR loc;
1070
1071 offset = bits (insn, 0, 11);
1072 if (insn & 0x0080)
1073 loc = start + 4 + offset;
1074 else
1075 loc = start + 4 - offset;
1076
1077 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1078 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1079 }
1080
1081 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1082 {
1083 /* Constant pool loads. */
1084 unsigned int constant;
1085 CORE_ADDR loc;
1086
1087 offset = bits (insn, 0, 7) << 2;
1088 if (insn & 0x0080)
1089 loc = start + 4 + offset;
1090 else
1091 loc = start + 4 - offset;
1092
1093 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1094 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1095
1096 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1097 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1098 }
1099
1100 else if (thumb2_instruction_changes_pc (insn, inst2))
1101 {
1102 /* Don't scan past anything that might change control flow. */
1103 break;
1104 }
1105 else
1106 {
1107 /* The optimizer might shove anything into the prologue,
1108 so we just skip what we don't recognize. */
1109 unrecognized_pc = start;
1110 }
1111
1112 start += 2;
1113 }
1114 else if (thumb_instruction_changes_pc (insn))
1115 {
1116 /* Don't scan past anything that might change control flow. */
1117 break;
1118 }
1119 else
1120 {
1121 /* The optimizer might shove anything into the prologue,
1122 so we just skip what we don't recognize. */
1123 unrecognized_pc = start;
1124 }
1125
1126 start += 2;
1127 }
1128
1129 if (arm_debug)
1130 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1131 paddress (gdbarch, start));
1132
1133 if (unrecognized_pc == 0)
1134 unrecognized_pc = start;
1135
1136 if (cache == NULL)
1137 {
1138 do_cleanups (back_to);
1139 return unrecognized_pc;
1140 }
1141
1142 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1143 {
1144 /* Frame pointer is fp. Frame size is constant. */
1145 cache->framereg = ARM_FP_REGNUM;
1146 cache->framesize = -regs[ARM_FP_REGNUM].k;
1147 }
1148 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1149 {
1150 /* Frame pointer is r7. Frame size is constant. */
1151 cache->framereg = THUMB_FP_REGNUM;
1152 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1153 }
1154 else
1155 {
1156 /* Try the stack pointer... this is a bit desperate. */
1157 cache->framereg = ARM_SP_REGNUM;
1158 cache->framesize = -regs[ARM_SP_REGNUM].k;
1159 }
1160
1161 for (i = 0; i < 16; i++)
1162 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1163 cache->saved_regs[i].addr = offset;
1164
1165 do_cleanups (back_to);
1166 return unrecognized_pc;
1167 }
1168
1169
/* Try to analyze the instructions starting from PC, which load symbol
   __stack_chk_guard.  If the instructions are recognized, return the
   address of symbol __stack_chk_guard, set the destination register
   number to *DESTREG, and set the size of these instructions in
   *OFFSET.  Return 0 if the instructions are not recognized.  */
1175
1176 static CORE_ADDR
1177 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1178 unsigned int *destreg, int *offset)
1179 {
1180 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1181 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1182 unsigned int low, high, address;
1183
1184 address = 0;
1185 if (is_thumb)
1186 {
1187 unsigned short insn1
1188 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1189
1190 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1191 {
1192 *destreg = bits (insn1, 8, 10);
1193 *offset = 2;
1194 address = bits (insn1, 0, 7);
1195 }
1196 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1197 {
1198 unsigned short insn2
1199 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1200
1201 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1202
1203 insn1
1204 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1205 insn2
1206 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1207
1208 /* movt Rd, #const */
1209 if ((insn1 & 0xfbc0) == 0xf2c0)
1210 {
1211 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1212 *destreg = bits (insn2, 8, 11);
1213 *offset = 8;
1214 address = (high << 16 | low);
1215 }
1216 }
1217 }
1218 else
1219 {
1220 unsigned int insn
1221 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1222
1223 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1224 {
1225 address = bits (insn, 0, 11);
1226 *destreg = bits (insn, 12, 15);
1227 *offset = 4;
1228 }
1229 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1230 {
1231 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1232
1233 insn
1234 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1235
1236 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1237 {
1238 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1239 *destreg = bits (insn, 12, 15);
1240 *offset = 8;
1241 address = (high << 16 | low);
1242 }
1243 }
1244 }
1245
1246 return address;
1247 }
1248
1249 /* Try to skip a sequence of instructions used for stack protector. If PC
1250 points to the first instruction of this sequence, return the address of
1251 first instruction after this sequence, otherwise, return original PC.
1252
1253 On arm, this sequence of instructions is composed of mainly three steps,
1254 Step 1: load symbol __stack_chk_guard,
1255 Step 2: load from address of __stack_chk_guard,
1256 Step 3: store it to somewhere else.
1257
1258 Usually, instructions on step 2 and step 3 are the same on various ARM
1259 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1260 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1261 instructions in step 1 vary from different ARM architectures. On ARMv7,
1262 they are,
1263
1264 movw Rn, #:lower16:__stack_chk_guard
1265 movt Rn, #:upper16:__stack_chk_guard
1266
1267 On ARMv5t, it is,
1268
1269 ldr Rn, .Label
1270 ....
   .Label:
1272 .word __stack_chk_guard
1273
1274 Since ldr/str is a very popular instruction, we can't use them as
1275 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1276 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */
1278
1279 static CORE_ADDR
1280 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1281 {
1282 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1283 unsigned int address, basereg;
1284 struct minimal_symbol *stack_chk_guard;
1285 int offset;
1286 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1287 CORE_ADDR addr;
1288
1289 /* Try to parse the instructions in Step 1. */
1290 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1291 &basereg, &offset);
1292 if (!addr)
1293 return pc;
1294
1295 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1296 /* If name of symbol doesn't start with '__stack_chk_guard', this
1297 instruction sequence is not for stack protector. If symbol is
1298 removed, we conservatively think this sequence is for stack protector. */
1299 if (stack_chk_guard
1300 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1301 strlen ("__stack_chk_guard")) != 0)
1302 return pc;
1303
1304 if (is_thumb)
1305 {
1306 unsigned int destreg;
1307 unsigned short insn
1308 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1309
1310 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1311 if ((insn & 0xf800) != 0x6800)
1312 return pc;
1313 if (bits (insn, 3, 5) != basereg)
1314 return pc;
1315 destreg = bits (insn, 0, 2);
1316
1317 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1318 byte_order_for_code);
1319 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1320 if ((insn & 0xf800) != 0x6000)
1321 return pc;
1322 if (destreg != bits (insn, 0, 2))
1323 return pc;
1324 }
1325 else
1326 {
1327 unsigned int destreg;
1328 unsigned int insn
1329 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1330
1331 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1332 if ((insn & 0x0e500000) != 0x04100000)
1333 return pc;
1334 if (bits (insn, 16, 19) != basereg)
1335 return pc;
1336 destreg = bits (insn, 12, 15);
1337 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1338 insn = read_memory_unsigned_integer (pc + offset + 4,
1339 4, byte_order_for_code);
1340 if ((insn & 0x0e500000) != 0x04000000)
1341 return pc;
1342 if (bits (insn, 12, 15) != destreg)
1343 return pc;
1344 }
1345 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1346 on arm. */
1347 if (is_thumb)
1348 return pc + offset + 4;
1349 else
1350 return pc + offset + 8;
1351 }
1352
1353 /* Advance the PC across any function entry prologue instructions to
1354 reach some "real" code.
1355
1356 The APCS (ARM Procedure Call Standard) defines the following
1357 prologue:
1358
1359 mov ip, sp
1360 [stmfd sp!, {a1,a2,a3,a4}]
1361 stmfd sp!, {...,fp,ip,lr,pc}
1362 [stfe f7, [sp, #-12]!]
1363 [stfe f6, [sp, #-12]!]
1364 [stfe f5, [sp, #-12]!]
1365 [stfe f4, [sp, #-12]!]
1366 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1367
1368 static CORE_ADDR
1369 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1370 {
1371 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1372 unsigned long inst;
1373 CORE_ADDR skip_pc;
1374 CORE_ADDR func_addr, limit_pc;
1375 struct symtab_and_line sal;
1376
1377 /* See if we can determine the end of the prologue via the symbol table.
1378 If so, then return either PC, or the PC after the prologue, whichever
1379 is greater. */
1380 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1381 {
1382 CORE_ADDR post_prologue_pc
1383 = skip_prologue_using_sal (gdbarch, func_addr);
1384 struct symtab *s = find_pc_symtab (func_addr);
1385
1386 if (post_prologue_pc)
1387 post_prologue_pc
1388 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1389
1390
1391 /* GCC always emits a line note before the prologue and another
1392 one after, even if the two are at the same address or on the
1393 same line. Take advantage of this so that we do not need to
1394 know every instruction that might appear in the prologue. We
1395 will have producer information for most binaries; if it is
1396 missing (e.g. for -gstabs), assuming the GNU tools. */
1397 if (post_prologue_pc
1398 && (s == NULL
1399 || s->producer == NULL
1400 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1401 return post_prologue_pc;
1402
1403 if (post_prologue_pc != 0)
1404 {
1405 CORE_ADDR analyzed_limit;
1406
1407 /* For non-GCC compilers, make sure the entire line is an
1408 acceptable prologue; GDB will round this function's
1409 return value up to the end of the following line so we
1410 can not skip just part of a line (and we do not want to).
1411
1412 RealView does not treat the prologue specially, but does
1413 associate prologue code with the opening brace; so this
1414 lets us skip the first line if we think it is the opening
1415 brace. */
1416 if (arm_pc_is_thumb (gdbarch, func_addr))
1417 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1418 post_prologue_pc, NULL);
1419 else
1420 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1421 post_prologue_pc, NULL);
1422
1423 if (analyzed_limit != post_prologue_pc)
1424 return func_addr;
1425
1426 return post_prologue_pc;
1427 }
1428 }
1429
1430 /* Can't determine prologue from the symbol table, need to examine
1431 instructions. */
1432
1433 /* Find an upper limit on the function prologue using the debug
1434 information. If the debug information could not be used to provide
1435 that bound, then use an arbitrary large number as the upper bound. */
1436 /* Like arm_scan_prologue, stop no later than pc + 64. */
1437 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1438 if (limit_pc == 0)
1439 limit_pc = pc + 64; /* Magic. */
1440
1441
1442 /* Check if this is Thumb code. */
1443 if (arm_pc_is_thumb (gdbarch, pc))
1444 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1445
1446 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1447 {
1448 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1449
1450 /* "mov ip, sp" is no longer a required part of the prologue. */
1451 if (inst == 0xe1a0c00d) /* mov ip, sp */
1452 continue;
1453
1454 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1455 continue;
1456
1457 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1458 continue;
1459
1460 /* Some prologues begin with "str lr, [sp, #-4]!". */
1461 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1462 continue;
1463
1464 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1465 continue;
1466
1467 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1468 continue;
1469
1470 /* Any insns after this point may float into the code, if it makes
1471 for better instruction scheduling, so we skip them only if we
1472 find them, but still consider the function to be frame-ful. */
1473
1474 /* We may have either one sfmfd instruction here, or several stfe
1475 insns, depending on the version of floating point code we
1476 support. */
1477 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1478 continue;
1479
1480 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1481 continue;
1482
1483 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1484 continue;
1485
1486 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1487 continue;
1488
1489 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1490 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1491 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1492 continue;
1493
1494 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1495 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1496 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1497 continue;
1498
1499 /* Un-recognized instruction; stop scanning. */
1500 break;
1501 }
1502
1503 return skip_pc; /* End of prologue. */
1504 }
1505
1506 /* *INDENT-OFF* */
1507 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1508 This function decodes a Thumb function prologue to determine:
1509 1) the size of the stack frame
1510 2) which registers are saved on it
1511 3) the offsets of saved regs
1512 4) the offset from the stack pointer to the frame pointer
1513
1514 A typical Thumb function prologue would create this stack frame
1515 (offsets relative to FP)
1516 old SP -> 24 stack parameters
1517 20 LR
1518 16 R7
1519 R7 -> 0 local variables (16 bytes)
1520 SP -> -12 additional stack space (12 bytes)
1521 The frame size would thus be 36 bytes, and the frame offset would be
1522 12 bytes. The frame register is R7.
1523
   The comments for thumb_analyze_prologue() describe the algorithm we
   use to detect the end of the prolog.  */
1526 /* *INDENT-ON* */
1527
1528 static void
1529 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1530 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1531 {
1532 CORE_ADDR prologue_start;
1533 CORE_ADDR prologue_end;
1534 CORE_ADDR current_pc;
1535
1536 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1537 &prologue_end))
1538 {
1539 /* See comment in arm_scan_prologue for an explanation of
1540 this heuristics. */
1541 if (prologue_end > prologue_start + 64)
1542 {
1543 prologue_end = prologue_start + 64;
1544 }
1545 }
1546 else
1547 /* We're in the boondocks: we have no idea where the start of the
1548 function is. */
1549 return;
1550
1551 prologue_end = min (prologue_end, prev_pc);
1552
1553 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1554 }
1555
1556 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1557
1558 static int
1559 arm_instruction_changes_pc (uint32_t this_instr)
1560 {
1561 if (bits (this_instr, 28, 31) == INST_NV)
1562 /* Unconditional instructions. */
1563 switch (bits (this_instr, 24, 27))
1564 {
1565 case 0xa:
1566 case 0xb:
1567 /* Branch with Link and change to Thumb. */
1568 return 1;
1569 case 0xc:
1570 case 0xd:
1571 case 0xe:
1572 /* Coprocessor register transfer. */
1573 if (bits (this_instr, 12, 15) == 15)
1574 error (_("Invalid update to pc in instruction"));
1575 return 0;
1576 default:
1577 return 0;
1578 }
1579 else
1580 switch (bits (this_instr, 25, 27))
1581 {
1582 case 0x0:
1583 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1584 {
1585 /* Multiplies and extra load/stores. */
1586 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1587 /* Neither multiplies nor extension load/stores are allowed
1588 to modify PC. */
1589 return 0;
1590
1591 /* Otherwise, miscellaneous instructions. */
1592
1593 /* BX <reg>, BXJ <reg>, BLX <reg> */
1594 if (bits (this_instr, 4, 27) == 0x12fff1
1595 || bits (this_instr, 4, 27) == 0x12fff2
1596 || bits (this_instr, 4, 27) == 0x12fff3)
1597 return 1;
1598
1599 /* Other miscellaneous instructions are unpredictable if they
1600 modify PC. */
1601 return 0;
1602 }
1603 /* Data processing instruction. Fall through. */
1604
1605 case 0x1:
1606 if (bits (this_instr, 12, 15) == 15)
1607 return 1;
1608 else
1609 return 0;
1610
1611 case 0x2:
1612 case 0x3:
1613 /* Media instructions and architecturally undefined instructions. */
1614 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1615 return 0;
1616
1617 /* Stores. */
1618 if (bit (this_instr, 20) == 0)
1619 return 0;
1620
1621 /* Loads. */
1622 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1623 return 1;
1624 else
1625 return 0;
1626
1627 case 0x4:
1628 /* Load/store multiple. */
1629 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1630 return 1;
1631 else
1632 return 0;
1633
1634 case 0x5:
1635 /* Branch and branch with link. */
1636 return 1;
1637
1638 case 0x6:
1639 case 0x7:
1640 /* Coprocessor transfers or SWIs can not affect PC. */
1641 return 0;
1642
1643 default:
1644 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1645 }
1646 }
1647
1648 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1649 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1650 fill it in. Return the first address not recognized as a prologue
1651 instruction.
1652
1653 We recognize all the instructions typically found in ARM prologues,
1654 plus harmless instructions which can be skipped (either for analysis
1655 purposes, or a more restrictive set that can be skipped when finding
1656 the end of the prologue). */
1657
1658 static CORE_ADDR
1659 arm_analyze_prologue (struct gdbarch *gdbarch,
1660 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1661 struct arm_prologue_cache *cache)
1662 {
1663 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1664 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1665 int regno;
1666 CORE_ADDR offset, current_pc;
1667 pv_t regs[ARM_FPS_REGNUM];
1668 struct pv_area *stack;
1669 struct cleanup *back_to;
1670 int framereg, framesize;
1671 CORE_ADDR unrecognized_pc = 0;
1672
1673 /* Search the prologue looking for instructions that set up the
1674 frame pointer, adjust the stack pointer, and save registers.
1675
1676 Be careful, however, and if it doesn't look like a prologue,
1677 don't try to scan it. If, for instance, a frameless function
1678 begins with stmfd sp!, then we will tell ourselves there is
1679 a frame, which will confuse stack traceback, as well as "finish"
1680 and other operations that rely on a knowledge of the stack
1681 traceback. */
1682
1683 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1684 regs[regno] = pv_register (regno, 0);
1685 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1686 back_to = make_cleanup_free_pv_area (stack);
1687
1688 for (current_pc = prologue_start;
1689 current_pc < prologue_end;
1690 current_pc += 4)
1691 {
1692 unsigned int insn
1693 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1694
1695 if (insn == 0xe1a0c00d) /* mov ip, sp */
1696 {
1697 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1698 continue;
1699 }
1700 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1701 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1702 {
1703 unsigned imm = insn & 0xff; /* immediate value */
1704 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1705 int rd = bits (insn, 12, 15);
1706 imm = (imm >> rot) | (imm << (32 - rot));
1707 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1708 continue;
1709 }
1710 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1711 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1712 {
1713 unsigned imm = insn & 0xff; /* immediate value */
1714 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1715 int rd = bits (insn, 12, 15);
1716 imm = (imm >> rot) | (imm << (32 - rot));
1717 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1718 continue;
1719 }
1720 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1721 [sp, #-4]! */
1722 {
1723 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1724 break;
1725 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1726 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1727 regs[bits (insn, 12, 15)]);
1728 continue;
1729 }
1730 else if ((insn & 0xffff0000) == 0xe92d0000)
1731 /* stmfd sp!, {..., fp, ip, lr, pc}
1732 or
1733 stmfd sp!, {a1, a2, a3, a4} */
1734 {
1735 int mask = insn & 0xffff;
1736
1737 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1738 break;
1739
1740 /* Calculate offsets of saved registers. */
1741 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1742 if (mask & (1 << regno))
1743 {
1744 regs[ARM_SP_REGNUM]
1745 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1746 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1747 }
1748 }
1749 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1750 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1751 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1752 {
1753 /* No need to add this to saved_regs -- it's just an arg reg. */
1754 continue;
1755 }
1756 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1757 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1758 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1759 {
1760 /* No need to add this to saved_regs -- it's just an arg reg. */
1761 continue;
1762 }
1763 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1764 { registers } */
1765 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1766 {
1767 /* No need to add this to saved_regs -- it's just arg regs. */
1768 continue;
1769 }
1770 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1771 {
1772 unsigned imm = insn & 0xff; /* immediate value */
1773 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1774 imm = (imm >> rot) | (imm << (32 - rot));
1775 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1776 }
1777 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1778 {
1779 unsigned imm = insn & 0xff; /* immediate value */
1780 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1781 imm = (imm >> rot) | (imm << (32 - rot));
1782 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1783 }
1784 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1785 [sp, -#c]! */
1786 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1787 {
1788 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1789 break;
1790
1791 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1792 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1793 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1794 }
1795 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1796 [sp!] */
1797 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1798 {
1799 int n_saved_fp_regs;
1800 unsigned int fp_start_reg, fp_bound_reg;
1801
1802 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1803 break;
1804
1805 if ((insn & 0x800) == 0x800) /* N0 is set */
1806 {
1807 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1808 n_saved_fp_regs = 3;
1809 else
1810 n_saved_fp_regs = 1;
1811 }
1812 else
1813 {
1814 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1815 n_saved_fp_regs = 2;
1816 else
1817 n_saved_fp_regs = 4;
1818 }
1819
1820 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1821 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1822 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1823 {
1824 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1825 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1826 regs[fp_start_reg++]);
1827 }
1828 }
1829 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1830 {
1831 /* Allow some special function calls when skipping the
1832 prologue; GCC generates these before storing arguments to
1833 the stack. */
1834 CORE_ADDR dest = BranchDest (current_pc, insn);
1835
1836 if (skip_prologue_function (gdbarch, dest, 0))
1837 continue;
1838 else
1839 break;
1840 }
1841 else if ((insn & 0xf0000000) != 0xe0000000)
1842 break; /* Condition not true, exit early. */
1843 else if (arm_instruction_changes_pc (insn))
1844 /* Don't scan past anything that might change control flow. */
1845 break;
1846 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1847 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1848 /* Ignore block loads from the stack, potentially copying
1849 parameters from memory. */
1850 continue;
1851 else if ((insn & 0xfc500000) == 0xe4100000
1852 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1853 /* Similarly ignore single loads from the stack. */
1854 continue;
1855 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1856 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1857 register instead of the stack. */
1858 continue;
1859 else
1860 {
1861 /* The optimizer might shove anything into the prologue,
1862 so we just skip what we don't recognize. */
1863 unrecognized_pc = current_pc;
1864 continue;
1865 }
1866 }
1867
1868 if (unrecognized_pc == 0)
1869 unrecognized_pc = current_pc;
1870
1871 /* The frame size is just the distance from the frame register
1872 to the original stack pointer. */
1873 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1874 {
1875 /* Frame pointer is fp. */
1876 framereg = ARM_FP_REGNUM;
1877 framesize = -regs[ARM_FP_REGNUM].k;
1878 }
1879 else
1880 {
1881 /* Try the stack pointer... this is a bit desperate. */
1882 framereg = ARM_SP_REGNUM;
1883 framesize = -regs[ARM_SP_REGNUM].k;
1884 }
1885
1886 if (cache)
1887 {
1888 cache->framereg = framereg;
1889 cache->framesize = framesize;
1890
1891 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1892 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1893 cache->saved_regs[regno].addr = offset;
1894 }
1895
1896 if (arm_debug)
1897 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1898 paddress (gdbarch, unrecognized_pc));
1899
1900 do_cleanups (back_to);
1901 return unrecognized_pc;
1902 }
1903
1904 static void
1905 arm_scan_prologue (struct frame_info *this_frame,
1906 struct arm_prologue_cache *cache)
1907 {
1908 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1909 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1910 int regno;
1911 CORE_ADDR prologue_start, prologue_end, current_pc;
1912 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1913 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1914 pv_t regs[ARM_FPS_REGNUM];
1915 struct pv_area *stack;
1916 struct cleanup *back_to;
1917 CORE_ADDR offset;
1918
1919 /* Assume there is no frame until proven otherwise. */
1920 cache->framereg = ARM_SP_REGNUM;
1921 cache->framesize = 0;
1922
1923 /* Check for Thumb prologue. */
1924 if (arm_frame_is_thumb (this_frame))
1925 {
1926 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1927 return;
1928 }
1929
1930 /* Find the function prologue. If we can't find the function in
1931 the symbol table, peek in the stack frame to find the PC. */
1932 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1933 &prologue_end))
1934 {
1935 /* One way to find the end of the prologue (which works well
1936 for unoptimized code) is to do the following:
1937
1938 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1939
1940 if (sal.line == 0)
1941 prologue_end = prev_pc;
1942 else if (sal.end < prologue_end)
1943 prologue_end = sal.end;
1944
1945 This mechanism is very accurate so long as the optimizer
1946 doesn't move any instructions from the function body into the
1947 prologue. If this happens, sal.end will be the last
1948 instruction in the first hunk of prologue code just before
1949 the first instruction that the scheduler has moved from
1950 the body to the prologue.
1951
1952 In order to make sure that we scan all of the prologue
1953 instructions, we use a slightly less accurate mechanism which
1954 may scan more than necessary. To help compensate for this
1955 lack of accuracy, the prologue scanning loop below contains
1956 several clauses which'll cause the loop to terminate early if
1957 an implausible prologue instruction is encountered.
1958
1959 The expression
1960
1961 prologue_start + 64
1962
1963 is a suitable endpoint since it accounts for the largest
1964 possible prologue plus up to five instructions inserted by
1965 the scheduler. */
1966
1967 if (prologue_end > prologue_start + 64)
1968 {
1969 prologue_end = prologue_start + 64; /* See above. */
1970 }
1971 }
1972 else
1973 {
1974 /* We have no symbol information. Our only option is to assume this
1975 function has a standard stack frame and the normal frame register.
1976 Then, we can find the value of our frame pointer on entrance to
1977 the callee (or at the present moment if this is the innermost frame).
1978 The value stored there should be the address of the stmfd + 8. */
1979 CORE_ADDR frame_loc;
1980 LONGEST return_value;
1981
1982 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1983 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1984 return;
1985 else
1986 {
1987 prologue_start = gdbarch_addr_bits_remove
1988 (gdbarch, return_value) - 8;
1989 prologue_end = prologue_start + 64; /* See above. */
1990 }
1991 }
1992
1993 if (prev_pc < prologue_end)
1994 prologue_end = prev_pc;
1995
1996 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1997 }
1998
1999 static struct arm_prologue_cache *
2000 arm_make_prologue_cache (struct frame_info *this_frame)
2001 {
2002 int reg;
2003 struct arm_prologue_cache *cache;
2004 CORE_ADDR unwound_fp;
2005
2006 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2007 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2008
2009 arm_scan_prologue (this_frame, cache);
2010
2011 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2012 if (unwound_fp == 0)
2013 return cache;
2014
2015 cache->prev_sp = unwound_fp + cache->framesize;
2016
2017 /* Calculate actual addresses of saved registers using offsets
2018 determined by arm_scan_prologue. */
2019 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2020 if (trad_frame_addr_p (cache->saved_regs, reg))
2021 cache->saved_regs[reg].addr += cache->prev_sp;
2022
2023 return cache;
2024 }
2025
2026 /* Our frame ID for a normal frame is the current function's starting PC
2027 and the caller's SP when we were called. */
2028
2029 static void
2030 arm_prologue_this_id (struct frame_info *this_frame,
2031 void **this_cache,
2032 struct frame_id *this_id)
2033 {
2034 struct arm_prologue_cache *cache;
2035 struct frame_id id;
2036 CORE_ADDR pc, func;
2037
2038 if (*this_cache == NULL)
2039 *this_cache = arm_make_prologue_cache (this_frame);
2040 cache = *this_cache;
2041
2042 /* This is meant to halt the backtrace at "_start". */
2043 pc = get_frame_pc (this_frame);
2044 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2045 return;
2046
2047 /* If we've hit a wall, stop. */
2048 if (cache->prev_sp == 0)
2049 return;
2050
2051 /* Use function start address as part of the frame ID. If we cannot
2052 identify the start address (due to missing symbol information),
2053 fall back to just using the current PC. */
2054 func = get_frame_func (this_frame);
2055 if (!func)
2056 func = pc;
2057
2058 id = frame_id_build (cache->prev_sp, func);
2059 *this_id = id;
2060 }
2061
2062 static struct value *
2063 arm_prologue_prev_register (struct frame_info *this_frame,
2064 void **this_cache,
2065 int prev_regnum)
2066 {
2067 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2068 struct arm_prologue_cache *cache;
2069
2070 if (*this_cache == NULL)
2071 *this_cache = arm_make_prologue_cache (this_frame);
2072 cache = *this_cache;
2073
2074 /* If we are asked to unwind the PC, then we need to return the LR
2075 instead. The prologue may save PC, but it will point into this
2076 frame's prologue, not the next frame's resume location. Also
2077 strip the saved T bit. A valid LR may have the low bit set, but
2078 a valid PC never does. */
2079 if (prev_regnum == ARM_PC_REGNUM)
2080 {
2081 CORE_ADDR lr;
2082
2083 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2084 return frame_unwind_got_constant (this_frame, prev_regnum,
2085 arm_addr_bits_remove (gdbarch, lr));
2086 }
2087
2088 /* SP is generally not saved to the stack, but this frame is
2089 identified by the next frame's stack pointer at the time of the call.
2090 The value was already reconstructed into PREV_SP. */
2091 if (prev_regnum == ARM_SP_REGNUM)
2092 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2093
2094 /* The CPSR may have been changed by the call instruction and by the
2095 called function. The only bit we can reconstruct is the T bit,
2096 by checking the low bit of LR as of the call. This is a reliable
2097 indicator of Thumb-ness except for some ARM v4T pre-interworking
2098 Thumb code, which could get away with a clear low bit as long as
2099 the called function did not use bx. Guess that all other
2100 bits are unchanged; the condition flags are presumably lost,
2101 but the processor status is likely valid. */
2102 if (prev_regnum == ARM_PS_REGNUM)
2103 {
2104 CORE_ADDR lr, cpsr;
2105 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2106
2107 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2108 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2109 if (IS_THUMB_ADDR (lr))
2110 cpsr |= t_bit;
2111 else
2112 cpsr &= ~t_bit;
2113 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2114 }
2115
2116 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2117 prev_regnum);
2118 }
2119
/* Unwinder based on scanning the function prologue; uses the default
   sniffer, so it accepts any normal frame not claimed by a
   higher-priority unwinder.  */

struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2128
/* Maintain a list of ARM exception table entries per objfile, similar to the
   list of mapping symbols.  We only cache entries for standard ARM-defined
   personality routines; the cache will contain only the frame unwinding
   instructions associated with the entry (not the descriptors).  */

static const struct objfile_data *arm_exidx_data_key;

/* One exception table entry: the section-relative start address of the
   region it covers, plus the normalized unwind instructions for that
   region (NULL for EXIDX_CANTUNWIND entries -- see
   arm_exidx_new_objfile).  */
struct arm_exidx_entry
{
  bfd_vma addr;
  gdb_byte *entry;
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

/* Per-objfile cache: one vector of entries per BFD section, indexed
   by the section's index within the BFD.  */
struct arm_exidx_data
{
  VEC(arm_exidx_entry_s) **section_maps;
};
2148
2149 static void
2150 arm_exidx_data_free (struct objfile *objfile, void *arg)
2151 {
2152 struct arm_exidx_data *data = arg;
2153 unsigned int i;
2154
2155 for (i = 0; i < objfile->obfd->section_count; i++)
2156 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2157 }
2158
2159 static inline int
2160 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2161 const struct arm_exidx_entry *rhs)
2162 {
2163 return lhs->addr < rhs->addr;
2164 }
2165
2166 static struct obj_section *
2167 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2168 {
2169 struct obj_section *osect;
2170
2171 ALL_OBJFILE_OSECTIONS (objfile, osect)
2172 if (bfd_get_section_flags (objfile->obfd,
2173 osect->the_bfd_section) & SEC_ALLOC)
2174 {
2175 bfd_vma start, size;
2176 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2177 size = bfd_get_section_size (osect->the_bfd_section);
2178
2179 if (start <= vma && vma < start + size)
2180 return osect;
2181 }
2182
2183 return NULL;
2184 }
2185
2186 /* Parse contents of exception table and exception index sections
2187 of OBJFILE, and fill in the exception table entry cache.
2188
2189 For each entry that refers to a standard ARM-defined personality
2190 routine, extract the frame unwinding instructions (from either
2191 the index or the table section). The unwinding instructions
2192 are normalized by:
2193 - extracting them from the rest of the table data
2194 - converting to host endianness
2195 - appending the implicit 0xb0 ("Finish") code
2196
2197 The extracted and normalized instructions are stored for later
2198 retrieval by the arm_find_exidx_entry routine. */
2199
/* Parse OBJFILE's .ARM.exidx/.ARM.extab sections and populate the
   per-objfile exception table cache; see the comment above for an
   overview of the normalization performed.  Called (via the observer
   mechanism) once per objfile.  */

static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct cleanup *cleanups;
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  bfd_size_type exidx_size = 0, extab_size = 0;
  gdb_byte *exidx_data = NULL, *extab_data = NULL;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;
  cleanups = make_cleanup (null_cleanup, NULL);

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_size = bfd_get_section_size (exidx);
      exidx_data = xmalloc (exidx_size);
      make_cleanup (xfree, exidx_data);

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data, 0, exidx_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_size = bfd_get_section_size (extab);
      extab_data = xmalloc (extab_size);
      make_cleanup (xfree, extab_data);

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data, 0, extab_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  /* Allocate exception table data structure.  The cache itself lives
     on the objfile obstack; only the raw section copies above are
     temporary.  */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index entry is a pair of 32-bit
     words: a prel31 offset to the function start, and either an
     inline unwind description or a prel31 offset into .ARM.extab.  */
  for (i = 0; i < exidx_size / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function: sign-extend the prel31
	 offset and relocate it relative to this index entry.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.
	     ENTRY stays NULL.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  VAL is a prel31
	     offset to the entry, relative to the second word of this
	     index entry.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  /* Ignore entries whose address falls outside the extab
	     section we actually read.  */
	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  The second byte holds the count of
		     additional 4-byte instruction words.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine: WORD is a prel31
		     offset to the routine, possibly with the Thumb
		     bit set.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_size)
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   extab_data + addr - extab_vma);
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address: drop the entry if the trailing words
	 would run off the end of the extab section.  */
      if (n_words)
	if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
					       n_bytes + n_words * 4 + 1);

	  /* Copy the leading bytes of WORD, most significant first
	     (N_BYTES is decremented before the shift, so the shifts
	     are 16, 8, 0 for N_BYTES == 3).  */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  /* Copy the trailing instruction words, converting each to
	     a big-endian byte sequence regardless of host/target
	     endianness.  */
	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }

  do_cleanups (cleanups);
}
2408
2409 /* Search for the exception table entry covering MEMADDR. If one is found,
2410 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2411 set *START to the start of the region covered by this entry. */
2412
static gdb_byte *
arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_exidx_data *data;
      VEC(arm_exidx_entry_s) *map;
      /* Entries are stored section-relative, so convert MEMADDR to a
	 section offset for the search key.  */
      struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_exidx_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_exidx_entry_s, map))
	    {
	      struct arm_exidx_entry *map_sym;

	      /* Binary search; the vector is sorted by address (see
		 arm_exidx_new_objfile).  */
	      idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
				     arm_compare_exidx_entries);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 exception table entry covers this address.  */
	      if (idx < VEC_length (arm_exidx_entry_s, map))
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx);
		  if (map_sym->addr == map_key.addr)
		    {
		      if (start)
			*start = map_sym->addr + obj_section_addr (sec);
		      return map_sym->entry;
		    }
		}

	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
		  if (start)
		    *start = map_sym->addr + obj_section_addr (sec);
		  return map_sym->entry;
		}
	    }
	}
    }

  return NULL;
}
2465
2466 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2467 instruction list from the ARM exception table entry ENTRY, allocate and
2468 return a prologue cache structure describing how to unwind this frame.
2469
2470 Return NULL if the unwinding instruction list contains a "spare",
2471 "reserved" or "refuse to unwind" instruction as defined in section
2472 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2473 for the ARM Architecture" document. */
2474
static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  /* VSP is the EHABI "virtual stack pointer", stepped forward as each
     unwind instruction is interpreted.  */
  CORE_ADDR vsp = 0;
  int vsp_valid = 0;

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.  */
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  Encodings below follow
	 "9.3 Frame unwinding instructions" of the ARM EHABI.  */
      insn = *entry++;

      if ((insn & 0xc0) == 0)
	{
	  /* 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      else if ((insn & 0xc0) == 0x40)
	{
	  /* 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4.  */
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  /* 1000iiii iiiiiiii: pop r4-r15 under 12-bit mask (bit 0
	     corresponds to r4).  */
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  /* 1001nnnn: vsp = r[nnnn].  */
	  int reg = insn & 0xf;

	  /* Reserved cases (nnnn == 13 or 15).  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  /* 1010xccc: pop r4-r[4+ccc]; also pop r14 (LR) if x set.  */
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* 10110000: Finish.  */
	  /* We could only have updated PC by popping into it; if so, it
	     will show up as address.  Otherwise, copy LR into PC.  */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  /* 10110001 0000iiii: pop r0-r3 under mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2).  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  /* 10110011 sssscccc: pop VFP D[ssss]-D[ssss+cccc] saved
	     with FSTMFDX.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  /* 10111nnn: pop VFP D[8]-D[8+nnn] saved with FSTMFDX.  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if (insn == 0xc6)
	{
	  /* 11000110 sssscccc: pop iWMMXt WR[ssss]-WR[ssss+cccc].  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  /* 11000111 0000iiii: pop iWMMXt wCGR0-wCGR3 under mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  /* 11000nnn (nnn != 6, 7): pop iWMMXt WR[10]-WR[10+nnn].  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  /* 11001000 sssscccc: pop VFP D[16+ssss]-D[16+ssss+cccc]
	     saved with FSTMFDD.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  /* 11001001 sssscccc: pop VFP D[ssss]-D[ssss+cccc] saved
	     with FSTMFDD.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  /* 11010nnn: pop VFP D[8]-D[8+nnn] saved with FSTMFDD.  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}
2764
2765 /* Unwinding via ARM exception table entries. Note that the sniffer
2766 already computes a filled-in prologue cache, which is then used
2767 with the same arm_prologue_this_id and arm_prologue_prev_register
2768 routines also used for prologue-parsing based unwinding. */
2769
/* Sniffer for the ARM exidx unwinder.  Returns 1 and stores a filled
   prologue cache in *THIS_PROLOGUE_CACHE if THIS_FRAME can and should
   be unwound via exception table information.  */

static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.
	 Check for an svc instruction just before the current PC.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
					byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
					byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2859
/* Exception-table-based unwinder.  The sniffer pre-computes the
   prologue cache, which is then consumed by the same this_id and
   prev_register routines used for prologue-based unwinding.  */

struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2868
2869 static struct arm_prologue_cache *
2870 arm_make_stub_cache (struct frame_info *this_frame)
2871 {
2872 struct arm_prologue_cache *cache;
2873
2874 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2875 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2876
2877 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2878
2879 return cache;
2880 }
2881
2882 /* Our frame ID for a stub frame is the current SP and LR. */
2883
2884 static void
2885 arm_stub_this_id (struct frame_info *this_frame,
2886 void **this_cache,
2887 struct frame_id *this_id)
2888 {
2889 struct arm_prologue_cache *cache;
2890
2891 if (*this_cache == NULL)
2892 *this_cache = arm_make_stub_cache (this_frame);
2893 cache = *this_cache;
2894
2895 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2896 }
2897
2898 static int
2899 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2900 struct frame_info *this_frame,
2901 void **this_prologue_cache)
2902 {
2903 CORE_ADDR addr_in_block;
2904 char dummy[4];
2905
2906 addr_in_block = get_frame_address_in_block (this_frame);
2907 if (in_plt_section (addr_in_block, NULL)
2908 /* We also use the stub winder if the target memory is unreadable
2909 to avoid having the prologue unwinder trying to read it. */
2910 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2911 return 1;
2912
2913 return 0;
2914 }
2915
/* Unwinder for PLT stubs and frames with unreadable code; shares
   arm_prologue_prev_register with the prologue-based unwinder.  */

struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2924
2925 static CORE_ADDR
2926 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2927 {
2928 struct arm_prologue_cache *cache;
2929
2930 if (*this_cache == NULL)
2931 *this_cache = arm_make_prologue_cache (this_frame);
2932 cache = *this_cache;
2933
2934 return cache->prev_sp - cache->framesize;
2935 }
2936
/* Frame base handler for normal frames; the same address serves as
   frame base, locals base and args base.  */

struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,
  arm_normal_frame_base,
  arm_normal_frame_base
};
2943
2944 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2945 dummy frame. The frame ID's base needs to match the TOS value
2946 saved by save_dummy_frame_tos() and returned from
2947 arm_push_dummy_call, and the PC needs to match the dummy frame's
2948 breakpoint. */
2949
2950 static struct frame_id
2951 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2952 {
2953 return frame_id_build (get_frame_register_unsigned (this_frame,
2954 ARM_SP_REGNUM),
2955 get_frame_pc (this_frame));
2956 }
2957
2958 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2959 be used to construct the previous frame's ID, after looking up the
2960 containing function). */
2961
2962 static CORE_ADDR
2963 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2964 {
2965 CORE_ADDR pc;
2966 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2967 return arm_addr_bits_remove (gdbarch, pc);
2968 }
2969
2970 static CORE_ADDR
2971 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2972 {
2973 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2974 }
2975
2976 static struct value *
2977 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2978 int regnum)
2979 {
2980 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2981 CORE_ADDR lr, cpsr;
2982 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2983
2984 switch (regnum)
2985 {
2986 case ARM_PC_REGNUM:
2987 /* The PC is normally copied from the return column, which
2988 describes saves of LR. However, that version may have an
2989 extra bit set to indicate Thumb state. The bit is not
2990 part of the PC. */
2991 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2992 return frame_unwind_got_constant (this_frame, regnum,
2993 arm_addr_bits_remove (gdbarch, lr));
2994
2995 case ARM_PS_REGNUM:
2996 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2997 cpsr = get_frame_register_unsigned (this_frame, regnum);
2998 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2999 if (IS_THUMB_ADDR (lr))
3000 cpsr |= t_bit;
3001 else
3002 cpsr &= ~t_bit;
3003 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3004
3005 default:
3006 internal_error (__FILE__, __LINE__,
3007 _("Unexpected register %d"), regnum);
3008 }
3009 }
3010
3011 static void
3012 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3013 struct dwarf2_frame_state_reg *reg,
3014 struct frame_info *this_frame)
3015 {
3016 switch (regnum)
3017 {
3018 case ARM_PC_REGNUM:
3019 case ARM_PS_REGNUM:
3020 reg->how = DWARF2_FRAME_REG_FN;
3021 reg->loc.fn = arm_dwarf2_prev_register;
3022 break;
3023 case ARM_SP_REGNUM:
3024 reg->how = DWARF2_FRAME_REG_CFA;
3025 break;
3026 }
3027 }
3028
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.
   Thumb/Thumb-2 version: decodes 16-bit and 32-bit encodings.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without function bounds we cannot scan; assume not in epilogue.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  /* Forward scan: every instruction from PC to the return must belong
     to the epilogue pattern, otherwise bail out.  */
  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (insn == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn & 0xfe00) == 0xbc00)  /* pop <registers> */
	{
	  found_stack_adjust = 1;
	  if (insn & 0x0100)  /* <registers> include PC.  */
	    found_return = 1;
	}
      else if (thumb_insn_size (insn) == 4)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit encoding.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* INSN2 is the halfword immediately before PC; INSN is the one
	 before that (the first half of a possible 32-bit encoding).  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (insn2 == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xbc00)  /* pop <registers> without PC */
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}
3153
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.
   ARM-mode version; Thumb PCs are delegated to
   thumb_in_function_epilogue_p.  */

static int
arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return, found_stack_adjust;
  CORE_ADDR func_start, func_end;

  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_in_function_epilogue_p (gdbarch, pc);

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  /* Skip unconditional-space (NV condition) encodings.  */
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	       && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  if (pc < func_start + 4)
    return 0;

  found_stack_adjust = 0;
  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0df0f000) == 0x0080d000)
	/* ADD SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0df0f000) == 0x0040d000)
	/* SUB SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0d000)
	/* MOV SP.  */
	found_stack_adjust = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000)
	/* POP (LDMIA).  */
	found_stack_adjust = 1;
    }

  if (found_stack_adjust)
    return 1;

  return 0;
}
3225
3226
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.
   Items are linked through PREV; the list head is the top of the
   stack.  */

struct stack_item
{
  int len;			/* Size in bytes of DATA.  */
  struct stack_item *prev;	/* Next item down the stack, or NULL.  */
  void *data;			/* Heap-allocated copy of the contents.  */
};
3236
3237 static struct stack_item *
3238 push_stack_item (struct stack_item *prev, const void *contents, int len)
3239 {
3240 struct stack_item *si;
3241 si = xmalloc (sizeof (struct stack_item));
3242 si->data = xmalloc (len);
3243 si->len = len;
3244 si->prev = prev;
3245 memcpy (si->data, contents, len);
3246 return si;
3247 }
3248
3249 static struct stack_item *
3250 pop_stack_item (struct stack_item *si)
3251 {
3252 struct stack_item *dead = si;
3253 si = si->prev;
3254 xfree (dead->data);
3255 xfree (dead);
3256 return si;
3257 }
3258
3259
3260 /* Return the alignment (in bytes) of the given type. */
3261
3262 static int
3263 arm_type_align (struct type *t)
3264 {
3265 int n;
3266 int align;
3267 int falign;
3268
3269 t = check_typedef (t);
3270 switch (TYPE_CODE (t))
3271 {
3272 default:
3273 /* Should never happen. */
3274 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3275 return 4;
3276
3277 case TYPE_CODE_PTR:
3278 case TYPE_CODE_ENUM:
3279 case TYPE_CODE_INT:
3280 case TYPE_CODE_FLT:
3281 case TYPE_CODE_SET:
3282 case TYPE_CODE_RANGE:
3283 case TYPE_CODE_BITSTRING:
3284 case TYPE_CODE_REF:
3285 case TYPE_CODE_CHAR:
3286 case TYPE_CODE_BOOL:
3287 return TYPE_LENGTH (t);
3288
3289 case TYPE_CODE_ARRAY:
3290 case TYPE_CODE_COMPLEX:
3291 /* TODO: What about vector types? */
3292 return arm_type_align (TYPE_TARGET_TYPE (t));
3293
3294 case TYPE_CODE_STRUCT:
3295 case TYPE_CODE_UNION:
3296 align = 1;
3297 for (n = 0; n < TYPE_NFIELDS (t); n++)
3298 {
3299 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3300 if (falign > align)
3301 align = falign;
3302 }
3303 return align;
3304 }
3305 }
3306
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,	/* No base type classified yet.  */
  VFP_CPRC_SINGLE,	/* 4-byte single-precision float.  */
  VFP_CPRC_DOUBLE,	/* 8-byte double-precision float.  */
  VFP_CPRC_VEC64,	/* 8-byte vector.  */
  VFP_CPRC_VEC128	/* 16-byte vector.  */
};
3318
3319 /* The length of one element of base type B. */
3320
3321 static unsigned
3322 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3323 {
3324 switch (b)
3325 {
3326 case VFP_CPRC_SINGLE:
3327 return 4;
3328 case VFP_CPRC_DOUBLE:
3329 return 8;
3330 case VFP_CPRC_VEC64:
3331 return 8;
3332 case VFP_CPRC_VEC128:
3333 return 16;
3334 default:
3335 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3336 (int) b);
3337 }
3338 }
3339
3340 /* The character ('s', 'd' or 'q') for the type of VFP register used
3341 for passing base type B. */
3342
3343 static int
3344 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3345 {
3346 switch (b)
3347 {
3348 case VFP_CPRC_SINGLE:
3349 return 's';
3350 case VFP_CPRC_DOUBLE:
3351 return 'd';
3352 case VFP_CPRC_VEC64:
3353 return 'd';
3354 case VFP_CPRC_VEC128:
3355 return 'q';
3356 default:
3357 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3358 (int) b);
3359 }
3360 }
3361
/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.  If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vectors and complex types are not currently supported,
   matching the generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* A scalar float is one element; its size selects the base
	 classification, which must agree with any already seen.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	int count;
	unsigned unitlen;

	/* Classify by the element type, then derive the element count
	   from the array's total size.  */
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	int count = 0;
	unsigned unitlen;
	int i;

	/* Sum the element counts of all fields; any disagreement in
	   classification propagates up as -1.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  /* Padding or other data would be passed incorrectly.  */
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	int count = 0;
	unsigned unitlen;
	int i;

	/* For a union the members overlap, so take the maximum of the
	   member element counts rather than the sum.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
3482
3483 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3484 if passed to or returned from a non-variadic function with the VFP
3485 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3486 *BASE_TYPE to the base type for T and *COUNT to the number of
3487 elements of that base type before returning. */
3488
3489 static int
3490 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3491 int *count)
3492 {
3493 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3494 int c = arm_vfp_cprc_sub_candidate (t, &b);
3495 if (c <= 0 || c > 4)
3496 return 0;
3497 *base_type = b;
3498 *count = c;
3499 return 1;
3500 }
3501
3502 /* Return 1 if the VFP ABI should be used for passing arguments to and
3503 returning values from a function of type FUNC_TYPE, 0
3504 otherwise. */
3505
3506 static int
3507 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3508 {
3509 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3510 /* Variadic functions always use the base ABI. Assume that functions
3511 without debug info are not variadic. */
3512 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3513 return 0;
3514 /* The VFP ABI is only supported as a variant of AAPCS. */
3515 if (tdep->arm_abi != ARM_ABI_AAPCS)
3516 return 0;
3517 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3518 }
3519
/* We currently only support passing parameters in integer registers, which
   conforms with GCC's default model, and VFP argument passing following
   the VFP variant of AAPCS.  Several other variants exist and
   we should probably support some of them based on the selected ABI.

   Implements the gdbarch push_dummy_call method: sets LR to BP_ADDR
   (with the Thumb bit set when BP_ADDR is a Thumb address), passes
   STRUCT_ADDR in the first argument register when STRUCT_RETURN is
   set, assigns each of the NARGS values in ARGS to VFP registers (for
   CPRCs under the VFP ABI), core argument registers, or the stack,
   writes the adjusted SP back to the regcache and returns it.  */

static CORE_ADDR
arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp, int struct_return,
		     CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int argnum;
  int argreg;
  int nstack;
  struct stack_item *si = NULL;
  int use_vfp_abi;
  struct type *ftype;
  /* Bitmask of free single-precision VFP registers s0-s15.  */
  unsigned vfp_regs_free = (1 << 16) - 1;

  /* Determine the type of this function and whether the VFP ABI
     applies.  */
  ftype = check_typedef (value_type (function));
  if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
    ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
  use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);

  /* Set the return address.  For the ARM, the return breakpoint is
     always at BP_ADDR.  */
  if (arm_pc_is_thumb (gdbarch, bp_addr))
    bp_addr |= 1;
  regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);

  /* Walk through the list of args and determine how large a temporary
     stack is required.  Need to take care here as structs may be
     passed on the stack, and we have to push them.  */
  nstack = 0;

  argreg = ARM_A1_REGNUM;
  /* NOTE(review): this second assignment is redundant with the one
     just above.  */
  nstack = 0;

  /* The struct_return pointer occupies the first parameter
     passing register.  */
  if (struct_return)
    {
      if (arm_debug)
	fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
			    gdbarch_register_name (gdbarch, argreg),
			    paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
      argreg++;
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      int len;
      struct type *arg_type;
      struct type *target_type;
      enum type_code typecode;
      const bfd_byte *val;
      int align;
      enum arm_vfp_cprc_base_type vfp_base_type;
      int vfp_base_count;
      int may_use_core_reg = 1;

      arg_type = check_typedef (value_type (args[argnum]));
      len = TYPE_LENGTH (arg_type);
      target_type = TYPE_TARGET_TYPE (arg_type);
      typecode = TYPE_CODE (arg_type);
      val = value_contents (args[argnum]);

      align = arm_type_align (arg_type);
      /* Round alignment up to a whole number of words.  */
      align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
      /* Different ABIs have different maximum alignments.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
	{
	  /* The APCS ABI only requires word alignment.  */
	  align = INT_REGISTER_SIZE;
	}
      else
	{
	  /* The AAPCS requires at most doubleword alignment.  */
	  if (align > INT_REGISTER_SIZE * 2)
	    align = INT_REGISTER_SIZE * 2;
	}

      if (use_vfp_abi
	  && arm_vfp_call_candidate (arg_type, &vfp_base_type,
				     &vfp_base_count))
	{
	  int regno;
	  int unit_length;
	  int shift;
	  unsigned mask;

	  /* Because this is a CPRC it cannot go in a core register or
	     cause a core register to be skipped for alignment.
	     Either it goes in VFP registers and the rest of this loop
	     iteration is skipped for this argument, or it goes on the
	     stack (and the stack alignment code is correct for this
	     case).  */
	  may_use_core_reg = 0;

	  /* Find SHIFT consecutive free single-precision registers
	     per element; MASK covers the whole candidate.  */
	  unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
	  shift = unit_length / 4;
	  mask = (1 << (shift * vfp_base_count)) - 1;
	  for (regno = 0; regno < 16; regno += shift)
	    if (((vfp_regs_free >> regno) & mask) == mask)
	      break;

	  if (regno < 16)
	    {
	      int reg_char;
	      int reg_scaled;
	      int i;

	      /* Mark the registers as used and write each element to
		 the corresponding s/d/q register.  */
	      vfp_regs_free &= ~(mask << regno);
	      reg_scaled = regno / shift;
	      reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
	      for (i = 0; i < vfp_base_count; i++)
		{
		  char name_buf[4];
		  int regnum;
		  if (reg_char == 'q')
		    arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
					 val + i * unit_length);
		  else
		    {
		      sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
		      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
							    strlen (name_buf));
		      regcache_cooked_write (regcache, regnum,
					     val + i * unit_length);
		    }
		}
	      continue;
	    }
	  else
	    {
	      /* This CPRC could not go in VFP registers, so all VFP
		 registers are now marked as used.  */
	      vfp_regs_free = 0;
	    }
	}

      /* Push stack padding for doubleword alignment.  */
      if (nstack & (align - 1))
	{
	  si = push_stack_item (si, val, INT_REGISTER_SIZE);
	  nstack += INT_REGISTER_SIZE;
	}

      /* Doubleword aligned quantities must go in even register pairs.  */
      if (may_use_core_reg
	  && argreg <= ARM_LAST_ARG_REGNUM
	  && align > INT_REGISTER_SIZE
	  && argreg & 1)
	argreg++;

      /* If the argument is a pointer to a function, and it is a
	 Thumb function, create a LOCAL copy of the value and set
	 the THUMB bit in it.  */
      if (TYPE_CODE_PTR == typecode
	  && target_type != NULL
	  && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
	{
	  CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
	  if (arm_pc_is_thumb (gdbarch, regval))
	    {
	      bfd_byte *copy = alloca (len);
	      store_unsigned_integer (copy, len, byte_order,
				      MAKE_THUMB_ADDR (regval));
	      val = copy;
	    }
	}

      /* Copy the argument to general registers or the stack in
	 register-sized pieces.  Large arguments are split between
	 registers and stack.  */
      while (len > 0)
	{
	  int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;

	  if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
	    {
	      /* The argument is being passed in a general purpose
		 register.  */
	      CORE_ADDR regval
		= extract_unsigned_integer (val, partial_len, byte_order);
	      if (byte_order == BFD_ENDIAN_BIG)
		regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
				    argnum,
				    gdbarch_register_name
				      (gdbarch, argreg),
				    phex (regval, INT_REGISTER_SIZE));
	      regcache_cooked_write_unsigned (regcache, argreg, regval);
	      argreg++;
	    }
	  else
	    {
	      /* Push the arguments onto the stack.  */
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
				    argnum, nstack);
	      si = push_stack_item (si, val, INT_REGISTER_SIZE);
	      nstack += INT_REGISTER_SIZE;
	    }

	  len -= partial_len;
	  val += partial_len;
	}
    }
  /* If we have an odd number of words to push, then decrement the stack
     by one word now, so first stack argument will be dword aligned.  */
  if (nstack & 4)
    sp -= 4;

  /* Write out the queued stack items, top of the FILO last (so the
     first argument ends up at the lowest address).  */
  while (si)
    {
      sp -= si->len;
      write_memory (sp, si->data, si->len);
      si = pop_stack_item (si);
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);

  return sp;
}
3752
3753
3754 /* Always align the frame to an 8-byte boundary. This is required on
3755 some platforms and harmless on the rest. */
3756
3757 static CORE_ADDR
3758 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3759 {
3760 /* Align the stack to eight bytes. */
3761 return sp & ~ (CORE_ADDR) 7;
3762 }
3763
3764 static void
3765 print_fpu_flags (int flags)
3766 {
3767 if (flags & (1 << 0))
3768 fputs ("IVO ", stdout);
3769 if (flags & (1 << 1))
3770 fputs ("DVZ ", stdout);
3771 if (flags & (1 << 2))
3772 fputs ("OFL ", stdout);
3773 if (flags & (1 << 3))
3774 fputs ("UFL ", stdout);
3775 if (flags & (1 << 4))
3776 fputs ("INX ", stdout);
3777 putchar ('\n');
3778 }
3779
3780 /* Print interesting information about the floating point processor
3781 (if present) or emulator. */
3782 static void
3783 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3784 struct frame_info *frame, const char *args)
3785 {
3786 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3787 int type;
3788
3789 type = (status >> 24) & 127;
3790 if (status & (1 << 31))
3791 printf (_("Hardware FPU type %d\n"), type);
3792 else
3793 printf (_("Software FPU type %d\n"), type);
3794 /* i18n: [floating point unit] mask */
3795 fputs (_("mask: "), stdout);
3796 print_fpu_flags (status >> 16);
3797 /* i18n: [floating point unit] flags */
3798 fputs (_("flags: "), stdout);
3799 print_fpu_flags (status);
3800 }
3801
3802 /* Construct the ARM extended floating point type. */
3803 static struct type *
3804 arm_ext_type (struct gdbarch *gdbarch)
3805 {
3806 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3807
3808 if (!tdep->arm_ext_type)
3809 tdep->arm_ext_type
3810 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3811 floatformats_arm_ext);
3812
3813 return tdep->arm_ext_type;
3814 }
3815
/* Build (once per gdbarch, then cache) the union type used to display
   a 64-bit NEON "d" register: overlapping views of the same 8 bytes
   as u8/u16/u32 vectors, a u64, a float vector and a double.  */

static struct type *
arm_neon_double_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_double_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
			       TYPE_CODE_UNION);
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", elem);
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", elem);

      /* Mark the union itself as a vector so it is handled as one.  */
      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_d";
      tdep->neon_double_type = t;
    }

  return tdep->neon_double_type;
}
3847
/* FIXME: The vector types are not correctly ordered on big-endian
   targets.  Just as s0 is the low bits of d0, d0[0] is also the low
   bits of d0 - regardless of what unit size is being held in d0.  So
   the offset of the first uint8 in d0 is 7, but the offset of the
   first float is 4.  This code works as-is for little-endian
   targets.  */

/* Build (once per gdbarch, then cache) the union type used to display
   a 128-bit NEON "q" register: the same 16 bytes viewed as u8/u16/
   u32/u64 vectors and as float/double vectors.  */

static struct type *
arm_neon_quad_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_quad_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
			       TYPE_CODE_UNION);
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 16));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", init_vector_type (elem, 2));

      /* Mark the union itself as a vector so it is handled as one.  */
      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_q";
      tdep->neon_quad_type = t;
    }

  return tdep->neon_quad_type;
}
3886
/* Return the GDB type object for the "standard" data type of data in
   register N.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* VFP pseudo registers (single-precision views of the D
     registers) come first after the raw registers.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* NEON quad pseudo registers follow the 32 VFP pseudos.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  /* No target description: fall back to fixed assignments.  */
  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      /* Without FPA registers the F registers carry no data.  */
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
3936
3937 /* Map a DWARF register REGNUM onto the appropriate GDB register
3938 number. */
3939
3940 static int
3941 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3942 {
3943 /* Core integer regs. */
3944 if (reg >= 0 && reg <= 15)
3945 return reg;
3946
3947 /* Legacy FPA encoding. These were once used in a way which
3948 overlapped with VFP register numbering, so their use is
3949 discouraged, but GDB doesn't support the ARM toolchain
3950 which used them for VFP. */
3951 if (reg >= 16 && reg <= 23)
3952 return ARM_F0_REGNUM + reg - 16;
3953
3954 /* New assignments for the FPA registers. */
3955 if (reg >= 96 && reg <= 103)
3956 return ARM_F0_REGNUM + reg - 96;
3957
3958 /* WMMX register assignments. */
3959 if (reg >= 104 && reg <= 111)
3960 return ARM_WCGR0_REGNUM + reg - 104;
3961
3962 if (reg >= 112 && reg <= 127)
3963 return ARM_WR0_REGNUM + reg - 112;
3964
3965 if (reg >= 192 && reg <= 199)
3966 return ARM_WC0_REGNUM + reg - 192;
3967
3968 /* VFP v2 registers. A double precision value is actually
3969 in d1 rather than s2, but the ABI only defines numbering
3970 for the single precision registers. This will "just work"
3971 in GDB for little endian targets (we'll read eight bytes,
3972 starting in s0 and then progressing to s1), but will be
3973 reversed on big endian targets with VFP. This won't
3974 be a problem for the new Neon quad registers; you're supposed
3975 to use DW_OP_piece for those. */
3976 if (reg >= 64 && reg <= 95)
3977 {
3978 char name_buf[4];
3979
3980 sprintf (name_buf, "s%d", reg - 64);
3981 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3982 strlen (name_buf));
3983 }
3984
3985 /* VFP v3 / Neon registers. This range is also used for VFP v2
3986 registers, except that it now describes d0 instead of s0. */
3987 if (reg >= 256 && reg <= 287)
3988 {
3989 char name_buf[4];
3990
3991 sprintf (name_buf, "d%d", reg - 256);
3992 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3993 strlen (name_buf));
3994 }
3995
3996 return -1;
3997 }
3998
3999 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4000 static int
4001 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4002 {
4003 int reg = regnum;
4004 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4005
4006 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4007 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4008
4009 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4010 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4011
4012 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4013 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4014
4015 if (reg < NUM_GREGS)
4016 return SIM_ARM_R0_REGNUM + reg;
4017 reg -= NUM_GREGS;
4018
4019 if (reg < NUM_FREGS)
4020 return SIM_ARM_FP0_REGNUM + reg;
4021 reg -= NUM_FREGS;
4022
4023 if (reg < NUM_SREGS)
4024 return SIM_ARM_FPS_REGNUM + reg;
4025 reg -= NUM_SREGS;
4026
4027 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4028 }
4029
4030 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4031 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4032 It is thought that this is is the floating-point register format on
4033 little-endian systems. */
4034
4035 static void
4036 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4037 void *dbl, int endianess)
4038 {
4039 DOUBLEST d;
4040
4041 if (endianess == BFD_ENDIAN_BIG)
4042 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4043 else
4044 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4045 ptr, &d);
4046 floatformat_from_doublest (fmt, &d, dbl);
4047 }
4048
4049 static void
4050 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4051 int endianess)
4052 {
4053 DOUBLEST d;
4054
4055 floatformat_to_doublest (fmt, ptr, &d);
4056 if (endianess == BFD_ENDIAN_BIG)
4057 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4058 else
4059 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4060 &d, dbl);
4061 }
4062
4063 static int
4064 condition_true (unsigned long cond, unsigned long status_reg)
4065 {
4066 if (cond == INST_AL || cond == INST_NV)
4067 return 1;
4068
4069 switch (cond)
4070 {
4071 case INST_EQ:
4072 return ((status_reg & FLAG_Z) != 0);
4073 case INST_NE:
4074 return ((status_reg & FLAG_Z) == 0);
4075 case INST_CS:
4076 return ((status_reg & FLAG_C) != 0);
4077 case INST_CC:
4078 return ((status_reg & FLAG_C) == 0);
4079 case INST_MI:
4080 return ((status_reg & FLAG_N) != 0);
4081 case INST_PL:
4082 return ((status_reg & FLAG_N) == 0);
4083 case INST_VS:
4084 return ((status_reg & FLAG_V) != 0);
4085 case INST_VC:
4086 return ((status_reg & FLAG_V) == 0);
4087 case INST_HI:
4088 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4089 case INST_LS:
4090 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4091 case INST_GE:
4092 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4093 case INST_LT:
4094 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4095 case INST_GT:
4096 return (((status_reg & FLAG_Z) == 0)
4097 && (((status_reg & FLAG_N) == 0)
4098 == ((status_reg & FLAG_V) == 0)));
4099 case INST_LE:
4100 return (((status_reg & FLAG_Z) != 0)
4101 || (((status_reg & FLAG_N) == 0)
4102 != ((status_reg & FLAG_V) == 0)));
4103 }
4104 return 1;
4105 }
4106
/* Evaluate the shifter operand of INST (an ARM data-processing or
   load/store instruction) using register values from FRAME.  CARRY is
   the current C flag, consumed only by RRX; PC_VAL is the raw PC used
   for the pipeline-adjusted value of r15.  STATUS_REG is currently
   unreferenced here.  The result is truncated to 32 bits.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift; only the bottom byte of Rs counts.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount, bits [11:7] of the instruction.  */
    shift = bits (inst, 7, 11);

  /* When Rm is the PC, its value is the instruction address plus 8
     (or plus 12 for register-specified shifts).  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Shifts of 32 or more replicate the sign bit everywhere.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	/* Encoded shift of zero means RRX: rotate through carry.  */
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  return res & 0xffffffff;
}
4156
4157 /* Return number of 1-bits in VAL. */
4158
static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Each iteration clears the lowest set bit of VAL.  */
  while (val != 0)
    {
      val &= val - 1;
      count++;
    }

  return count;
}
4167
4168 /* Return the size in bytes of the complete Thumb instruction whose
4169 first halfword is INST1. */
4170
static int
thumb_insn_size (unsigned short inst1)
{
  /* 32-bit Thumb-2 encodings start with 0b111xx, xx != 00; every
     other first halfword is a complete 16-bit instruction.  */
  int is_32bit = ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0);

  return is_32bit ? 4 : 2;
}
4179
static int
thumb_advance_itstate (unsigned int itstate)
{
  /* IT[7:5] holds the base condition and is kept; the low five bits
     are the per-instruction condition/count mask, shifted up once for
     each instruction that retires.  */
  unsigned int base_cond = itstate & 0xe0;
  unsigned int mask = (itstate << 1) & 0x1f;

  /* A mask with no bits left in [3:0] means the IT block is done.  */
  if ((mask & 0x0f) == 0)
    return 0;

  return base_cond | mask;
}
4193
4194 /* Find the next PC after the current instruction executes. In some
4195 cases we can not statically determine the answer (see the IT state
4196 handling in this function); in that case, a breakpoint may be
4197 inserted in addition to the returned PC, which will be used to set
4198 another breakpoint by our caller. */
4199
static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;		/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  /* All addresses returned from here carry the Thumb bit.  */
  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  /* Skip any instructions whose condition is false.  */
	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      arm_insert_single_step_breakpoint (gdbarch, aspace,
						 MAKE_THUMB_ADDR (pc));
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	/* Advance to the next instruction.  All the 32-bit
	   instructions share a common prefix.  */
	return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));

      /* Otherwise, handle the instruction normally.  */
    }

  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
	 all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)  /* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* Let the OS ABI hook predict the PC after the syscall.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      /* Combine the immediates; the J1/J2 bits are stored
		 inverted relative to the sign, hence the XOR.  */
	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    load_pc = 0;

	  if (load_pc)
	    {
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* PC-relative load: the base is the word-aligned PC.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      if (bit (inst2, 10))
		{
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      /* Register offset, optionally shifted left.  */
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      /* Bit 11 distinguishes CBNZ (set) from CBZ (clear).  */
      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }
  return nextpc;
}
4565
4566 /* Get the raw next address. PC is the current program counter, in
4567 FRAME, which is assumed to be executing in ARM mode.
4568
4569 The value returned has the execution state of the next instruction
4570 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4571 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4572 address. */
4573
static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  /* The NV (0b1111) condition space holds unconditional encodings on
     ARMv5+, handled specially below.  */
  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  */
	  nextpc = BranchDest (pc, this_instr);
	  /* The H bit (bit 24) selects the extra halfword offset.  */
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:			/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only instructions writing to the PC (Rd == 15) can
	       change control flow.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		return nextpc;
	      }

	    /* Multiply into PC.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    operand1 = ((rn == ARM_PC_REGNUM)
			? (pc_val + 8)
			: get_frame_register_unsigned (frame, rn));

	    if (bit (this_instr, 25))
	      {
		/* Immediate operand: 8-bit value rotated right by
		   twice the 4-bit rotate field.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    /* Emulate the ALU operation to find the value written to
	       the PC.  */
	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* Comparison ops do not write Rd; keep the default.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* byte write to PC */
		  rn = bits (this_instr, 16, 19);
		  base = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
		      (bit (this_instr, 25)
		       ? shifted_reg_val (frame, this_instr, c, pc_val, status)
		       : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
							      4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;
		  unsigned long rn_val
		    = get_frame_register_unsigned (frame,
						   bits (this_instr, 16, 19));

		  if (bit (this_instr, 23))
		    {
		      /* up */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      /* The PC slot is above all other saved registers.  */
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
							      (rn_val + offset),
							      4, byte_order);
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    /* Let the OS ABI hook predict the PC after the syscall.  */
	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}
4835
4836 /* Determine next PC after current instruction executes. Will call either
4837 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4838 loop is detected. */
4839
4840 CORE_ADDR
4841 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4842 {
4843 CORE_ADDR nextpc;
4844
4845 if (arm_frame_is_thumb (frame))
4846 {
4847 nextpc = thumb_get_next_pc_raw (frame, pc);
4848 if (nextpc == MAKE_THUMB_ADDR (pc))
4849 error (_("Infinite loop detected"));
4850 }
4851 else
4852 {
4853 nextpc = arm_get_next_pc_raw (frame, pc);
4854 if (nextpc == pc)
4855 error (_("Infinite loop detected"));
4856 }
4857
4858 return nextpc;
4859 }
4860
4861 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4862 of the appropriate mode (as encoded in the PC value), even if this
4863 differs from what would be expected according to the symbol tables. */
4864
4865 void
4866 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
4867 struct address_space *aspace,
4868 CORE_ADDR pc)
4869 {
4870 struct cleanup *old_chain
4871 = make_cleanup_restore_integer (&arm_override_mode);
4872
4873 arm_override_mode = IS_THUMB_ADDR (pc);
4874 pc = gdbarch_addr_bits_remove (gdbarch, pc);
4875
4876 insert_single_step_breakpoint (gdbarch, aspace, pc);
4877
4878 do_cleanups (old_chain);
4879 }
4880
4881 /* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
4882 instruction and ending with a STREX{,B,H,D} instruction. If such a sequence
4883 is found, attempt to step through it. A breakpoint is placed at the end of
4884 the sequence. */
4885
static int
thumb_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned short insn1, insn2;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  ULONGEST status, itstate;

  /* We currently do not support atomic sequences within an IT block.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
  if (itstate & 0x0f)
    return 0;

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.  */
  insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  /* ldrex{,b,h,d} are 32-bit encodings; bail out otherwise.  */
  if (thumb_insn_size (insn1) != 4)
    return 0;

  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  if (!((insn1 & 0xfff0) == 0xe850
	|| ((insn1 & 0xfff0) == 0xe8d0 && (insn2 & 0x00c0) == 0x0040)))
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
      loc += 2;

      if (thumb_insn_size (insn1) != 4)
	{
	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf000) == 0xd000 && bits (insn1, 8, 11) != 0x0f)
	    {
	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + 2 + (sbits (insn1, 0, 7) << 1);
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb_instruction_changes_pc (insn1))
	    return 0;
	}
      else
	{
	  /* 32-bit instruction: fetch the second halfword too.  */
	  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
	  loc += 2;

	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf800) == 0xf000
	      && (insn2 & 0xd000) == 0x8000
	      && (insn1 & 0x0380) != 0x0380)
	    {
	      int sign, j1, j2, imm1, imm2;
	      unsigned int offset;

	      sign = sbits (insn1, 10, 10);
	      imm1 = bits (insn1, 0, 5);
	      imm2 = bits (insn2, 0, 10);
	      j1 = bit (insn2, 13);
	      j2 = bit (insn2, 11);

	      offset = (sign << 20) + (j2 << 19) + (j1 << 18);
	      offset += (imm1 << 12) + (imm2 << 1);

	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + offset;
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb2_instruction_changes_pc (insn1, insn2))
	    return 0;

	  /* If we find a strex{,b,h,d}, we're done.  */
	  if ((insn1 & 0xfff0) == 0xe840
	      || ((insn1 & 0xfff0) == 0xe8c0 && (insn2 & 0x00c0) == 0x0040))
	    break;
	}
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace,
				       MAKE_THUMB_ADDR (breaks[index]));

  return 1;
}
5016
/* ARM-mode counterpart of thumb_deal_with_atomic_sequence_raw: scan
   for a ldrex..strex sequence starting at the current PC and place
   breakpoints past it (and at one optional conditional-branch target).
   Return 1 if breakpoints were placed, 0 to fall back to normal
   single-stepping.  */

static int
arm_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned int insn;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
     Note that we do not currently support conditionally executed atomic
     instructions.  */
  insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
  loc += 4;
  if ((insn & 0xff9000f0) != 0xe1900090)
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
      loc += 4;

      /* Assume that there is at most one conditional branch in the atomic
	 sequence.  If a conditional branch is found, put a breakpoint in
	 its destination address.  */
      if (bits (insn, 24, 27) == 0xa)
	{
	  if (last_breakpoint > 0)
	    return 0; /* More than one conditional branch found, fallback
			 to the standard single-step code.  */

	  breaks[1] = BranchDest (loc - 4, insn);
	  last_breakpoint++;
	}

      /* We do not support atomic sequences that use any *other* instructions
	 but conditional branches to change the PC.  Fall back to standard
	 code to avoid losing control of execution.  */
      else if (arm_instruction_changes_pc (insn))
	return 0;

      /* If we find a strex{,b,h,d}, we're done.  */
      if ((insn & 0xff9000f0) == 0xe1800090)
	break;
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
5091
int
arm_deal_with_atomic_sequence (struct frame_info *frame)
{
  /* Pick the handler matching the frame's execution state.  */
  return (arm_frame_is_thumb (frame)
	  ? thumb_deal_with_atomic_sequence_raw (frame)
	  : arm_deal_with_atomic_sequence_raw (frame));
}
5100
5101 /* single_step() is called just before we want to resume the inferior,
5102 if we want to single-step it but there is no hardware or kernel
5103 single-step support. We find the target of the coming instruction
5104 and breakpoint it. */
5105
5106 int
5107 arm_software_single_step (struct frame_info *frame)
5108 {
5109 struct gdbarch *gdbarch = get_frame_arch (frame);
5110 struct address_space *aspace = get_frame_address_space (frame);
5111 CORE_ADDR next_pc;
5112
5113 if (arm_deal_with_atomic_sequence (frame))
5114 return 1;
5115
5116 next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
5117 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
5118
5119 return 1;
5120 }
5121
5122 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
5123 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
5124 NULL if an error occurs. BUF is freed. */
5125
5126 static gdb_byte *
5127 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
5128 int old_len, int new_len)
5129 {
5130 gdb_byte *new_buf, *middle;
5131 int bytes_to_read = new_len - old_len;
5132
5133 new_buf = xmalloc (new_len);
5134 memcpy (new_buf + bytes_to_read, buf, old_len);
5135 xfree (buf);
5136 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
5137 {
5138 xfree (new_buf);
5139 return NULL;
5140 }
5141 return new_buf;
5142 }
5143
5144 /* An IT block is at most the 2-byte IT instruction followed by
5145 four 4-byte instructions. The furthest back we must search to
5146 find an IT block that affects the current instruction is thus
5147 2 + 3 * 4 == 14 bytes. */
5148 #define MAX_IT_BLOCK_PREFIX 14
5149
5150 /* Use a quick scan if there are more than this many bytes of
5151 code. */
5152 #define IT_SCAN_THRESHOLD 32
5153
5154 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
5155 A breakpoint in an IT block may not be hit, depending on the
5156 condition flags. */
5157 static CORE_ADDR
5158 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
5159 {
5160 gdb_byte *buf;
5161 char map_type;
5162 CORE_ADDR boundary, func_start;
5163 int buf_len, buf2_len;
5164 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
5165 int i, any, last_it, last_it_count;
5166
5167 /* If we are using BKPT breakpoints, none of this is necessary. */
5168 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
5169 return bpaddr;
5170
5171 /* ARM mode does not have this problem. */
5172 if (!arm_pc_is_thumb (gdbarch, bpaddr))
5173 return bpaddr;
5174
5175 /* We are setting a breakpoint in Thumb code that could potentially
5176 contain an IT block. The first step is to find how much Thumb
5177 code there is; we do not need to read outside of known Thumb
5178 sequences. */
5179 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
5180 if (map_type == 0)
5181 /* Thumb-2 code must have mapping symbols to have a chance. */
5182 return bpaddr;
5183
5184 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
5185
5186 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
5187 && func_start > boundary)
5188 boundary = func_start;
5189
5190 /* Search for a candidate IT instruction. We have to do some fancy
5191 footwork to distinguish a real IT instruction from the second
5192 half of a 32-bit instruction, but there is no need for that if
5193 there's no candidate. */
5194 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
5195 if (buf_len == 0)
5196 /* No room for an IT instruction. */
5197 return bpaddr;
5198
5199 buf = xmalloc (buf_len);
5200 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
5201 return bpaddr;
5202 any = 0;
5203 for (i = 0; i < buf_len; i += 2)
5204 {
5205 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5206 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5207 {
5208 any = 1;
5209 break;
5210 }
5211 }
5212 if (any == 0)
5213 {
5214 xfree (buf);
5215 return bpaddr;
5216 }
5217
5218 /* OK, the code bytes before this instruction contain at least one
5219 halfword which resembles an IT instruction. We know that it's
5220 Thumb code, but there are still two possibilities. Either the
5221 halfword really is an IT instruction, or it is the second half of
5222 a 32-bit Thumb instruction. The only way we can tell is to
5223 scan forwards from a known instruction boundary. */
5224 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5225 {
5226 int definite;
5227
5228 /* There's a lot of code before this instruction. Start with an
5229 optimistic search; it's easy to recognize halfwords that can
5230 not be the start of a 32-bit instruction, and use that to
5231 lock on to the instruction boundaries. */
5232 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5233 if (buf == NULL)
5234 return bpaddr;
5235 buf_len = IT_SCAN_THRESHOLD;
5236
5237 definite = 0;
5238 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5239 {
5240 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5241 if (thumb_insn_size (inst1) == 2)
5242 {
5243 definite = 1;
5244 break;
5245 }
5246 }
5247
5248 /* At this point, if DEFINITE, BUF[I] is the first place we
5249 are sure that we know the instruction boundaries, and it is far
5250 enough from BPADDR that we could not miss an IT instruction
5251 affecting BPADDR. If ! DEFINITE, give up - start from a
5252 known boundary. */
5253 if (! definite)
5254 {
5255 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5256 bpaddr - boundary);
5257 if (buf == NULL)
5258 return bpaddr;
5259 buf_len = bpaddr - boundary;
5260 i = 0;
5261 }
5262 }
5263 else
5264 {
5265 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5266 if (buf == NULL)
5267 return bpaddr;
5268 buf_len = bpaddr - boundary;
5269 i = 0;
5270 }
5271
5272 /* Scan forwards. Find the last IT instruction before BPADDR. */
5273 last_it = -1;
5274 last_it_count = 0;
5275 while (i < buf_len)
5276 {
5277 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5278 last_it_count--;
5279 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5280 {
5281 last_it = i;
5282 if (inst1 & 0x0001)
5283 last_it_count = 4;
5284 else if (inst1 & 0x0002)
5285 last_it_count = 3;
5286 else if (inst1 & 0x0004)
5287 last_it_count = 2;
5288 else
5289 last_it_count = 1;
5290 }
5291 i += thumb_insn_size (inst1);
5292 }
5293
5294 xfree (buf);
5295
5296 if (last_it == -1)
5297 /* There wasn't really an IT instruction after all. */
5298 return bpaddr;
5299
5300 if (last_it_count < 1)
5301 /* It was too far away. */
5302 return bpaddr;
5303
5304 /* This really is a trouble spot. Move the breakpoint to the IT
5305 instruction. */
5306 return bpaddr - buf_len + last_it;
5307 }
5308
5309 /* ARM displaced stepping support.
5310
5311 Generally ARM displaced stepping works as follows:
5312
5313 1. When an instruction is to be single-stepped, it is first decoded by
5314 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5315 Depending on the type of instruction, it is then copied to a scratch
5316 location, possibly in a modified form. The copy_* set of functions
5317 performs such modification, as necessary. A breakpoint is placed after
5318 the modified instruction in the scratch space to return control to GDB.
5319 Note in particular that instructions which modify the PC will no longer
5320 do so after modification.
5321
5322 2. The instruction is single-stepped, by setting the PC to the scratch
5323 location address, and resuming. Control returns to GDB when the
5324 breakpoint is hit.
5325
5326 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5327 function used for the current instruction. This function's job is to
5328 put the CPU/memory state back to what it would have been if the
5329 instruction had been executed unmodified in its original location. */
5330
5331 /* NOP instruction (mov r0, r0). */
5332 #define ARM_NOP 0xe1a00000
5333 #define THUMB_NOP 0x4600
5334
5335 /* Helper for register reads for displaced stepping. In particular, this
5336 returns the PC as it would be seen by the instruction at its original
5337 location. */
5338
5339 ULONGEST
5340 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5341 int regno)
5342 {
5343 ULONGEST ret;
5344 CORE_ADDR from = dsc->insn_addr;
5345
5346 if (regno == ARM_PC_REGNUM)
5347 {
5348 /* Compute pipeline offset:
5349 - When executing an ARM instruction, PC reads as the address of the
5350 current instruction plus 8.
5351 - When executing a Thumb instruction, PC reads as the address of the
5352 current instruction plus 4. */
5353
5354 if (!dsc->is_thumb)
5355 from += 8;
5356 else
5357 from += 4;
5358
5359 if (debug_displaced)
5360 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5361 (unsigned long) from);
5362 return (ULONGEST) from;
5363 }
5364 else
5365 {
5366 regcache_cooked_read_unsigned (regs, regno, &ret);
5367 if (debug_displaced)
5368 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5369 regno, (unsigned long) ret);
5370 return ret;
5371 }
5372 }
5373
5374 static int
5375 displaced_in_arm_mode (struct regcache *regs)
5376 {
5377 ULONGEST ps;
5378 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5379
5380 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5381
5382 return (ps & t_bit) == 0;
5383 }
5384
5385 /* Write to the PC as from a branch instruction. */
5386
5387 static void
5388 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5389 ULONGEST val)
5390 {
5391 if (!dsc->is_thumb)
5392 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5393 architecture versions < 6. */
5394 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5395 val & ~(ULONGEST) 0x3);
5396 else
5397 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5398 val & ~(ULONGEST) 0x1);
5399 }
5400
5401 /* Write to the PC as from a branch-exchange instruction. */
5402
5403 static void
5404 bx_write_pc (struct regcache *regs, ULONGEST val)
5405 {
5406 ULONGEST ps;
5407 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5408
5409 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5410
5411 if ((val & 1) == 1)
5412 {
5413 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5414 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5415 }
5416 else if ((val & 2) == 0)
5417 {
5418 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5419 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5420 }
5421 else
5422 {
5423 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5424 mode, align dest to 4 bytes). */
5425 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5426 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5427 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5428 }
5429 }
5430
5431 /* Write to the PC as if from a load instruction. */
5432
5433 static void
5434 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5435 ULONGEST val)
5436 {
5437 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5438 bx_write_pc (regs, val);
5439 else
5440 branch_write_pc (regs, dsc, val);
5441 }
5442
5443 /* Write to the PC as if from an ALU instruction. */
5444
5445 static void
5446 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5447 ULONGEST val)
5448 {
5449 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5450 bx_write_pc (regs, val);
5451 else
5452 branch_write_pc (regs, dsc, val);
5453 }
5454
5455 /* Helper for writing to registers for displaced stepping. Writing to the PC
   has varying effects depending on the instruction which does the write:
5457 this is controlled by the WRITE_PC argument. */
5458
5459 void
5460 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5461 int regno, ULONGEST val, enum pc_write_style write_pc)
5462 {
5463 if (regno == ARM_PC_REGNUM)
5464 {
5465 if (debug_displaced)
5466 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5467 (unsigned long) val);
5468 switch (write_pc)
5469 {
5470 case BRANCH_WRITE_PC:
5471 branch_write_pc (regs, dsc, val);
5472 break;
5473
5474 case BX_WRITE_PC:
5475 bx_write_pc (regs, val);
5476 break;
5477
5478 case LOAD_WRITE_PC:
5479 load_write_pc (regs, dsc, val);
5480 break;
5481
5482 case ALU_WRITE_PC:
5483 alu_write_pc (regs, dsc, val);
5484 break;
5485
5486 case CANNOT_WRITE_PC:
5487 warning (_("Instruction wrote to PC in an unexpected way when "
5488 "single-stepping"));
5489 break;
5490
5491 default:
5492 internal_error (__FILE__, __LINE__,
5493 _("Invalid argument to displaced_write_reg"));
5494 }
5495
5496 dsc->wrote_to_pc = 1;
5497 }
5498 else
5499 {
5500 if (debug_displaced)
5501 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5502 regno, (unsigned long) val);
5503 regcache_cooked_write_unsigned (regs, regno, val);
5504 }
5505 }
5506
5507 /* This function is used to concisely determine if an instruction INSN
5508 references PC. Register fields of interest in INSN should have the
5509 corresponding fields of BITMASK set to 0b1111. The function
   returns 1 if any of these fields in INSN reference the PC
5511 (also 0b1111, r15), else it returns 0. */
5512
static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  /* Walk the register fields marked in BITMASK from least to most
     significant; each field is the 4-bit span starting at the lowest
     remaining set bit.  Report 1 as soon as one of those fields in
     INSN holds 0b1111 (the PC, r15).  */
  while (bitmask != 0)
    {
      uint32_t field = (bitmask & -bitmask) * 0xf;

      if ((insn & field) == field)
	return 1;

      bitmask &= ~field;
    }

  return 0;
}
5538
5539 /* The simplest copy function. Many instructions have the same effect no
5540 matter what address they are executed at: in those cases, use this. */
5541
5542 static int
5543 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5544 const char *iname, struct displaced_step_closure *dsc)
5545 {
5546 if (debug_displaced)
5547 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5548 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5549 iname);
5550
5551 dsc->modinsn[0] = insn;
5552
5553 return 0;
5554 }
5555
5556 static int
5557 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
5558 uint16_t insn2, const char *iname,
5559 struct displaced_step_closure *dsc)
5560 {
5561 if (debug_displaced)
5562 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
5563 "opcode/class '%s' unmodified\n", insn1, insn2,
5564 iname);
5565
5566 dsc->modinsn[0] = insn1;
5567 dsc->modinsn[1] = insn2;
5568 dsc->numinsns = 2;
5569
5570 return 0;
5571 }
5572
/* Copy a 16-bit Thumb instruction (both Thumb-1 and 16-bit Thumb-2
   encodings) without any modification.  */
5575 static int
5576 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
5577 const char *iname,
5578 struct displaced_step_closure *dsc)
5579 {
5580 if (debug_displaced)
5581 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
5582 "opcode/class '%s' unmodified\n", insn,
5583 iname);
5584
5585 dsc->modinsn[0] = insn;
5586
5587 return 0;
5588 }
5589
5590 /* Preload instructions with immediate offset. */
5591
5592 static void
5593 cleanup_preload (struct gdbarch *gdbarch,
5594 struct regcache *regs, struct displaced_step_closure *dsc)
5595 {
5596 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5597 if (!dsc->u.preload.immed)
5598 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5599 }
5600
5601 static void
5602 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5603 struct displaced_step_closure *dsc, unsigned int rn)
5604 {
5605 ULONGEST rn_val;
5606 /* Preload instructions:
5607
5608 {pli/pld} [rn, #+/-imm]
5609 ->
5610 {pli/pld} [r0, #+/-imm]. */
5611
5612 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5613 rn_val = displaced_read_reg (regs, dsc, rn);
5614 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5615 dsc->u.preload.immed = 1;
5616
5617 dsc->cleanup = &cleanup_preload;
5618 }
5619
5620 static int
5621 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5622 struct displaced_step_closure *dsc)
5623 {
5624 unsigned int rn = bits (insn, 16, 19);
5625
5626 if (!insn_references_pc (insn, 0x000f0000ul))
5627 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5628
5629 if (debug_displaced)
5630 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5631 (unsigned long) insn);
5632
5633 dsc->modinsn[0] = insn & 0xfff0ffff;
5634
5635 install_preload (gdbarch, regs, dsc, rn);
5636
5637 return 0;
5638 }
5639
5640 static int
5641 thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
5642 struct regcache *regs, struct displaced_step_closure *dsc)
5643 {
5644 unsigned int rn = bits (insn1, 0, 3);
5645 unsigned int u_bit = bit (insn1, 7);
5646 int imm12 = bits (insn2, 0, 11);
5647 ULONGEST pc_val;
5648
5649 if (rn != ARM_PC_REGNUM)
5650 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
5651
5652 /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
5653 PLD (literal) Encoding T1. */
5654 if (debug_displaced)
5655 fprintf_unfiltered (gdb_stdlog,
5656 "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
5657 (unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
5658 imm12);
5659
5660 if (!u_bit)
5661 imm12 = -1 * imm12;
5662
5663 /* Rewrite instruction {pli/pld} PC imm12 into:
5664 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
5665
5666 {pli/pld} [r0, r1]
5667
5668 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
5669
5670 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5671 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5672
5673 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
5674
5675 displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
5676 displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
5677 dsc->u.preload.immed = 0;
5678
5679 /* {pli/pld} [r0, r1] */
5680 dsc->modinsn[0] = insn1 & 0xfff0;
5681 dsc->modinsn[1] = 0xf001;
5682 dsc->numinsns = 2;
5683
5684 dsc->cleanup = &cleanup_preload;
5685 return 0;
5686 }
5687
5688 /* Preload instructions with register offset. */
5689
5690 static void
5691 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
5692 struct displaced_step_closure *dsc, unsigned int rn,
5693 unsigned int rm)
5694 {
5695 ULONGEST rn_val, rm_val;
5696
5697 /* Preload register-offset instructions:
5698
5699 {pli/pld} [rn, rm {, shift}]
5700 ->
5701 {pli/pld} [r0, r1 {, shift}]. */
5702
5703 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5704 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5705 rn_val = displaced_read_reg (regs, dsc, rn);
5706 rm_val = displaced_read_reg (regs, dsc, rm);
5707 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5708 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5709 dsc->u.preload.immed = 0;
5710
5711 dsc->cleanup = &cleanup_preload;
5712 }
5713
5714 static int
5715 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5716 struct regcache *regs,
5717 struct displaced_step_closure *dsc)
5718 {
5719 unsigned int rn = bits (insn, 16, 19);
5720 unsigned int rm = bits (insn, 0, 3);
5721
5722
5723 if (!insn_references_pc (insn, 0x000f000ful))
5724 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5725
5726 if (debug_displaced)
5727 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5728 (unsigned long) insn);
5729
5730 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5731
5732 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5733 return 0;
5734 }
5735
5736 /* Copy/cleanup coprocessor load and store instructions. */
5737
5738 static void
5739 cleanup_copro_load_store (struct gdbarch *gdbarch,
5740 struct regcache *regs,
5741 struct displaced_step_closure *dsc)
5742 {
5743 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5744
5745 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5746
5747 if (dsc->u.ldst.writeback)
5748 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5749 }
5750
5751 static void
5752 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5753 struct displaced_step_closure *dsc,
5754 int writeback, unsigned int rn)
5755 {
5756 ULONGEST rn_val;
5757
5758 /* Coprocessor load/store instructions:
5759
5760 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5761 ->
5762 {stc/stc2} [r0, #+/-imm].
5763
5764 ldc/ldc2 are handled identically. */
5765
5766 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5767 rn_val = displaced_read_reg (regs, dsc, rn);
5768 /* PC should be 4-byte aligned. */
5769 rn_val = rn_val & 0xfffffffc;
5770 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5771
5772 dsc->u.ldst.writeback = writeback;
5773 dsc->u.ldst.rn = rn;
5774
5775 dsc->cleanup = &cleanup_copro_load_store;
5776 }
5777
5778 static int
5779 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5780 struct regcache *regs,
5781 struct displaced_step_closure *dsc)
5782 {
5783 unsigned int rn = bits (insn, 16, 19);
5784
5785 if (!insn_references_pc (insn, 0x000f0000ul))
5786 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5787
5788 if (debug_displaced)
5789 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5790 "load/store insn %.8lx\n", (unsigned long) insn);
5791
5792 dsc->modinsn[0] = insn & 0xfff0ffff;
5793
5794 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5795
5796 return 0;
5797 }
5798
5799 static int
5800 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
5801 uint16_t insn2, struct regcache *regs,
5802 struct displaced_step_closure *dsc)
5803 {
5804 unsigned int rn = bits (insn1, 0, 3);
5805
5806 if (rn != ARM_PC_REGNUM)
5807 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
5808 "copro load/store", dsc);
5809
5810 if (debug_displaced)
5811 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5812 "load/store insn %.4x%.4x\n", insn1, insn2);
5813
5814 dsc->modinsn[0] = insn1 & 0xfff0;
5815 dsc->modinsn[1] = insn2;
5816 dsc->numinsns = 2;
5817
5818 /* This function is called for copying instruction LDC/LDC2/VLDR, which
5819 doesn't support writeback, so pass 0. */
5820 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
5821
5822 return 0;
5823 }
5824
5825 /* Clean up branch instructions (actually perform the branch, by setting
5826 PC). */
5827
5828 static void
5829 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5830 struct displaced_step_closure *dsc)
5831 {
5832 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5833 int branch_taken = condition_true (dsc->u.branch.cond, status);
5834 enum pc_write_style write_pc = dsc->u.branch.exchange
5835 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5836
5837 if (!branch_taken)
5838 return;
5839
5840 if (dsc->u.branch.link)
5841 {
5842 /* The value of LR should be the next insn of current one. In order
5843 not to confuse logic hanlding later insn `bx lr', if current insn mode
5844 is Thumb, the bit 0 of LR value should be set to 1. */
5845 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
5846
5847 if (dsc->is_thumb)
5848 next_insn_addr |= 0x1;
5849
5850 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
5851 CANNOT_WRITE_PC);
5852 }
5853
5854 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5855 }
5856
5857 /* Copy B/BL/BLX instructions with immediate destinations. */
5858
5859 static void
5860 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
5861 struct displaced_step_closure *dsc,
5862 unsigned int cond, int exchange, int link, long offset)
5863 {
5864 /* Implement "BL<cond> <label>" as:
5865
5866 Preparation: cond <- instruction condition
5867 Insn: mov r0, r0 (nop)
5868 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5869
5870 B<cond> similar, but don't set r14 in cleanup. */
5871
5872 dsc->u.branch.cond = cond;
5873 dsc->u.branch.link = link;
5874 dsc->u.branch.exchange = exchange;
5875
5876 dsc->u.branch.dest = dsc->insn_addr;
5877 if (link && exchange)
5878 /* For BLX, offset is computed from the Align (PC, 4). */
5879 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
5880
5881 if (dsc->is_thumb)
5882 dsc->u.branch.dest += 4 + offset;
5883 else
5884 dsc->u.branch.dest += 8 + offset;
5885
5886 dsc->cleanup = &cleanup_branch;
5887 }
5888 static int
5889 arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5890 struct regcache *regs, struct displaced_step_closure *dsc)
5891 {
5892 unsigned int cond = bits (insn, 28, 31);
5893 int exchange = (cond == 0xf);
5894 int link = exchange || bit (insn, 24);
5895 long offset;
5896
5897 if (debug_displaced)
5898 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5899 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5900 (unsigned long) insn);
5901 if (exchange)
5902 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5903 then arrange the switch into Thumb mode. */
5904 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5905 else
5906 offset = bits (insn, 0, 23) << 2;
5907
5908 if (bit (offset, 25))
5909 offset = offset | ~0x3ffffff;
5910
5911 dsc->modinsn[0] = ARM_NOP;
5912
5913 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
5914 return 0;
5915 }
5916
/* Copy a 32-bit Thumb B/BL/BLX (immediate) for displaced stepping: the
   scratch copy is a NOP, and cleanup_branch later performs the actual
   (conditional) branch, setting LR first for BL/BLX.  */
static int
thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
		      uint16_t insn2, struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  int link = bit (insn2, 14);	/* Set for BL/BLX, clear for plain B.  */
  int exchange = link && !bit (insn2, 12);	/* BLX clears bit 12.  */
  int cond = INST_AL;
  long offset = 0;
  int j1 = bit (insn2, 13);
  int j2 = bit (insn2, 11);
  /* S is sign-extended (sbits), so it is 0 or -1 and (s << 24) also
     yields the sign-extension of the final offset.  */
  int s = sbits (insn1, 10, 10);
  /* Per the encoding rules: I1 = NOT (J1 EOR S), I2 = NOT (J2 EOR S).  */
  int i1 = !(j1 ^ bit (insn1, 10));
  int i2 = !(j2 ^ bit (insn1, 10));

  if (!link && !exchange) /* B */
    {
      /* Low halfword supplies imm11:'0'.  */
      offset = (bits (insn2, 0, 10) << 1);
      if (bit (insn2, 12)) /* Encoding T4 */
	{
	  /* Unconditional branch: 25-bit offset from S:I1:I2:imm10:imm11:'0'.  */
	  offset |= (bits (insn1, 0, 9) << 12)
	    | (i2 << 22)
	    | (i1 << 23)
	    | (s << 24);
	  cond = INST_AL;
	}
      else /* Encoding T3 */
	{
	  /* Conditional branch: 21-bit offset from S:J2:J1:imm6:imm11:'0',
	     with the condition taken from bits 6-9 of the first halfword.  */
	  offset |= (bits (insn1, 0, 5) << 12)
	    | (j1 << 18)
	    | (j2 << 19)
	    | (s << 20);
	  cond = bits (insn1, 6, 9);
	}
    }
  else
    {
      /* BL or BLX: 25-bit offset from S:I1:I2:imm10 plus the low
	 halfword's immediate; BLX counts in words, BL in halfwords.  */
      offset = (bits (insn1, 0, 9) << 12);
      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
      offset |= exchange ?
	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
			"%.4x %.4x with offset %.8lx\n",
			link ? (exchange) ? "blx" : "bl" : "b",
			insn1, insn2, offset);

  dsc->modinsn[0] = THUMB_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
5971
5972 /* Copy B Thumb instructions. */
5973 static int
5974 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
5975 struct displaced_step_closure *dsc)
5976 {
5977 unsigned int cond = 0;
5978 int offset = 0;
5979 unsigned short bit_12_15 = bits (insn, 12, 15);
5980 CORE_ADDR from = dsc->insn_addr;
5981
5982 if (bit_12_15 == 0xd)
5983 {
5984 /* offset = SignExtend (imm8:0, 32) */
5985 offset = sbits ((insn << 1), 0, 8);
5986 cond = bits (insn, 8, 11);
5987 }
5988 else if (bit_12_15 == 0xe) /* Encoding T2 */
5989 {
5990 offset = sbits ((insn << 1), 0, 11);
5991 cond = INST_AL;
5992 }
5993
5994 if (debug_displaced)
5995 fprintf_unfiltered (gdb_stdlog,
5996 "displaced: copying b immediate insn %.4x "
5997 "with offset %d\n", insn, offset);
5998
5999 dsc->u.branch.cond = cond;
6000 dsc->u.branch.link = 0;
6001 dsc->u.branch.exchange = 0;
6002 dsc->u.branch.dest = from + 4 + offset;
6003
6004 dsc->modinsn[0] = THUMB_NOP;
6005
6006 dsc->cleanup = &cleanup_branch;
6007
6008 return 0;
6009 }
6010
6011 /* Copy BX/BLX with register-specified destinations. */
6012
6013 static void
6014 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
6015 struct displaced_step_closure *dsc, int link,
6016 unsigned int cond, unsigned int rm)
6017 {
6018 /* Implement {BX,BLX}<cond> <reg>" as:
6019
6020 Preparation: cond <- instruction condition
6021 Insn: mov r0, r0 (nop)
6022 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
6023
6024 Don't set r14 in cleanup for BX. */
6025
6026 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
6027
6028 dsc->u.branch.cond = cond;
6029 dsc->u.branch.link = link;
6030
6031 dsc->u.branch.exchange = 1;
6032
6033 dsc->cleanup = &cleanup_branch;
6034 }
6035
6036 static int
6037 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
6038 struct regcache *regs, struct displaced_step_closure *dsc)
6039 {
6040 unsigned int cond = bits (insn, 28, 31);
6041 /* BX: x12xxx1x
6042 BLX: x12xxx3x. */
6043 int link = bit (insn, 5);
6044 unsigned int rm = bits (insn, 0, 3);
6045
6046 if (debug_displaced)
6047 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
6048 (unsigned long) insn);
6049
6050 dsc->modinsn[0] = ARM_NOP;
6051
6052 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
6053 return 0;
6054 }
6055
6056 static int
6057 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
6058 struct regcache *regs,
6059 struct displaced_step_closure *dsc)
6060 {
6061 int link = bit (insn, 7);
6062 unsigned int rm = bits (insn, 3, 6);
6063
6064 if (debug_displaced)
6065 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
6066 (unsigned short) insn);
6067
6068 dsc->modinsn[0] = THUMB_NOP;
6069
6070 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
6071
6072 return 0;
6073 }
6074
6075
6076 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
6077
6078 static void
6079 cleanup_alu_imm (struct gdbarch *gdbarch,
6080 struct regcache *regs, struct displaced_step_closure *dsc)
6081 {
6082 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6083 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6084 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6085 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6086 }
6087
6088 static int
6089 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6090 struct displaced_step_closure *dsc)
6091 {
6092 unsigned int rn = bits (insn, 16, 19);
6093 unsigned int rd = bits (insn, 12, 15);
6094 unsigned int op = bits (insn, 21, 24);
6095 int is_mov = (op == 0xd);
6096 ULONGEST rd_val, rn_val;
6097
6098 if (!insn_references_pc (insn, 0x000ff000ul))
6099 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
6100
6101 if (debug_displaced)
6102 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
6103 "%.8lx\n", is_mov ? "move" : "ALU",
6104 (unsigned long) insn);
6105
6106 /* Instruction is of form:
6107
6108 <op><cond> rd, [rn,] #imm
6109
6110 Rewrite as:
6111
6112 Preparation: tmp1, tmp2 <- r0, r1;
6113 r0, r1 <- rd, rn
6114 Insn: <op><cond> r0, r1, #imm
6115 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6116 */
6117
6118 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6119 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6120 rn_val = displaced_read_reg (regs, dsc, rn);
6121 rd_val = displaced_read_reg (regs, dsc, rd);
6122 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6123 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6124 dsc->rd = rd;
6125
6126 if (is_mov)
6127 dsc->modinsn[0] = insn & 0xfff00fff;
6128 else
6129 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
6130
6131 dsc->cleanup = &cleanup_alu_imm;
6132
6133 return 0;
6134 }
6135
6136 static int
6137 thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
6138 uint16_t insn2, struct regcache *regs,
6139 struct displaced_step_closure *dsc)
6140 {
6141 unsigned int op = bits (insn1, 5, 8);
6142 unsigned int rn, rm, rd;
6143 ULONGEST rd_val, rn_val;
6144
6145 rn = bits (insn1, 0, 3); /* Rn */
6146 rm = bits (insn2, 0, 3); /* Rm */
6147 rd = bits (insn2, 8, 11); /* Rd */
6148
6149 /* This routine is only called for instruction MOV. */
6150 gdb_assert (op == 0x2 && rn == 0xf);
6151
6152 if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
6153 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);
6154
6155 if (debug_displaced)
6156 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
6157 "ALU", insn1, insn2);
6158
6159 /* Instruction is of form:
6160
6161 <op><cond> rd, [rn,] #imm
6162
6163 Rewrite as:
6164
6165 Preparation: tmp1, tmp2 <- r0, r1;
6166 r0, r1 <- rd, rn
6167 Insn: <op><cond> r0, r1, #imm
6168 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6169 */
6170
6171 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6172 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6173 rn_val = displaced_read_reg (regs, dsc, rn);
6174 rd_val = displaced_read_reg (regs, dsc, rd);
6175 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6176 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6177 dsc->rd = rd;
6178
6179 dsc->modinsn[0] = insn1;
6180 dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
6181 dsc->numinsns = 2;
6182
6183 dsc->cleanup = &cleanup_alu_imm;
6184
6185 return 0;
6186 }
6187
6188 /* Copy/cleanup arithmetic/logic insns with register RHS. */
6189
6190 static void
6191 cleanup_alu_reg (struct gdbarch *gdbarch,
6192 struct regcache *regs, struct displaced_step_closure *dsc)
6193 {
6194 ULONGEST rd_val;
6195 int i;
6196
6197 rd_val = displaced_read_reg (regs, dsc, 0);
6198
6199 for (i = 0; i < 3; i++)
6200 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6201
6202 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6203 }
6204
/* Prepare an ALU (register operand) instruction for displaced stepping.
   Saves the original contents of r0-r2 into DSC->tmp[], loads the
   values of RD, RN and RM into r0-r2 respectively, records the
   destination in DSC->rd and registers cleanup_alu_reg to undo the
   shuffling and deliver the result afterwards.  The caller is expected
   to have rewritten the instruction to operate on r0-r2.  */

static void
install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
		 struct displaced_step_closure *dsc,
		 unsigned int rd, unsigned int rn, unsigned int rm)
{
  ULONGEST rd_val, rn_val, rm_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm [, <shift>]

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
		  r0, r1, r2 <- rd, rn, rm
     Insn: <op><cond> r0, r1, r2 [, <shift>]
     Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
  */

  /* Save the scratch registers before clobbering them.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  /* Note: reading RD here also folds in the correct pipeline offset if
     RD is the PC (displaced_read_reg handles that).  */
  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  dsc->cleanup = &cleanup_alu_reg;
}
6237
/* Copy an ARM ALU (register operand) instruction for displaced
   stepping.  Instructions that do not reference the PC in the Rd, Rn
   or Rm fields (mask 0x000ff00f) run unmodified; otherwise the
   register fields are rewritten to use scratch registers r0-r2 and
   install_alu_reg sets up the operand shuffle.  */

static int
arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		  struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);	/* MOV has no Rn operand.  */

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
			is_mov ? "move" : "ALU", (unsigned long) insn);

  /* The mask 0xfff00ff0 clears Rd (bits 12-15, -> r0), Rn (bits 16-19)
     and Rm (bits 0-3).  MOV gets Rm = r2 only (0x2); other ops also get
     Rn = r1 (0x10002).  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
  else
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;

  install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
		   bits (insn, 0, 3));
  return 0;
}
6261
6262 static int
6263 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
6264 struct regcache *regs,
6265 struct displaced_step_closure *dsc)
6266 {
6267 unsigned rn, rm, rd;
6268
6269 rd = bits (insn, 3, 6);
6270 rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
6271 rm = 2;
6272
6273 if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6274 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
6275
6276 if (debug_displaced)
6277 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
6278 "ALU", (unsigned short) insn);
6279
6280 dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
6281
6282 install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
6283
6284 return 0;
6285 }
6286
6287 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6288
6289 static void
6290 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
6291 struct regcache *regs,
6292 struct displaced_step_closure *dsc)
6293 {
6294 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6295 int i;
6296
6297 for (i = 0; i < 4; i++)
6298 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6299
6300 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6301 }
6302
/* Prepare an ALU (register-shifted register) instruction for displaced
   stepping.  Saves r0-r3 into DSC->tmp[], loads RD, RN, RM and RS into
   r0-r3 respectively, records the destination in DSC->rd and registers
   cleanup_alu_shifted_reg to undo the shuffle and deliver the result.
   The caller must have rewritten the instruction to use r0-r3.  */

static void
install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
			 struct displaced_step_closure *dsc,
			 unsigned int rd, unsigned int rn, unsigned int rm,
			 unsigned rs)
{
  int i;
  ULONGEST rd_val, rn_val, rm_val, rs_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm, <shift> rs

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
		  r0, r1, r2, r3 <- rd, rn, rm, rs
     Insn: <op><cond> r0, r1, r2, <shift> r3
     Cleanup: tmp5 <- r0
	      r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
	      rd <- tmp5
  */

  /* Save all four scratch registers before clobbering them.  */
  for (i = 0; i < 4; i++)
    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  rs_val = displaced_read_reg (regs, dsc, rs);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
  dsc->rd = rd;
  dsc->cleanup = &cleanup_alu_shifted_reg;
}
6340
/* Copy an ARM ALU (register-shifted register) instruction for
   displaced stepping.  Instructions that do not reference the PC in
   Rd, Rn, Rs or Rm (mask 0x000fff0f) run unmodified; otherwise the
   register fields are rewritten to r0-r3 and install_alu_shifted_reg
   sets up the operand shuffle.  */

static int
arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);	/* MOV has no Rn operand.  */
  unsigned int rd, rn, rm, rs;

  if (!insn_references_pc (insn, 0x000fff0ful))
    return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
			"%.8lx\n", is_mov ? "move" : "ALU",
			(unsigned long) insn);

  rn = bits (insn, 16, 19);
  rm = bits (insn, 0, 3);
  rs = bits (insn, 8, 11);
  rd = bits (insn, 12, 15);

  /* The mask 0xfff000f0 clears Rd, Rn, Rs and Rm.  0x302 sets
     Rs = r3, Rm = r2 (Rd implicitly r0); 0x10302 additionally sets
     Rn = r1 for the three-operand forms.  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
  else
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;

  install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);

  return 0;
}
6372
6373 /* Clean up load instructions. */
6374
/* Cleanup for a load executed out of line.  The copied instruction
   loaded into scratch registers: r0 holds the loaded value (r1 holds
   the second word of a doubleword transfer), r2 held the base Rn
   (possibly updated by writeback) and r3 the index Rm for
   register-offset forms.  Restore the scratch registers, perform
   writeback, then deliver the loaded value(s) to the real Rt (which
   may be the PC, hence LOAD_WRITE_PC).  */

static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* Collect results before restoring the scratch registers.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
6401
6402 /* Clean up store instructions. */
6403
/* Cleanup for a store executed out of line.  Restores the scratch
   registers (r0/r1 for the stored value(s), r2 for the base, r3 for a
   register offset, and r4 when it was used to synthesize a stored PC
   value — see install_load_store), then performs base-register
   writeback.  Stores never write the PC, so only CANNOT_WRITE_PC
   writes are needed.  */

static void
cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
	       struct displaced_step_closure *dsc)
{
  /* Read the (possibly written-back) base before restoring r2.  */
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);

  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.restore_r4)
    displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);

  /* Writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
}
6423
6424 /* Copy "extra" load/store instructions. These are halfword/doubleword
6425 transfers, which have a different encoding to byte/word transfers. */
6426
6427 static int
6428 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
6429 struct regcache *regs, struct displaced_step_closure *dsc)
6430 {
6431 unsigned int op1 = bits (insn, 20, 24);
6432 unsigned int op2 = bits (insn, 5, 6);
6433 unsigned int rt = bits (insn, 12, 15);
6434 unsigned int rn = bits (insn, 16, 19);
6435 unsigned int rm = bits (insn, 0, 3);
6436 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6437 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6438 int immed = (op1 & 0x4) != 0;
6439 int opcode;
6440 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
6441
6442 if (!insn_references_pc (insn, 0x000ff00ful))
6443 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
6444
6445 if (debug_displaced)
6446 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
6447 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
6448 (unsigned long) insn);
6449
6450 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
6451
6452 if (opcode < 0)
6453 internal_error (__FILE__, __LINE__,
6454 _("copy_extra_ld_st: instruction decode error"));
6455
6456 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6457 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6458 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6459 if (!immed)
6460 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6461
6462 rt_val = displaced_read_reg (regs, dsc, rt);
6463 if (bytesize[opcode] == 8)
6464 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
6465 rn_val = displaced_read_reg (regs, dsc, rn);
6466 if (!immed)
6467 rm_val = displaced_read_reg (regs, dsc, rm);
6468
6469 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6470 if (bytesize[opcode] == 8)
6471 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
6472 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6473 if (!immed)
6474 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6475
6476 dsc->rd = rt;
6477 dsc->u.ldst.xfersize = bytesize[opcode];
6478 dsc->u.ldst.rn = rn;
6479 dsc->u.ldst.immed = immed;
6480 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
6481 dsc->u.ldst.restore_r4 = 0;
6482
6483 if (immed)
6484 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6485 ->
6486 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6487 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6488 else
6489 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6490 ->
6491 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6492 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6493
6494 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
6495
6496 return 0;
6497 }
6498
6499 /* Copy byte/half word/word loads and stores. */
6500
/* Prepare a byte/halfword/word load or store for displaced stepping:
   save the scratch registers, move Rt, Rn (and Rm for register-offset
   forms) into r0, r2 (and r3), and record the transfer parameters for
   the cleanup routine.  The caller rewrites the instruction to use the
   scratch registers.  NOTE(review): the USERMODE parameter is unused
   in this body — presumably reserved for the "t"-suffixed variants;
   confirm against callers.  */

static void
install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
		    struct displaced_step_closure *dsc, int load,
		    int immed, int writeback, int size, int usermode,
		    int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Save scratch registers: r0 (value), r2 (base), r3 (offset, only
     for register forms), and r4 (only needed for PC stores, see the
     big comment below).  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rt;
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from displaced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
					= addr(Insn1) + offset - addr(Insn3) - 8
					= offset - 16
     Insn4: add r4, r4, #8 r4 = offset - 8
     Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
				= from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
6554
6555
/* Copy a 32-bit Thumb-2 PC-relative (literal) load of SIZE bytes for
   displaced stepping.  The PC-relative form is rewritten as a
   register-offset load through scratch registers: r2 gets the aligned
   PC value, r3 the signed immediate offset.  */

static int
thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc, int size)
{
  unsigned int u_bit = bit (insn1, 7);	/* Add-or-subtract bit.  */
  unsigned int rt = bits (insn2, 12, 15);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
			imm12);

  /* U bit clear means the offset is subtracted from the base.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction LDR Rt imm12 into:

     Prepare: tmp[0] <- r0, tmp[2] <- r2, tmp[3] <- r3, r2 <- pc, r3 <- imm12

     LDR R0, R2, R3,

     Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[2], r3 <- tmp[3].  */


  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  /* Word-align the PC, as the literal base of the original
     PC-relative load.  */
  pc_val = pc_val & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);

  dsc->rd = rt;

  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  /* LDR R0, R2, R3 */
  dsc->modinsn[0] = 0xf852;
  dsc->modinsn[1] = 0x3;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_load;

  return 0;
}
6611
/* Copy a 32-bit Thumb-2 word load (immediate or register offset) for
   displaced stepping.  Loads not involving the PC run unmodified;
   otherwise Rt and Rn are rewritten to scratch registers r0 and r2
   (and Rm to r3 for register forms) via install_load_store.  */

static int
thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc,
			  int writeback, int immed)
{
  unsigned int rt = bits (insn2, 12, 15);
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
  /* In LDR (register), there is also a register Rm, which is not allowed to
     be PC, so we don't have to check it.  */

  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
					dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
			rt, rn, insn1, insn2);

  install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
		      0, rt, rm, rn);

  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* ldr[b]<cond> rt, [rn, #imm], etc.
       ->
       ldr[b]<cond> r0, [r2, #imm].  */
    {
      /* (insn1 & 0xfff0) clears the Rn field; | 0x2 sets Rn = r2.  */
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      /* Clearing bits 12-15 of insn2 sets Rt = r0.  */
      dsc->modinsn[1] = insn2 & 0x0fff;
    }
  else
    /* ldr[b]<cond> rt, [rn, rm], etc.
       ->
       ldr[b]<cond> r0, [r2, r3].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      /* Rt = r0, Rm = r3.  */
      dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
    }

  dsc->numinsns = 2;

  return 0;
}
6659
6660
/* Copy ARM byte/word loads and stores (LDR/LDRB/STR/STRB and their
   "t" user-mode variants) for displaced stepping.  LOAD selects
   load vs. store, SIZE is the transfer width in bytes, USERMODE is
   non-zero for the "t" variants.  Instructions not referencing the PC
   run unmodified.  Storing the PC requires synthesizing the correct
   stored value at runtime using r4 — see the comment in
   install_load_store for the full derivation.  */

static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    struct displaced_step_closure *dsc,
			    int load, int size, int usermode)
{
  int immed = !bit (insn, 25);
  /* Writeback for post-indexed (P == 0) or W == 1 addressing.  */
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
			load ? (size == 1 ? "ldrb" : "ldr")
			     : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
			rt, rn,
			(unsigned long) insn);

  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
		      usermode, rt, rm, rn);

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      /* Compute "from + offset" into r0 at runtime; see the derivation
	 in install_load_store.  */
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
6725
6726 /* Cleanup LDM instructions with fully-populated register list. This is an
6727 unfortunate corner case: it's impossible to implement correctly by modifying
6728 the instruction. The issue is as follows: we have an instruction,
6729
6730 ldm rN, {r0-r15}
6731
6732 which we must rewrite to avoid loading PC. A possible solution would be to
6733 do the load in two halves, something like (with suitable cleanup
6734 afterwards):
6735
6736 mov r8, rN
6737 ldm[id][ab] r8!, {r0-r7}
6738 str r7, <temp>
6739 ldm[id][ab] r8, {r7-r14}
6740 <bkpt>
6741
6742 but at present there's no suitable place for <temp>, since the scratch space
6743 is overwritten before the cleanup routine is called. For now, we simply
6744 emulate the instruction. */
6745
/* Emulate an LDM with a fully-populated register list (see the comment
   block above): read each transferred word from memory and write it to
   the corresponding register, honoring the original instruction's
   condition, increment/decrement direction, and before/after
   semantics; finally perform base-register writeback if requested.  */

static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  /* Pre-indexed forms bump the address before each transfer,
     post-indexed forms after; decrementing forms bump downwards.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Walk the register list from r0 upwards (increment) or r15
     downwards (decrement) to match the transfer order.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  while (regmask)
    {
      uint32_t memword;

      /* Advance to the next register in the transfer list.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
6804
6805 /* Clean up an STM which included the PC in the register list. */
6806
/* Cleanup for an STM whose register list included the PC.  The copied
   instruction ran as-is out of line, so it stored the *scratch* PC
   value (plus an architecture-dependent offset of 8 or 12).  Locate
   the stored word, measure the offset it was stored with relative to
   the scratch copy's address, and rewrite the word with the original
   instruction's address plus that same offset.  */

static void
cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int store_executed = condition_true (dsc->u.block.cond, status);
  CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
  CORE_ADDR stm_insn_addr;
  uint32_t pc_val;
  long offset;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* If condition code fails, there's nothing else to do.  */
  if (!store_executed)
    return;

  /* The PC has the highest register number, so it is stored at the
     highest address of the block.  Compute that address from the
     transfer direction and before/after flags.  */
  if (dsc->u.block.increment)
    {
      pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;

      if (dsc->u.block.before)
	pc_stored_at += 4;
    }
  else
    {
      pc_stored_at = dsc->u.block.xfer_addr;

      if (dsc->u.block.before)
	pc_stored_at -= 4;
    }

  pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
  stm_insn_addr = dsc->scratch_base;
  /* OFFSET auto-detects how far ahead of the STM the stored PC was
     (architecture-dependent: PC+8 or PC+12).  */
  offset = pc_val - stm_insn_addr;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
			"STM instruction\n", offset);

  /* Rewrite the stored PC to the proper value for the non-displaced original
     instruction.  */
  write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
				 dsc->insn_addr + offset);
}
6851
6852 /* Clean up an LDM which includes the PC in the register list. We clumped all
6853 the registers in the transferred list into a contiguous range r0...rX (to
6854 avoid loading PC directly and losing control of the debugged program), so we
6855 must undo that here. */
6856
/* Clean up an LDM which includes the PC in the register list.  The copy
   routine rewrote the list into a contiguous chunk r0...rX; here we
   shuffle each loaded value from its temporary low register into the
   register the original instruction named (highest first, so a value
   is always moved out of a low register before that register is itself
   overwritten), restore any clobbered low registers from DSC->tmp[],
   and emulate base-register writeback.  */

static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int load_executed = condition_true (dsc->u.block.cond, status), i;
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = bitcount (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  if (!load_executed)
    return;

  /* The copied LDM loaded into r0..r(N-1); all of those are initially
     "clobbered" until restored or proven to be final destinations.  */
  clobbered = (1 << num_to_shuffle) - 1;

  /* Walk destination registers from r15 downward; the k-th highest
     destination received its value in low register r(k-1).  */
  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      if (debug_displaced)
		fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
				    "loaded register r%d to r%d\n"), read_reg,
				    write_reg);
	    }
	  else if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
				"r%d already in the right place\n"),
				write_reg);

	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
				"clobbered register r%d\n"), write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually.  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}
6933
6934 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6935 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6936
/* Copy an ARM LDM/STM for displaced stepping.  Transfers that neither
   use the PC as base nor include it in the register list run
   unmodified.  LDMs including the PC are rewritten to load into a
   contiguous low-register chunk (undone by cleanup_block_load_pc), a
   fully-populated LDM is emulated entirely in the cleanup, and STMs
   including the PC run as-is with the stored PC patched afterwards.  */

static int
arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
		     struct regcache *regs,
		     struct displaced_step_closure *dsc)
{
  int load = bit (insn, 20);
  int user = bit (insn, 22);
  int increment = bit (insn, 23);
  int before = bit (insn, 24);
  int writeback = bit (insn, 21);
  int rn = bits (insn, 16, 19);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
    return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
			"%.8lx\n", (unsigned long) insn);

  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = user;
  dsc->u.block.increment = increment;
  dsc->u.block.before = before;
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = bits (insn, 28, 31);

  dsc->u.block.regmask = insn & 0xffff;

  if (load)
    {
      if ((insn & 0xffff) == 0xffff)
	{
	  /* LDM with a fully-populated register list.  This case is
	     particularly tricky.  Implement for now by fully emulating the
	     instruction (which might not behave perfectly in all cases, but
	     these instructions should be rare enough for that not to matter
	     too much).  */
	  dsc->modinsn[0] = ARM_NOP;

	  dsc->cleanup = &cleanup_block_load_all;
	}
      else
	{
	  /* LDM of a list of registers which includes PC.  Implement by
	     rewriting the list of registers to be transferred into a
	     contiguous chunk r0...rX before doing the transfer, then shuffling
	     registers into the correct places in the cleanup routine.  */
	  unsigned int regmask = insn & 0xffff;
	  /* NOTE(review): TO, FROM, NEW_RN and BIT are never used below,
	     and BIT shadows the bit() helper — candidates for removal.  */
	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
	  unsigned int to = 0, from = 0, i, new_rn;

	  /* Save the low registers the rewritten LDM will clobber, so
	     cleanup_block_load_pc can restore them.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Writeback makes things complicated.  We need to avoid clobbering
	     the base register with one of the registers in our modified
	     register list, but just using a different register can't work in
	     all cases, e.g.:

	     ldm r14!, {r0-r13,pc}

	     which would need to be rewritten as:

	     ldm rN!, {r0-r14}

	     but that can't work, because there's no free register for N.

	     Solve this by turning off the writeback bit, and emulating
	     writeback manually in the cleanup routine.  */

	  if (writeback)
	    insn &= ~(1 << 21);

	  new_regmask = (1 << num_in_list) - 1;

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
				"{..., pc}: original reg list %.4x, modified "
				"list %.4x\n"), rn, writeback ? "!" : "",
				(int) insn & 0xffff, new_regmask);

	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      /* STM of a list of registers which includes PC.  Run the instruction
	 as-is, but out of line: this will store the wrong value for the PC,
	 so we must manually fix up the memory in the cleanup routine.
	 Doing things this way has the advantage that we can auto-detect
	 the offset of the PC write (which is architecture-dependent) in
	 the cleanup routine.  */
      dsc->modinsn[0] = insn;

      dsc->cleanup = &cleanup_block_store_pc;
    }

  return 0;
}
7050
/* Copy a 32-bit Thumb-2 LDM/STM for displaced stepping.  Mirrors
   arm_copy_block_xfer: PC-free transfers run unmodified; an LDM that
   includes the PC is rewritten to load a contiguous low-register chunk
   (undone by cleanup_block_load_pc); an STM including the PC runs
   as-is and has the stored PC patched by cleanup_block_store_pc.  */

static int
thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
			struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  int rn = bits (insn1, 0, 3);
  int load = bit (insn1, 4);
  int writeback = bit (insn1, 5);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "unpredictable ldm/stm", dsc);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
			"%.4x%.4x\n", insn1, insn2);

  /* Clear bit 13, since it should be always zero.  */
  dsc->u.block.regmask = (insn2 & 0xdfff);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = 0;
  dsc->u.block.increment = bit (insn1, 7);
  dsc->u.block.before = bit (insn1, 8);
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = INST_AL;
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);

  if (load)
    {
      if (dsc->u.block.regmask == 0xffff)
	{
	  /* This branch is impossible to happen.  */
	  gdb_assert (0);
	}
      else
	{
	  unsigned int regmask = dsc->u.block.regmask;
	  /* NOTE(review): TO, FROM, NEW_RN and BIT are never used below,
	     and BIT shadows the bit() helper — candidates for removal.  */
	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
	  unsigned int to = 0, from = 0, i, new_rn;

	  /* Save the low registers the rewritten LDM will clobber, so
	     cleanup_block_load_pc can restore them.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Turn off writeback; it is emulated in the cleanup routine
	     (see arm_copy_block_xfer for the rationale).  */
	  if (writeback)
	    insn1 &= ~(1 << 5);

	  new_regmask = (1 << num_in_list) - 1;

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
				"{..., pc}: original reg list %.4x, modified "
				"list %.4x\n"), rn, writeback ? "!" : "",
				(int) dsc->u.block.regmask, new_regmask);

	  dsc->modinsn[0] = insn1;
	  dsc->modinsn[1] = (new_regmask & 0xffff);
	  dsc->numinsns = 2;

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      dsc->modinsn[0] = insn1;
      dsc->modinsn[1] = insn2;
      dsc->numinsns = 2;
      dsc->cleanup = &cleanup_block_store_pc;
    }
  return 0;
}
7132
7133 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
7134 for Linux, where some SVC instructions must be treated specially. */
7135
7136 static void
7137 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
7138 struct displaced_step_closure *dsc)
7139 {
7140 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
7141
7142 if (debug_displaced)
7143 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
7144 "%.8lx\n", (unsigned long) resume_addr);
7145
7146 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
7147 }
7148
7149
/* Common copy routine for svc instruction.  */
7151
7152 static int
7153 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
7154 struct displaced_step_closure *dsc)
7155 {
7156 /* Preparation: none.
7157 Insn: unmodified svc.
7158 Cleanup: pc <- insn_addr + insn_size. */
7159
7160 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
7161 instruction. */
7162 dsc->wrote_to_pc = 1;
7163
7164 /* Allow OS-specific code to override SVC handling. */
7165 if (dsc->u.svc.copy_svc_os)
7166 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
7167 else
7168 {
7169 dsc->cleanup = &cleanup_svc;
7170 return 0;
7171 }
7172 }
7173
7174 static int
7175 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
7176 struct regcache *regs, struct displaced_step_closure *dsc)
7177 {
7178
7179 if (debug_displaced)
7180 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
7181 (unsigned long) insn);
7182
7183 dsc->modinsn[0] = insn;
7184
7185 return install_svc (gdbarch, regs, dsc);
7186 }
7187
7188 static int
7189 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
7190 struct regcache *regs, struct displaced_step_closure *dsc)
7191 {
7192
7193 if (debug_displaced)
7194 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
7195 insn);
7196
7197 dsc->modinsn[0] = insn;
7198
7199 return install_svc (gdbarch, regs, dsc);
7200 }
7201
7202 /* Copy undefined instructions. */
7203
7204 static int
7205 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
7206 struct displaced_step_closure *dsc)
7207 {
7208 if (debug_displaced)
7209 fprintf_unfiltered (gdb_stdlog,
7210 "displaced: copying undefined insn %.8lx\n",
7211 (unsigned long) insn);
7212
7213 dsc->modinsn[0] = insn;
7214
7215 return 0;
7216 }
7217
7218 static int
7219 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7220 struct displaced_step_closure *dsc)
7221 {
7222
7223 if (debug_displaced)
7224 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
7225 "%.4x %.4x\n", (unsigned short) insn1,
7226 (unsigned short) insn2);
7227
7228 dsc->modinsn[0] = insn1;
7229 dsc->modinsn[1] = insn2;
7230 dsc->numinsns = 2;
7231
7232 return 0;
7233 }
7234
7235 /* Copy unpredictable instructions. */
7236
7237 static int
7238 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
7239 struct displaced_step_closure *dsc)
7240 {
7241 if (debug_displaced)
7242 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
7243 "%.8lx\n", (unsigned long) insn);
7244
7245 dsc->modinsn[0] = insn;
7246
7247 return 0;
7248 }
7249
7250 /* The decode_* functions are instruction decoding helpers. They mostly follow
7251 the presentation in the ARM ARM. */
7252
/* Decode the "miscellaneous, memory hints and Advanced SIMD" group of
   unconditional ARM instructions and route each to the appropriate
   copy routine for displaced stepping.  OP1 is insn[26:20], OP2 is
   insn[7:4], RN is insn[19:16].  Returns 0 on success, nonzero on
   failure.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      /* pld/pldw; unpredictable when the base register is the PC.  */
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      /* Barriers and clrex have no PC-dependent behavior, so they are
	 copied unmodified.  */
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
7312
/* Decode an ARM instruction from the unconditional (condition field
   0b1111) encoding space: misc/memory-hint/Advanced SIMD, srs, rfe,
   blx (immediate), and coprocessor load/store and register transfers.
   Returns 0 on success, nonzero on failure.  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      /* blx (immediate): branches, so use the full branch copier.  */
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* Whether the base register is the PC determines whether the
	   immediate or the literal form is defined.  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7395
7396 /* Decode miscellaneous instructions in dp/misc encoding space. */
7397
/* Decode miscellaneous instructions in the dp/misc encoding space:
   mrs/msr, bx, clz, bxj, blx (register), saturating add/sub, bkpt and
   smc.  OP2 is insn[6:4] and OP is insn[22:21].  Returns 0 on
   success, nonzero on failure.

   Fixes relative to the previous version: the unused local OP1 is
   removed, and the "case 0x7" arm no longer falls implicitly through
   into "default" when OP is neither 1 nor 3 -- the (identical)
   undefined result is now returned explicitly.  */

static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				    regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7448
/* Decode a conditional ARM data-processing or miscellaneous
   instruction and dispatch it to the matching copy routine for
   displaced stepping.  Bit 25 selects the immediate form.  Returns 0
   on success, nonzero on failure.  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7494
/* Decode load/store word and unsigned byte instructions.  A is bit 25
   (register vs. immediate offset form), B is bit 4, OP1 is
   insn[24:20].  Each case forwards to arm_copy_ldr_str_ldrb_strb with
   three trailing flags; from the str/ldr/strb/ldrb/...t encoding
   pattern these appear to be (load, size-in-bytes, unprivileged) --
   TODO confirm against arm_copy_ldr_str_ldrb_strb's definition.
   Returns 0 on success, nonzero on failure.  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
7532
/* Decode media instructions: parallel add/sub, pack/unpack, saturate,
   reverse, usad8/usada8, sbfx, bfc/bfi and ubfx.  All cases are either
   copied unmodified or treated as undefined.  Returns 0 on success,
   nonzero on failure.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
			      "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	 {
	  /* Rd == 0xf distinguishes usad8 from usada8.  */
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	 return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	 {
	  /* Rn == 0xf distinguishes bfc from bfi.  */
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7588
/* Decode the branch (b/bl) versus block-transfer (ldm/stm) encoding
   split: bit 25 set means branch, clear means block transfer.  */

static int
arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
			struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  return (bit (insn, 25)
	  ? arm_copy_b_bl_blx (gdbarch, insn, regs, dsc)
	  : arm_copy_block_xfer (gdbarch, insn, regs, dsc));
}
7599
/* Decode ARM extension-register (VFP/Neon) load/store instructions:
   mrrc/mcrr transfers, vstm/vpush, vldm/vpop, vstr and vldr.  OPCODE
   is insn[24:20].  Returns 0 on success, nonzero on failure.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7630
7631 /* Decode shifted register instructions. */
7632
/* Decode Thumb-2 data-processing (shifted register) instructions.
   Within this encoding space only MOV may involve the PC, so
   everything else is copied unmodified.  */

static int
thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
			    uint16_t insn2, struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int op = bits (insn1, 5, 8);

  /* Anything other than MOV (op == 2, rn == 0xf) cannot touch the PC
     here, so copy it verbatim.  */
  if (op != 0x2 || rn != 0xf)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"dp (shift reg)", dsc);

  /* MOV: route through the ALU-immediate copier, which handles a
     possible PC operand.  */
  return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
}
7649
7650
/* Decode Thumb-2 extension register load/store.  The OPCODE layout is
   exactly the same as arm_decode_ext_reg_ld_st, but here OPCODE is
   taken from insn1[8:4].  Only vldr needs special handling (it may
   load relative to the PC); everything else is copied unmodified.
   Returns 0 on success, nonzero on failure.  */

static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
			     uint16_t insn2,  struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vmov", dsc);

    case 0x08: case 0x0c: /* 01x00 */
    case 0x0a: case 0x0e: /* 01x10 */
    case 0x12: case 0x16: /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d: /* 01x01 */
    case 0x0b: case 0x0f: /* 01x11 */
    case 0x13: case 0x17: /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7689
7690 static int
7691 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
7692 struct regcache *regs, struct displaced_step_closure *dsc)
7693 {
7694 unsigned int op1 = bits (insn, 20, 25);
7695 int op = bit (insn, 4);
7696 unsigned int coproc = bits (insn, 8, 11);
7697 unsigned int rn = bits (insn, 16, 19);
7698
7699 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
7700 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
7701 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
7702 && (coproc & 0xe) != 0xa)
7703 /* stc/stc2. */
7704 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7705 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
7706 && (coproc & 0xe) != 0xa)
7707 /* ldc/ldc2 imm/lit. */
7708 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7709 else if ((op1 & 0x3e) == 0x00)
7710 return arm_copy_undef (gdbarch, insn, dsc);
7711 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
7712 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
7713 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
7714 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7715 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
7716 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7717 else if ((op1 & 0x30) == 0x20 && !op)
7718 {
7719 if ((coproc & 0xe) == 0xa)
7720 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
7721 else
7722 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7723 }
7724 else if ((op1 & 0x30) == 0x20 && op)
7725 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
7726 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
7727 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7728 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
7729 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7730 else if ((op1 & 0x30) == 0x30)
7731 return arm_copy_svc (gdbarch, insn, regs, dsc);
7732 else
7733 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
7734 }
7735
/* Decode Thumb-2 coprocessor instructions: stc/ldc, VFP/Neon extension
   register load/store, mrrc/mcrr transfers and the remaining coproc
   space.  BIT_5_8 is insn1[8:5], BIT_9 and BIT_4 are single bits of
   INSN1.  Returns 0 on success, nonzero on failure.

   Fix: the locals OP1 and RN were computed but never used; they are
   removed.  */

static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
			 uint16_t insn2, struct regcache *regs,
			 struct displaced_step_closure *dsc)
{
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
					    dsc);
      else if (bit_5_8 == 0)  /* UNDEFINED.  */
	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
	{
	  /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
	  if ((coproc & 0xe) == 0xa)
	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
						dsc);
	  else /* coproc is not 101x.  */
	    {
	      if (bit_4 == 0)  /* STC/STC2.  */
		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						    "stc/stc2", dsc);
	      else  /* LDC/LDC2 {literal, immediate}.  */
		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
						     regs, dsc);
	    }
	}
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  return 0;
}
7778
7779 static void
7780 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
7781 struct displaced_step_closure *dsc, int rd)
7782 {
7783 /* ADR Rd, #imm
7784
7785 Rewrite as:
7786
7787 Preparation: Rd <- PC
7788 Insn: ADD Rd, #imm
7789 Cleanup: Null.
7790 */
7791
7792 /* Rd <- PC */
7793 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7794 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
7795 }
7796
7797 static int
7798 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
7799 struct displaced_step_closure *dsc,
7800 int rd, unsigned int imm)
7801 {
7802
7803 /* Encoding T2: ADDS Rd, #imm */
7804 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
7805
7806 install_pc_relative (gdbarch, regs, dsc, rd);
7807
7808 return 0;
7809 }
7810
7811 static int
7812 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
7813 struct regcache *regs,
7814 struct displaced_step_closure *dsc)
7815 {
7816 unsigned int rd = bits (insn, 8, 10);
7817 unsigned int imm8 = bits (insn, 0, 7);
7818
7819 if (debug_displaced)
7820 fprintf_unfiltered (gdb_stdlog,
7821 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
7822 rd, imm8, insn);
7823
7824 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
7825 }
7826
/* Copy a 32-bit Thumb ADR (PC-relative address) for displaced
   stepping.  Bit 7 of INSN1 distinguishes the subtract form (ADR
   encoding T2) from the add form (ADR encoding T3); the generated
   replacement is the corresponding SUB/ADD (immediate) instruction
   (those are SUB/ADD encoding T3 -- not to be confused with the ADR
   encoding numbers).  RD is preloaded with the original PC by
   install_pc_relative.  */

static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
			      uint16_t insn2, struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn2, 8, 11);
  /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
     extract raw immediate encoding rather than computing immediate.  When
     generating ADD or SUB instruction, we can simply perform OR operation to
     set immediate into ADD.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;
  unsigned int imm_i = insn1 & 0x0400;  /* Clear all bits except bit 10.  */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
			rd, imm_i, imm_3_8, insn1, insn2);

  if (bit (insn1, 7))  /* ADR encoding T2 (subtract form).  */
    {
      /* Generate SUB (immediate) encoding T3: SUB Rd, Rd, #imm.  */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else  /* ADR encoding T3 (add form).  */
    {
      /* Generate ADD (immediate) encoding T3: ADD Rd, Rd, #imm.  */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  dsc->numinsns = 2;

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
7863
7864 static int
7865 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
7866 struct regcache *regs,
7867 struct displaced_step_closure *dsc)
7868 {
7869 unsigned int rt = bits (insn1, 8, 10);
7870 unsigned int pc;
7871 int imm8 = (bits (insn1, 0, 7) << 2);
7872 CORE_ADDR from = dsc->insn_addr;
7873
7874 /* LDR Rd, #imm8
7875
7876 Rwrite as:
7877
7878 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
7879
7880 Insn: LDR R0, [R2, R3];
7881 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
7882
7883 if (debug_displaced)
7884 fprintf_unfiltered (gdb_stdlog,
7885 "displaced: copying thumb ldr r%d [pc #%d]\n"
7886 , rt, imm8);
7887
7888 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
7889 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
7890 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
7891 pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7892 /* The assembler calculates the required value of the offset from the
7893 Align(PC,4) value of this instruction to the label. */
7894 pc = pc & 0xfffffffc;
7895
7896 displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
7897 displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
7898
7899 dsc->rd = rt;
7900 dsc->u.ldst.xfersize = 4;
7901 dsc->u.ldst.rn = 0;
7902 dsc->u.ldst.immed = 0;
7903 dsc->u.ldst.writeback = 0;
7904 dsc->u.ldst.restore_r4 = 0;
7905
7906 dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
7907
7908 dsc->cleanup = &cleanup_load;
7909
7910 return 0;
7911 }
7912
/* Copy Thumb cbnz/cbz instruction.  The branch condition is evaluated
   here (by reading Rn) rather than executed: the scratch instruction
   is a NOP, and cleanup_branch performs the branch (or the fall
   through) afterwards.  */

static int
thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
		     struct regcache *regs,
		     struct displaced_step_closure *dsc)
{
  int non_zero = bit (insn1, 11);
  /* Branch offset: i:imm5:'0' (bit 9 and bits 7:3 of the insn).  */
  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
  CORE_ADDR from = dsc->insn_addr;
  int rn = bits (insn1, 0, 2);
  int rn_val = displaced_read_reg (regs, dsc, rn);

  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
     condition is false, let it be, cleanup_branch will do nothing.  */
  if (dsc->u.branch.cond)
    {
      dsc->u.branch.cond = INST_AL;
      /* Branch target: insn address + 4 (Thumb PC offset) + offset.  */
      dsc->u.branch.dest = from + 4 + imm5;
    }
  else
      dsc->u.branch.dest = from + 2;  /* Fall through to the next insn.  */

  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
			rn, rn_val, insn1, dsc->u.branch.dest);

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;
  return 0;
}
7951
/* Copy Table Branch Byte/Halfword (TBB/TBH).  The branch offset (in
   halfwords) is read from the jump table at Rn + Rm (TBB) or
   Rn + 2*Rm (TBH); the branch itself is emulated by cleanup_branch.  */
static int
thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  ULONGEST rn_val, rm_val;
  int is_tbh = bit (insn2, 4);
  CORE_ADDR halfwords = 0;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));

  /* NOTE(review): the return value of target_read_memory is ignored in
     both branches below; on a failed read BUF is uninitialized and the
     computed branch destination is garbage.  Consider propagating the
     error.  */
  if (is_tbh)
    {
      gdb_byte buf[2];

      target_read_memory (rn_val + 2 * rm_val, buf, 2);
      halfwords = extract_unsigned_integer (buf, 2, byte_order);
    }
  else
    {
      gdb_byte buf[1];

      target_read_memory (rn_val + rm_val, buf, 1);
      halfwords = extract_unsigned_integer (buf, 1, byte_order);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
			" offset 0x%x\n", is_tbh ? "tbh" : "tbb",
			(unsigned int) rn_val, (unsigned int) rm_val,
			(unsigned int) halfwords);

  /* Destination: address after the 4-byte TBB/TBH plus twice the table
     entry; the branch is performed by cleanup_branch.  */
  dsc->u.branch.cond = INST_AL;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
7996
7997 static void
7998 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
7999 struct displaced_step_closure *dsc)
8000 {
8001 /* PC <- r7 */
8002 int val = displaced_read_reg (regs, dsc, 7);
8003 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
8004
8005 /* r7 <- r8 */
8006 val = displaced_read_reg (regs, dsc, 8);
8007 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
8008
8009 /* r8 <- tmp[0] */
8010 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
8011
8012 }
8013
8014 static int
8015 thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
8016 struct regcache *regs,
8017 struct displaced_step_closure *dsc)
8018 {
8019 dsc->u.block.regmask = insn1 & 0x00ff;
8020
8021 /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
8022 to :
8023
8024 (1) register list is full, that is, r0-r7 are used.
8025 Prepare: tmp[0] <- r8
8026
8027 POP {r0, r1, ...., r6, r7}; remove PC from reglist
8028 MOV r8, r7; Move value of r7 to r8;
8029 POP {r7}; Store PC value into r7.
8030
8031 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
8032
8033 (2) register list is not full, supposing there are N registers in
8034 register list (except PC, 0 <= N <= 7).
8035 Prepare: for each i, 0 - N, tmp[i] <- ri.
8036
8037 POP {r0, r1, ...., rN};
8038
8039 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
8040 from tmp[] properly.
8041 */
8042 if (debug_displaced)
8043 fprintf_unfiltered (gdb_stdlog,
8044 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
8045 dsc->u.block.regmask, insn1);
8046
8047 if (dsc->u.block.regmask == 0xff)
8048 {
8049 dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
8050
8051 dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
8052 dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
8053 dsc->modinsn[2] = 0xbc80; /* POP {r7} */
8054
8055 dsc->numinsns = 3;
8056 dsc->cleanup = &cleanup_pop_pc_16bit_all;
8057 }
8058 else
8059 {
8060 unsigned int num_in_list = bitcount (dsc->u.block.regmask);
8061 unsigned int new_regmask, bit = 1;
8062 unsigned int to = 0, from = 0, i, new_rn;
8063
8064 for (i = 0; i < num_in_list + 1; i++)
8065 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
8066
8067 new_regmask = (1 << (num_in_list + 1)) - 1;
8068
8069 if (debug_displaced)
8070 fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
8071 "{..., pc}: original reg list %.4x,"
8072 " modified list %.4x\n"),
8073 (int) dsc->u.block.regmask, new_regmask);
8074
8075 dsc->u.block.regmask |= 0x8000;
8076 dsc->u.block.writeback = 0;
8077 dsc->u.block.cond = INST_AL;
8078
8079 dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
8080
8081 dsc->cleanup = &cleanup_block_load_pc;
8082 }
8083
8084 return 0;
8085 }
8086
/* Decode a 16-bit Thumb instruction and dispatch it to the matching
   copy routine for displaced stepping.  The top-level split is on
   insn[15:12], with insn[11:10] refining several groups.  Raises an
   internal error if the instruction cannot be decoded.  */

static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions.  */
  switch (op_bit_12_15)
    {
      /* Shift (immediate), add, subtract, move and compare.  */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0: /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1: /* Special data instructions and branch and exchange.  */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7) /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default: /* LDR (literal) */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13: /* POP */
	    if (bit (insn1, 8)) /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15: /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction.  */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
8189
/* Decode a 32-bit Thumb-2 instruction INSN1:INSN2 from the "load byte /
   halfword / word and memory hints" encoding space for displaced
   stepping, and dispatch to the appropriate copy routine, which fills
   in DSC.  Returns that routine's status: 0 on success, nonzero on a
   decode error.

   Note: the previous version declared an `err' local that was never
   used (every path returns directly); it has been removed, along with
   the unreachable `break' statements that followed the returns.  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
                                 uint16_t insn1, uint16_t insn2,
                                 struct regcache *regs,
                                 struct displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);   /* Destination register.  */
  int rn = bits (insn1, 0, 3);     /* Base register; 0xf selects PC.  */
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints.  */
      if (rt == 0xf) /* PLD/PLI */
        {
          if (rn == 0xf)
            /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
            return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
          else
            return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                "pli/pld", dsc);
        }
      else
        {
          if (rn == 0xf) /* LDRB/LDRSB (literal) */
            return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
                                             1);
          else
            return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                "ldrb{reg, immediate}/ldrbt",
                                                dsc);
        }

    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
        return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                            "pld/unalloc memhint", dsc);
      else
        {
          if (rn == 0xf)
            return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
                                             2);
          else
            return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                "ldrh/ldrht", dsc);
        }

    case 2: /* Load word.  */
      {
        int insn2_bit_8_11 = bits (insn2, 8, 11);

        if (rn == 0xf)
          return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
        else if (op1 == 0x1) /* Encoding T3 */
          return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
                                           0, 1);
        else /* op1 == 0x0 */
          {
            if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
              /* LDR (immediate).  */
              return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
                                               dsc, bit (insn2, 8), 1);
            else if (insn2_bit_8_11 == 0xe) /* LDRT */
              return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                  "ldrt", dsc);
            else
              /* LDR (register).  */
              return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
                                               dsc, 0, 0);
          }
      }

    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
    }

  /* Not reached: every case above returns.  Kept for -Wreturn-type.  */
  return 0;
}
8270
/* Decode a 32-bit Thumb-2 instruction INSN1:INSN2 for displaced
   stepping and dispatch to the matching copy routine, which prepares
   DSC.  Calls internal_error if the instruction cannot be decoded.

   Fix: the inner data-processing opcode variable used to be named `op',
   shadowing the outer `op' declared at function scope (-Wshadow); it is
   now `dp_op'.  */

static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
                                    uint16_t insn2, struct regcache *regs,
                                    struct displaced_step_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
        switch (bits (insn1, 9, 10))
          {
          case 0:
            if (bit (insn1, 6))
              {
                /* Load/store {dual, exclusive}, table branch.  */
                if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
                    && bits (insn2, 5, 7) == 0)
                  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
                                                  dsc);
                else
                  /* PC is not allowed to use in load/store {dual, exclusive}
                     instructions.  */
                  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                     "load/store dual/ex", dsc);
              }
            else /* load/store multiple */
              {
                switch (bits (insn1, 7, 8))
                  {
                  case 0: case 3: /* SRS, RFE */
                    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                       "srs/rfe", dsc);
                    break;
                  case 1: case 2: /* LDM/STM/PUSH/POP */
                    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs,
                                                  dsc);
                    break;
                  }
              }
            break;

          case 1:
            /* Data-processing (shift register).  */
            err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
                                              dsc);
            break;
          default: /* Coprocessor instructions.  */
            err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
            break;
          }
        break;
      }
    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
        {
          if (bit (insn2, 14)  /* BLX/BL */
              || bit (insn2, 12) /* Unconditional branch */
              || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
            err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
          else
            err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                               "misc ctrl", dsc);
        }
      else
        {
          if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
            {
              int dp_op = bits (insn1, 4, 8);
              int rn = bits (insn1, 0, 3);

              /* ADR and ADD/SUB (PC-relative) need the PC rewritten.  */
              if ((dp_op == 0 || dp_op == 0xa) && rn == 0xf)
                err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
                                                    regs, dsc);
              else
                err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                   "dp/pb", dsc);
            }
          else /* Data processing (modified immediate) */
            err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                               "dp/mi", dsc);
        }
      break;
    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
        {
        case 0:
          if (bit (insn1, 4))
            err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
                                                   regs, dsc);
          else /* NEON Load/Store and Store single data item */
            err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                               "neon elt/struct load/store",
                                               dsc);
          break;
        case 1: /* op1 = 3, bits (9, 10) == 1 */
          switch (bits (insn1, 7, 8))
            {
            case 0: case 1: /* Data processing (register) */
              err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                 "dp(reg)", dsc);
              break;
            case 2: /* Multiply and absolute difference */
              err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                 "mul/mua/diff", dsc);
              break;
            case 3: /* Long multiply and divide */
              err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                 "lmul/lmua", dsc);
              break;
            }
          break;
        default: /* Coprocessor instructions */
          err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
          break;
        }
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
                    _("thumb_process_displaced_32bit_insn: Instruction decode error"));
}
8398
8399 static void
8400 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8401 CORE_ADDR to, struct regcache *regs,
8402 struct displaced_step_closure *dsc)
8403 {
8404 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8405 uint16_t insn1
8406 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
8407
8408 if (debug_displaced)
8409 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
8410 "at %.8lx\n", insn1, (unsigned long) from);
8411
8412 dsc->is_thumb = 1;
8413 dsc->insn_size = thumb_insn_size (insn1);
8414 if (thumb_insn_size (insn1) == 4)
8415 {
8416 uint16_t insn2
8417 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
8418 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
8419 }
8420 else
8421 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
8422 }
8423
8424 void
8425 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8426 CORE_ADDR to, struct regcache *regs,
8427 struct displaced_step_closure *dsc)
8428 {
8429 int err = 0;
8430 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8431 uint32_t insn;
8432
8433 /* Most displaced instructions use a 1-instruction scratch space, so set this
8434 here and override below if/when necessary. */
8435 dsc->numinsns = 1;
8436 dsc->insn_addr = from;
8437 dsc->scratch_base = to;
8438 dsc->cleanup = NULL;
8439 dsc->wrote_to_pc = 0;
8440
8441 if (!displaced_in_arm_mode (regs))
8442 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
8443
8444 dsc->is_thumb = 0;
8445 dsc->insn_size = 4;
8446 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
8447 if (debug_displaced)
8448 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
8449 "at %.8lx\n", (unsigned long) insn,
8450 (unsigned long) from);
8451
8452 if ((insn & 0xf0000000) == 0xf0000000)
8453 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
8454 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
8455 {
8456 case 0x0: case 0x1: case 0x2: case 0x3:
8457 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
8458 break;
8459
8460 case 0x4: case 0x5: case 0x6:
8461 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
8462 break;
8463
8464 case 0x7:
8465 err = arm_decode_media (gdbarch, insn, dsc);
8466 break;
8467
8468 case 0x8: case 0x9: case 0xa: case 0xb:
8469 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
8470 break;
8471
8472 case 0xc: case 0xd: case 0xe: case 0xf:
8473 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
8474 break;
8475 }
8476
8477 if (err)
8478 internal_error (__FILE__, __LINE__,
8479 _("arm_process_displaced_insn: Instruction decode error"));
8480 }
8481
8482 /* Actually set up the scratch space for a displaced instruction. */
8483
8484 void
8485 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
8486 CORE_ADDR to, struct displaced_step_closure *dsc)
8487 {
8488 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8489 unsigned int i, len, offset;
8490 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8491 int size = dsc->is_thumb? 2 : 4;
8492 const unsigned char *bkp_insn;
8493
8494 offset = 0;
8495 /* Poke modified instruction(s). */
8496 for (i = 0; i < dsc->numinsns; i++)
8497 {
8498 if (debug_displaced)
8499 {
8500 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
8501 if (size == 4)
8502 fprintf_unfiltered (gdb_stdlog, "%.8lx",
8503 dsc->modinsn[i]);
8504 else if (size == 2)
8505 fprintf_unfiltered (gdb_stdlog, "%.4x",
8506 (unsigned short)dsc->modinsn[i]);
8507
8508 fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
8509 (unsigned long) to + offset);
8510
8511 }
8512 write_memory_unsigned_integer (to + offset, size,
8513 byte_order_for_code,
8514 dsc->modinsn[i]);
8515 offset += size;
8516 }
8517
8518 /* Choose the correct breakpoint instruction. */
8519 if (dsc->is_thumb)
8520 {
8521 bkp_insn = tdep->thumb_breakpoint;
8522 len = tdep->thumb_breakpoint_size;
8523 }
8524 else
8525 {
8526 bkp_insn = tdep->arm_breakpoint;
8527 len = tdep->arm_breakpoint_size;
8528 }
8529
8530 /* Put breakpoint afterwards. */
8531 write_memory (to + offset, bkp_insn, len);
8532
8533 if (debug_displaced)
8534 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
8535 paddress (gdbarch, from), paddress (gdbarch, to));
8536 }
8537
8538 /* Entry point for copying an instruction into scratch space for displaced
8539 stepping. */
8540
8541 struct displaced_step_closure *
8542 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
8543 CORE_ADDR from, CORE_ADDR to,
8544 struct regcache *regs)
8545 {
8546 struct displaced_step_closure *dsc
8547 = xmalloc (sizeof (struct displaced_step_closure));
8548 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
8549 arm_displaced_init_closure (gdbarch, from, to, dsc);
8550
8551 return dsc;
8552 }
8553
8554 /* Entry point for cleaning things up after a displaced instruction has been
8555 single-stepped. */
8556
8557 void
8558 arm_displaced_step_fixup (struct gdbarch *gdbarch,
8559 struct displaced_step_closure *dsc,
8560 CORE_ADDR from, CORE_ADDR to,
8561 struct regcache *regs)
8562 {
8563 if (dsc->cleanup)
8564 dsc->cleanup (gdbarch, regs, dsc);
8565
8566 if (!dsc->wrote_to_pc)
8567 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
8568 dsc->insn_addr + dsc->insn_size);
8569
8570 }
8571
8572 #include "bfd-in2.h"
8573 #include "libcoff.h"
8574
/* Disassembler callback: print the instruction at MEMADDR using the
   opcodes library, selecting Thumb vs. ARM decoding from the symbol
   table (via arm_pc_is_thumb) and big vs. little endian printing from
   INFO->endian.  Returns the result of the opcodes printer.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Static, lazily-initialized fake symbol machinery shared by all
         calls; set up exactly once (guarded by csym.native below).  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit so the printer sees the real address.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
8616
8617 /* The following define instruction sequences that will cause ARM
8618 cpu's to take an undefined instruction trap. These are used to
8619 signal a breakpoint to GDB.
8620
8621 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8622 modes. A different instruction is required for each mode. The ARM
8623 cpu's can also be big or little endian. Thus four different
8624 instructions are needed to support all cases.
8625
8626 Note: ARMv4 defines several new instructions that will take the
8627 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8628 not in fact add the new instructions. The new undefined
8629 instructions in ARMv4 are all instructions that had no defined
8630 behaviour in earlier chips. There is no guarantee that they will
8631 raise an exception, but may be treated as NOP's. In practice, it
8632 may only safe to rely on instructions matching:
8633
8634 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8635 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8636 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8637
8638 Even this may only true if the condition predicate is true. The
8639 following use a condition predicate of ALWAYS so it is always TRUE.
8640
8641 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8642 and NetBSD all use a software interrupt rather than an undefined
8643 instruction to force a trap. This can be handled by by the
8644 abi-specific code during establishment of the gdbarch vector. */
8645
/* ARM breakpoints: little- and big-endian byte orderings of the same
   undefined-instruction word (see the encoding table in the comment
   above).  The Thumb sequence is the bytes 0xbe,0xbe, which are
   identical in either byte order.  */
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

/* Default breakpoint byte sequences; presumably installed in the
   gdbarch tdep unless ABI-specific code substitutes a software
   interrupt (see comment above) — confirm in arm_gdbarch_init.  */
static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8655
8656 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8657 the program counter value to determine whether a 16-bit or 32-bit
8658 breakpoint should be used. It returns a pointer to a string of
8659 bytes that encode a breakpoint instruction, stores the length of
8660 the string to *lenptr, and adjusts the program counter (if
8661 necessary) to point to the actual memory location where the
8662 breakpoint should be inserted. */
8663
8664 static const unsigned char *
8665 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
8666 {
8667 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8668 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8669
8670 if (arm_pc_is_thumb (gdbarch, *pcptr))
8671 {
8672 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
8673
8674 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
8675 check whether we are replacing a 32-bit instruction. */
8676 if (tdep->thumb2_breakpoint != NULL)
8677 {
8678 gdb_byte buf[2];
8679 if (target_read_memory (*pcptr, buf, 2) == 0)
8680 {
8681 unsigned short inst1;
8682 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
8683 if (thumb_insn_size (inst1) == 4)
8684 {
8685 *lenptr = tdep->thumb2_breakpoint_size;
8686 return tdep->thumb2_breakpoint;
8687 }
8688 }
8689 }
8690
8691 *lenptr = tdep->thumb_breakpoint_size;
8692 return tdep->thumb_breakpoint;
8693 }
8694 else
8695 {
8696 *lenptr = tdep->arm_breakpoint_size;
8697 return tdep->arm_breakpoint;
8698 }
8699 }
8700
8701 static void
8702 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
8703 int *kindptr)
8704 {
8705 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8706
8707 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
8708
8709 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
8710 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8711 that this is not confused with a 32-bit ARM breakpoint. */
8712 *kindptr = 3;
8713 }
8714
/* Extract from an array REGBUF containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  Dispatches on the floating-point model for
   float returns, on "integer-like" codes for scalar returns, and
   treats everything else as a register-packed aggregate.  */

static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
	    convert_from_extended (floatformat_from_type (type), tmpbuf,
				   valbuf, gdbarch_byte_order (gdbarch));
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float and variadic-VFP returns live in r0 (and r1 for
	     doubles).  */
	  regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
				  valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, tmpbuf);
	  /* The final register may hold fewer than
	     INT_REGISTER_SIZE useful bytes.  */
	  memcpy (valbuf, tmpbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
8807
8808
8809 /* Will a function return an aggregate type in memory or in a
8810 register? Return 0 if an aggregate type can be returned in a
8811 register, 1 if it must be returned in memory. */
8812
8813 static int
8814 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
8815 {
8816 int nRc;
8817 enum type_code code;
8818
8819 CHECK_TYPEDEF (type);
8820
8821 /* In the ARM ABI, "integer" like aggregate types are returned in
8822 registers. For an aggregate type to be integer like, its size
8823 must be less than or equal to INT_REGISTER_SIZE and the
8824 offset of each addressable subfield must be zero. Note that bit
8825 fields are not addressable, and all addressable subfields of
8826 unions always start at offset zero.
8827
8828 This function is based on the behaviour of GCC 2.95.1.
8829 See: gcc/arm.c: arm_return_in_memory() for details.
8830
8831 Note: All versions of GCC before GCC 2.95.2 do not set up the
8832 parameters correctly for a function returning the following
8833 structure: struct { float f;}; This should be returned in memory,
8834 not a register. Richard Earnshaw sent me a patch, but I do not
8835 know of any way to detect if a function like the above has been
8836 compiled with the correct calling convention. */
8837
8838 /* All aggregate types that won't fit in a register must be returned
8839 in memory. */
8840 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
8841 {
8842 return 1;
8843 }
8844
8845 /* The AAPCS says all aggregates not larger than a word are returned
8846 in a register. */
8847 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
8848 return 0;
8849
8850 /* The only aggregate types that can be returned in a register are
8851 structs and unions. Arrays must be returned in memory. */
8852 code = TYPE_CODE (type);
8853 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
8854 {
8855 return 1;
8856 }
8857
8858 /* Assume all other aggregate types can be returned in a register.
8859 Run a check for structures, unions and arrays. */
8860 nRc = 0;
8861
8862 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
8863 {
8864 int i;
8865 /* Need to check if this struct/union is "integer" like. For
8866 this to be true, its size must be less than or equal to
8867 INT_REGISTER_SIZE and the offset of each addressable
8868 subfield must be zero. Note that bit fields are not
8869 addressable, and unions always start at offset zero. If any
8870 of the subfields is a floating point type, the struct/union
8871 cannot be an integer type. */
8872
8873 /* For each field in the object, check:
8874 1) Is it FP? --> yes, nRc = 1;
8875 2) Is it addressable (bitpos != 0) and
8876 not packed (bitsize == 0)?
8877 --> yes, nRc = 1
8878 */
8879
8880 for (i = 0; i < TYPE_NFIELDS (type); i++)
8881 {
8882 enum type_code field_type_code;
8883 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
8884 i)));
8885
8886 /* Is it a floating point type field? */
8887 if (field_type_code == TYPE_CODE_FLT)
8888 {
8889 nRc = 1;
8890 break;
8891 }
8892
8893 /* If bitpos != 0, then we have to care about it. */
8894 if (TYPE_FIELD_BITPOS (type, i) != 0)
8895 {
8896 /* Bitfields are not addressable. If the field bitsize is
8897 zero, then the field is not packed. Hence it cannot be
8898 a bitfield or any other packed type. */
8899 if (TYPE_FIELD_BITSIZE (type, i) == 0)
8900 {
8901 nRc = 1;
8902 break;
8903 }
8904 }
8905 }
8906 }
8907
8908 return nRc;
8909 }
8910
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  Mirror of arm_extract_return_value:
   floats go by FP model, integer-like scalars into r0 (and following
   registers), aggregates as if stored word-aligned then loaded into
   registers.  */

static void
arm_store_return_value (struct type *type, struct regcache *regs,
			const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      char buf[MAX_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:

	  /* Convert to the FPA extended format and place in F0.  */
	  convert_to_extended (floatformat_from_type (type), buf, valbuf,
			       gdbarch_byte_order (gdbarch));
	  regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float returns go in r0 (and r1 for doubles).  */
	  regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
				   valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_store_return_value: Floating "
			    "point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[INT_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in consecutive
	     registers starting with r0.  This will always be a multiple of
	     the regiser size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = ARM_A1_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= INT_REGISTER_SIZE;
	      valbuf += INT_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  /* The final chunk may be shorter than a register; pad via
	     tmpbuf so the write length is always INT_REGISTER_SIZE.  */
	  memcpy (tmpbuf, valbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
9004
9005
9006 /* Handle function return values. */
9007
9008 static enum return_value_convention
9009 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
9010 struct type *valtype, struct regcache *regcache,
9011 gdb_byte *readbuf, const gdb_byte *writebuf)
9012 {
9013 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9014 enum arm_vfp_cprc_base_type vfp_base_type;
9015 int vfp_base_count;
9016
9017 if (arm_vfp_abi_for_function (gdbarch, func_type)
9018 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
9019 {
9020 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
9021 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
9022 int i;
9023 for (i = 0; i < vfp_base_count; i++)
9024 {
9025 if (reg_char == 'q')
9026 {
9027 if (writebuf)
9028 arm_neon_quad_write (gdbarch, regcache, i,
9029 writebuf + i * unit_length);
9030
9031 if (readbuf)
9032 arm_neon_quad_read (gdbarch, regcache, i,
9033 readbuf + i * unit_length);
9034 }
9035 else
9036 {
9037 char name_buf[4];
9038 int regnum;
9039
9040 sprintf (name_buf, "%c%d", reg_char, i);
9041 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9042 strlen (name_buf));
9043 if (writebuf)
9044 regcache_cooked_write (regcache, regnum,
9045 writebuf + i * unit_length);
9046 if (readbuf)
9047 regcache_cooked_read (regcache, regnum,
9048 readbuf + i * unit_length);
9049 }
9050 }
9051 return RETURN_VALUE_REGISTER_CONVENTION;
9052 }
9053
9054 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
9055 || TYPE_CODE (valtype) == TYPE_CODE_UNION
9056 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
9057 {
9058 if (tdep->struct_return == pcc_struct_return
9059 || arm_return_in_memory (gdbarch, valtype))
9060 return RETURN_VALUE_STRUCT_CONVENTION;
9061 }
9062
9063 /* AAPCS returns complex types longer than a register in memory. */
9064 if (tdep->arm_abi != ARM_ABI_APCS
9065 && TYPE_CODE (valtype) == TYPE_CODE_COMPLEX
9066 && TYPE_LENGTH (valtype) > INT_REGISTER_SIZE)
9067 return RETURN_VALUE_STRUCT_CONVENTION;
9068
9069 if (writebuf)
9070 arm_store_return_value (valtype, regcache, writebuf);
9071
9072 if (readbuf)
9073 arm_extract_return_value (valtype, regcache, readbuf);
9074
9075 return RETURN_VALUE_REGISTER_CONVENTION;
9076 }
9077
9078
9079 static int
9080 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
9081 {
9082 struct gdbarch *gdbarch = get_frame_arch (frame);
9083 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9084 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
9085 CORE_ADDR jb_addr;
9086 char buf[INT_REGISTER_SIZE];
9087
9088 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
9089
9090 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
9091 INT_REGISTER_SIZE))
9092 return 0;
9093
9094 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
9095 return 1;
9096 }
9097
/* Recognize GCC and GNU ld's trampolines.  If we are in a trampoline,
   return the target PC.  Otherwise return 0.

   FRAME is the frame currently in the stub; PC is an address inside
   the stub's function.  */

CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  const char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    return 0;

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (strncmp (name, "_call_via_", 10) == 0
      || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
    {
      /* Use the name suffix to determine which register contains the
         target PC.  */
      static char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      /* Every name in TABLE is exactly two characters, so the suffix
	 starts two characters from the end of the stub's name.  */
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
	if (strcmp (&name[offset], table[regno]) == 0)
	  return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
	   && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
		       strlen ("_from_thumb")) == 0)
	  || (namelen > 2 + strlen ("_from_arm")
	      && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
			  strlen ("_from_arm")) == 0)))
    {
      char *target_name;
      int target_len = namelen - 2;
      struct minimal_symbol *minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Strip the leading "__" and the "_from_*" suffix to recover
	 the name of the real call target.  */
      if (name[namelen - 1] == 'b')
	target_len -= strlen ("_from_thumb");
      else
	target_len -= strlen ("_from_arm");

      target_name = alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Prefer a symbol from the same objfile as the stub, if any.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym != NULL)
	return SYMBOL_VALUE_ADDRESS (minsym);
      else
	return 0;
    }

  return 0;			/* not a stub */
}
9172
9173 static void
9174 set_arm_command (char *args, int from_tty)
9175 {
9176 printf_unfiltered (_("\
9177 \"set arm\" must be followed by an apporpriate subcommand.\n"));
9178 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
9179 }
9180
/* "show arm" prefix command handler: display the values of all
   "show arm" subcommands.  */

static void
show_arm_command (char *args, int from_tty)
{
  cmd_show_list (showarmcmdlist, from_tty, "");
}
9186
9187 static void
9188 arm_update_current_architecture (void)
9189 {
9190 struct gdbarch_info info;
9191
9192 /* If the current architecture is not ARM, we have nothing to do. */
9193 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
9194 return;
9195
9196 /* Update the architecture. */
9197 gdbarch_info_init (&info);
9198
9199 if (!gdbarch_update_p (info))
9200 internal_error (__FILE__, __LINE__, _("could not update architecture"));
9201 }
9202
9203 static void
9204 set_fp_model_sfunc (char *args, int from_tty,
9205 struct cmd_list_element *c)
9206 {
9207 enum arm_float_model fp_model;
9208
9209 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
9210 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
9211 {
9212 arm_fp_model = fp_model;
9213 break;
9214 }
9215
9216 if (fp_model == ARM_FLOAT_LAST)
9217 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
9218 current_fp_model);
9219
9220 arm_update_current_architecture ();
9221 }
9222
/* "show arm fp-model" handler.  When the setting is "auto" and the
   current architecture is ARM, also report the model actually in
   effect for that architecture.  */

static void
show_fp_model (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_fp_model == ARM_FLOAT_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
		      fp_model_strings[tdep->fp_model]);
  else
    fprintf_filtered (file, _("\
The current ARM floating point model is \"%s\".\n"),
		      fp_model_strings[arm_fp_model]);
}
9239
9240 static void
9241 arm_set_abi (char *args, int from_tty,
9242 struct cmd_list_element *c)
9243 {
9244 enum arm_abi_kind arm_abi;
9245
9246 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
9247 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
9248 {
9249 arm_abi_global = arm_abi;
9250 break;
9251 }
9252
9253 if (arm_abi == ARM_ABI_LAST)
9254 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
9255 arm_abi_string);
9256
9257 arm_update_current_architecture ();
9258 }
9259
/* "show arm abi" handler.  When the setting is "auto" and the current
   architecture is ARM, also report the ABI actually in effect for
   that architecture.  */

static void
arm_show_abi (struct ui_file *file, int from_tty,
	      struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_abi_global == ARM_ABI_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM ABI is \"auto\" (currently \"%s\").\n"),
		      arm_abi_strings[tdep->arm_abi]);
  else
    fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
		      arm_abi_string);
}
9275
9276 static void
9277 arm_show_fallback_mode (struct ui_file *file, int from_tty,
9278 struct cmd_list_element *c, const char *value)
9279 {
9280 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9281
9282 fprintf_filtered (file,
9283 _("The current execution mode assumed "
9284 "(when symbols are unavailable) is \"%s\".\n"),
9285 arm_fallback_mode_string);
9286 }
9287
9288 static void
9289 arm_show_force_mode (struct ui_file *file, int from_tty,
9290 struct cmd_list_element *c, const char *value)
9291 {
9292 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9293
9294 fprintf_filtered (file,
9295 _("The current execution mode assumed "
9296 "(even when symbols are available) is \"%s\".\n"),
9297 arm_force_mode_string);
9298 }
9299
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* All the work is done by the helper below; the chosen style has
     already been stored by the "set" machinery.  */
  set_disassembly_style ();
}
9311 \f
9312 /* Return the ARM register name corresponding to register I. */
9313 static const char *
9314 arm_register_name (struct gdbarch *gdbarch, int i)
9315 {
9316 const int num_regs = gdbarch_num_regs (gdbarch);
9317
9318 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
9319 && i >= num_regs && i < num_regs + 32)
9320 {
9321 static const char *const vfp_pseudo_names[] = {
9322 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9323 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9324 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9325 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9326 };
9327
9328 return vfp_pseudo_names[i - num_regs];
9329 }
9330
9331 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
9332 && i >= num_regs + 32 && i < num_regs + 32 + 16)
9333 {
9334 static const char *const neon_pseudo_names[] = {
9335 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9336 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9337 };
9338
9339 return neon_pseudo_names[i - num_regs - 32];
9340 }
9341
9342 if (i >= ARRAY_SIZE (arm_register_names))
9343 /* These registers are only supported on targets which supply
9344 an XML description. */
9345 return "";
9346
9347 return arm_register_names[i];
9348 }
9349
9350 static void
9351 set_disassembly_style (void)
9352 {
9353 int current;
9354
9355 /* Find the style that the user wants. */
9356 for (current = 0; current < num_disassembly_options; current++)
9357 if (disassembly_style == valid_disassembly_styles[current])
9358 break;
9359 gdb_assert (current < num_disassembly_options);
9360
9361 /* Synchronize the disassembler. */
9362 set_arm_regname_option (current);
9363 }
9364
9365 /* Test whether the coff symbol specific value corresponds to a Thumb
9366 function. */
9367
9368 static int
9369 coff_sym_is_thumb (int val)
9370 {
9371 return (val == C_THUMBEXT
9372 || val == C_THUMBSTAT
9373 || val == C_THUMBEXTFUNC
9374 || val == C_THUMBSTATFUNC
9375 || val == C_THUMBLABEL);
9376 }
9377
/* arm_coff_make_msymbol_special()
   arm_elf_make_msymbol_special()

   These functions test whether the COFF or ELF symbol corresponds to
   an address in thumb code, and set a "special" bit in a minimal
   symbol to indicate that it does.  */

static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  /* An ELF symbol whose branch type is ST_BRANCH_TO_THUMB targets
     Thumb code.  */
  if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
      == ST_BRANCH_TO_THUMB)
    MSYMBOL_SET_SPECIAL (msym);
}
9392
static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  /* VAL is the COFF symbol-specific value; coff_sym_is_thumb
     recognizes the Thumb storage classes.  */
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
9399
9400 static void
9401 arm_objfile_data_free (struct objfile *objfile, void *arg)
9402 {
9403 struct arm_per_objfile *data = arg;
9404 unsigned int i;
9405
9406 for (i = 0; i < objfile->obfd->section_count; i++)
9407 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
9408 }
9409
/* Record an ARM mapping symbol ($a, $t or $d) from OBJFILE in the
   per-objfile, per-section vectors, which are kept sorted by address
   for later lookup.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
			   asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  VEC(arm_mapping_symbol_s) **map_p;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  /* Only the $a (ARM), $t (Thumb) and $d (data) mapping symbols are
     of interest; ignore any other "$..." symbol.  */
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Lazily allocate the per-objfile data and its per-section vector
     array on first use.  */
  data = objfile_data (objfile, arm_objfile_data_key);
  if (data == NULL)
    {
      data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
			     struct arm_per_objfile);
      set_objfile_data (objfile, arm_objfile_data_key, data);
      data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
					   objfile->obfd->section_count,
					   VEC(arm_mapping_symbol_s) *);
    }
  map_p = &data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  if (!VEC_empty (arm_mapping_symbol_s, *map_p))
    {
      struct arm_mapping_symbol *prev_map_sym;

      prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
      if (prev_map_sym->value >= sym->value)
	{
	  unsigned int idx;
	  /* Out-of-order symbol: fall back to an ordered insertion so
	     the vector stays sorted.  */
	  idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
				 arm_compare_mapping_symbols);
	  VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
	  return;
	}
    }

  /* Common case: append at the end.  */
  VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
}
9458
9459 static void
9460 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
9461 {
9462 struct gdbarch *gdbarch = get_regcache_arch (regcache);
9463 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
9464
9465 /* If necessary, set the T bit. */
9466 if (arm_apcs_32)
9467 {
9468 ULONGEST val, t_bit;
9469 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
9470 t_bit = arm_psr_thumb_bit (gdbarch);
9471 if (arm_pc_is_thumb (gdbarch, pc))
9472 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9473 val | t_bit);
9474 else
9475 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9476 val & ~t_bit);
9477 }
9478 }
9479
9480 /* Read the contents of a NEON quad register, by reading from two
9481 double registers. This is used to implement the quad pseudo
9482 registers, and for argument passing in case the quad registers are
9483 missing; vectors are passed in quad registers when using the VFP
9484 ABI, even if a NEON unit is not present. REGNUM is the index of
9485 the quad register, in [0, 15]. */
9486
9487 static enum register_status
9488 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
9489 int regnum, gdb_byte *buf)
9490 {
9491 char name_buf[4];
9492 gdb_byte reg_buf[8];
9493 int offset, double_regnum;
9494 enum register_status status;
9495
9496 sprintf (name_buf, "d%d", regnum << 1);
9497 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9498 strlen (name_buf));
9499
9500 /* d0 is always the least significant half of q0. */
9501 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9502 offset = 8;
9503 else
9504 offset = 0;
9505
9506 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9507 if (status != REG_VALID)
9508 return status;
9509 memcpy (buf + offset, reg_buf, 8);
9510
9511 offset = 8 - offset;
9512 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
9513 if (status != REG_VALID)
9514 return status;
9515 memcpy (buf + offset, reg_buf, 8);
9516
9517 return REG_VALID;
9518 }
9519
/* Read pseudo register REGNUM: either a NEON quad (delegated to
   arm_neon_quad_read) or a VFP single, which is extracted from half
   of the containing double register.  */

static enum register_status
arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
		 int regnum, gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  /* Pseudo registers are numbered above the raw registers; rebase
     REGNUM so 0..31 are singles and 32..47 are quads.  */
  gdb_assert (regnum >= num_regs);
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
  else
    {
      enum register_status status;

      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* sN lives in dN/2; look the double register up by name.  */
      sprintf (name_buf, "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      status = regcache_raw_read (regcache, double_regnum, reg_buf);
      if (status == REG_VALID)
	memcpy (buf, reg_buf + offset, 4);
      return status;
    }
}
9558
9559 /* Store the contents of BUF to a NEON quad register, by writing to
9560 two double registers. This is used to implement the quad pseudo
9561 registers, and for argument passing in case the quad registers are
9562 missing; vectors are passed in quad registers when using the VFP
9563 ABI, even if a NEON unit is not present. REGNUM is the index
9564 of the quad register, in [0, 15]. */
9565
9566 static void
9567 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
9568 int regnum, const gdb_byte *buf)
9569 {
9570 char name_buf[4];
9571 gdb_byte reg_buf[8];
9572 int offset, double_regnum;
9573
9574 sprintf (name_buf, "d%d", regnum << 1);
9575 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9576 strlen (name_buf));
9577
9578 /* d0 is always the least significant half of q0. */
9579 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9580 offset = 8;
9581 else
9582 offset = 0;
9583
9584 regcache_raw_write (regcache, double_regnum, buf + offset);
9585 offset = 8 - offset;
9586 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
9587 }
9588
/* Write pseudo register REGNUM: either a NEON quad (delegated to
   arm_neon_quad_write) or a VFP single, written via a
   read-modify-write of the containing double register.  */

static void
arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		  int regnum, const gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  /* Pseudo registers are numbered above the raw registers; rebase
     REGNUM so 0..31 are singles and 32..47 are quads.  */
  gdb_assert (regnum >= num_regs);
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
  else
    {
      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* sN lives in dN/2; look the double register up by name.  */
      sprintf (name_buf, "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      /* NOTE(review): unlike arm_pseudo_read, the status of this read
	 is not checked; a failed read would merge BUF into stale
	 data -- confirm whether that is acceptable here.  */
      regcache_raw_read (regcache, double_regnum, reg_buf);
      memcpy (reg_buf + offset, buf, 4);
      regcache_raw_write (regcache, double_regnum, reg_buf);
    }
}
9624
/* Return the value of the user register whose raw register number is
   pointed to by BATON, in the context of FRAME.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  return value_of_register (*(const int *) baton, frame);
}
9631 \f
9632 static enum gdb_osabi
9633 arm_elf_osabi_sniffer (bfd *abfd)
9634 {
9635 unsigned int elfosabi;
9636 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
9637
9638 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
9639
9640 if (elfosabi == ELFOSABI_ARM)
9641 /* GNU tools use this value. Check note sections in this case,
9642 as well. */
9643 bfd_map_over_sections (abfd,
9644 generic_elf_osabi_sniff_abi_tag_sections,
9645 &osabi);
9646
9647 /* Anything else will be handled by the generic ELF sniffer. */
9648 return osabi;
9649 }
9650
9651 static int
9652 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
9653 struct reggroup *group)
9654 {
9655 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9656 this, FPS register belongs to save_regroup, restore_reggroup, and
9657 all_reggroup, of course. */
9658 if (regnum == ARM_FPS_REGNUM)
9659 return (group == float_reggroup
9660 || group == save_reggroup
9661 || group == restore_reggroup
9662 || group == all_reggroup);
9663 else
9664 return default_register_reggroup_p (gdbarch, regnum, group);
9665 }
9666
9667 \f
9668 /* Initialize the current architecture based on INFO. If possible,
9669 re-use an architecture from ARCHES, which is a list of
9670 architectures already created during this debugging session.
9671
9672 Called e.g. at program startup, when reading a core file, and when
9673 reading a binary file. */
9674
9675 static struct gdbarch *
9676 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
9677 {
9678 struct gdbarch_tdep *tdep;
9679 struct gdbarch *gdbarch;
9680 struct gdbarch_list *best_arch;
9681 enum arm_abi_kind arm_abi = arm_abi_global;
9682 enum arm_float_model fp_model = arm_fp_model;
9683 struct tdesc_arch_data *tdesc_data = NULL;
9684 int i, is_m = 0;
9685 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
9686 int have_neon = 0;
9687 int have_fpa_registers = 1;
9688 const struct target_desc *tdesc = info.target_desc;
9689
9690 /* If we have an object to base this architecture on, try to determine
9691 its ABI. */
9692
9693 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
9694 {
9695 int ei_osabi, e_flags;
9696
9697 switch (bfd_get_flavour (info.abfd))
9698 {
9699 case bfd_target_aout_flavour:
9700 /* Assume it's an old APCS-style ABI. */
9701 arm_abi = ARM_ABI_APCS;
9702 break;
9703
9704 case bfd_target_coff_flavour:
9705 /* Assume it's an old APCS-style ABI. */
9706 /* XXX WinCE? */
9707 arm_abi = ARM_ABI_APCS;
9708 break;
9709
9710 case bfd_target_elf_flavour:
9711 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
9712 e_flags = elf_elfheader (info.abfd)->e_flags;
9713
9714 if (ei_osabi == ELFOSABI_ARM)
9715 {
9716 /* GNU tools used to use this value, but do not for EABI
9717 objects. There's nowhere to tag an EABI version
9718 anyway, so assume APCS. */
9719 arm_abi = ARM_ABI_APCS;
9720 }
9721 else if (ei_osabi == ELFOSABI_NONE)
9722 {
9723 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
9724 int attr_arch, attr_profile;
9725
9726 switch (eabi_ver)
9727 {
9728 case EF_ARM_EABI_UNKNOWN:
9729 /* Assume GNU tools. */
9730 arm_abi = ARM_ABI_APCS;
9731 break;
9732
9733 case EF_ARM_EABI_VER4:
9734 case EF_ARM_EABI_VER5:
9735 arm_abi = ARM_ABI_AAPCS;
9736 /* EABI binaries default to VFP float ordering.
9737 They may also contain build attributes that can
9738 be used to identify if the VFP argument-passing
9739 ABI is in use. */
9740 if (fp_model == ARM_FLOAT_AUTO)
9741 {
9742 #ifdef HAVE_ELF
9743 switch (bfd_elf_get_obj_attr_int (info.abfd,
9744 OBJ_ATTR_PROC,
9745 Tag_ABI_VFP_args))
9746 {
9747 case 0:
9748 /* "The user intended FP parameter/result
9749 passing to conform to AAPCS, base
9750 variant". */
9751 fp_model = ARM_FLOAT_SOFT_VFP;
9752 break;
9753 case 1:
9754 /* "The user intended FP parameter/result
9755 passing to conform to AAPCS, VFP
9756 variant". */
9757 fp_model = ARM_FLOAT_VFP;
9758 break;
9759 case 2:
9760 /* "The user intended FP parameter/result
9761 passing to conform to tool chain-specific
9762 conventions" - we don't know any such
9763 conventions, so leave it as "auto". */
9764 break;
9765 default:
9766 /* Attribute value not mentioned in the
9767 October 2008 ABI, so leave it as
9768 "auto". */
9769 break;
9770 }
9771 #else
9772 fp_model = ARM_FLOAT_SOFT_VFP;
9773 #endif
9774 }
9775 break;
9776
9777 default:
9778 /* Leave it as "auto". */
9779 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
9780 break;
9781 }
9782
9783 #ifdef HAVE_ELF
9784 /* Detect M-profile programs. This only works if the
9785 executable file includes build attributes; GCC does
9786 copy them to the executable, but e.g. RealView does
9787 not. */
9788 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
9789 Tag_CPU_arch);
9790 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
9791 OBJ_ATTR_PROC,
9792 Tag_CPU_arch_profile);
9793 /* GCC specifies the profile for v6-M; RealView only
9794 specifies the profile for architectures starting with
9795 V7 (as opposed to architectures with a tag
9796 numerically greater than TAG_CPU_ARCH_V7). */
9797 if (!tdesc_has_registers (tdesc)
9798 && (attr_arch == TAG_CPU_ARCH_V6_M
9799 || attr_arch == TAG_CPU_ARCH_V6S_M
9800 || attr_profile == 'M'))
9801 tdesc = tdesc_arm_with_m;
9802 #endif
9803 }
9804
9805 if (fp_model == ARM_FLOAT_AUTO)
9806 {
9807 int e_flags = elf_elfheader (info.abfd)->e_flags;
9808
9809 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
9810 {
9811 case 0:
9812 /* Leave it as "auto". Strictly speaking this case
9813 means FPA, but almost nobody uses that now, and
9814 many toolchains fail to set the appropriate bits
9815 for the floating-point model they use. */
9816 break;
9817 case EF_ARM_SOFT_FLOAT:
9818 fp_model = ARM_FLOAT_SOFT_FPA;
9819 break;
9820 case EF_ARM_VFP_FLOAT:
9821 fp_model = ARM_FLOAT_VFP;
9822 break;
9823 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
9824 fp_model = ARM_FLOAT_SOFT_VFP;
9825 break;
9826 }
9827 }
9828
9829 if (e_flags & EF_ARM_BE8)
9830 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
9831
9832 break;
9833
9834 default:
9835 /* Leave it as "auto". */
9836 break;
9837 }
9838 }
9839
9840 /* Check any target description for validity. */
9841 if (tdesc_has_registers (tdesc))
9842 {
9843 /* For most registers we require GDB's default names; but also allow
9844 the numeric names for sp / lr / pc, as a convenience. */
9845 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
9846 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
9847 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
9848
9849 const struct tdesc_feature *feature;
9850 int valid_p;
9851
9852 feature = tdesc_find_feature (tdesc,
9853 "org.gnu.gdb.arm.core");
9854 if (feature == NULL)
9855 {
9856 feature = tdesc_find_feature (tdesc,
9857 "org.gnu.gdb.arm.m-profile");
9858 if (feature == NULL)
9859 return NULL;
9860 else
9861 is_m = 1;
9862 }
9863
9864 tdesc_data = tdesc_data_alloc ();
9865
9866 valid_p = 1;
9867 for (i = 0; i < ARM_SP_REGNUM; i++)
9868 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9869 arm_register_names[i]);
9870 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9871 ARM_SP_REGNUM,
9872 arm_sp_names);
9873 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9874 ARM_LR_REGNUM,
9875 arm_lr_names);
9876 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9877 ARM_PC_REGNUM,
9878 arm_pc_names);
9879 if (is_m)
9880 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9881 ARM_PS_REGNUM, "xpsr");
9882 else
9883 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9884 ARM_PS_REGNUM, "cpsr");
9885
9886 if (!valid_p)
9887 {
9888 tdesc_data_cleanup (tdesc_data);
9889 return NULL;
9890 }
9891
9892 feature = tdesc_find_feature (tdesc,
9893 "org.gnu.gdb.arm.fpa");
9894 if (feature != NULL)
9895 {
9896 valid_p = 1;
9897 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
9898 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9899 arm_register_names[i]);
9900 if (!valid_p)
9901 {
9902 tdesc_data_cleanup (tdesc_data);
9903 return NULL;
9904 }
9905 }
9906 else
9907 have_fpa_registers = 0;
9908
9909 feature = tdesc_find_feature (tdesc,
9910 "org.gnu.gdb.xscale.iwmmxt");
9911 if (feature != NULL)
9912 {
9913 static const char *const iwmmxt_names[] = {
9914 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
9915 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
9916 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
9917 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
9918 };
9919
9920 valid_p = 1;
9921 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
9922 valid_p
9923 &= tdesc_numbered_register (feature, tdesc_data, i,
9924 iwmmxt_names[i - ARM_WR0_REGNUM]);
9925
9926 /* Check for the control registers, but do not fail if they
9927 are missing. */
9928 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
9929 tdesc_numbered_register (feature, tdesc_data, i,
9930 iwmmxt_names[i - ARM_WR0_REGNUM]);
9931
9932 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
9933 valid_p
9934 &= tdesc_numbered_register (feature, tdesc_data, i,
9935 iwmmxt_names[i - ARM_WR0_REGNUM]);
9936
9937 if (!valid_p)
9938 {
9939 tdesc_data_cleanup (tdesc_data);
9940 return NULL;
9941 }
9942 }
9943
9944 /* If we have a VFP unit, check whether the single precision registers
9945 are present. If not, then we will synthesize them as pseudo
9946 registers. */
9947 feature = tdesc_find_feature (tdesc,
9948 "org.gnu.gdb.arm.vfp");
9949 if (feature != NULL)
9950 {
9951 static const char *const vfp_double_names[] = {
9952 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
9953 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
9954 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
9955 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
9956 };
9957
9958 /* Require the double precision registers. There must be either
9959 16 or 32. */
9960 valid_p = 1;
9961 for (i = 0; i < 32; i++)
9962 {
9963 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9964 ARM_D0_REGNUM + i,
9965 vfp_double_names[i]);
9966 if (!valid_p)
9967 break;
9968 }
9969 if (!valid_p && i == 16)
9970 valid_p = 1;
9971
9972 /* Also require FPSCR. */
9973 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9974 ARM_FPSCR_REGNUM, "fpscr");
9975 if (!valid_p)
9976 {
9977 tdesc_data_cleanup (tdesc_data);
9978 return NULL;
9979 }
9980
9981 if (tdesc_unnumbered_register (feature, "s0") == 0)
9982 have_vfp_pseudos = 1;
9983
9984 have_vfp_registers = 1;
9985
9986 /* If we have VFP, also check for NEON. The architecture allows
9987 NEON without VFP (integer vector operations only), but GDB
9988 does not support that. */
9989 feature = tdesc_find_feature (tdesc,
9990 "org.gnu.gdb.arm.neon");
9991 if (feature != NULL)
9992 {
9993 /* NEON requires 32 double-precision registers. */
9994 if (i != 32)
9995 {
9996 tdesc_data_cleanup (tdesc_data);
9997 return NULL;
9998 }
9999
10000 /* If there are quad registers defined by the stub, use
10001 their type; otherwise (normally) provide them with
10002 the default type. */
10003 if (tdesc_unnumbered_register (feature, "q0") == 0)
10004 have_neon_pseudos = 1;
10005
10006 have_neon = 1;
10007 }
10008 }
10009 }
10010
10011 /* If there is already a candidate, use it. */
10012 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
10013 best_arch != NULL;
10014 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
10015 {
10016 if (arm_abi != ARM_ABI_AUTO
10017 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
10018 continue;
10019
10020 if (fp_model != ARM_FLOAT_AUTO
10021 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
10022 continue;
10023
10024 /* There are various other properties in tdep that we do not
10025 need to check here: those derived from a target description,
10026 since gdbarches with a different target description are
10027 automatically disqualified. */
10028
10029 /* Do check is_m, though, since it might come from the binary. */
10030 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
10031 continue;
10032
10033 /* Found a match. */
10034 break;
10035 }
10036
10037 if (best_arch != NULL)
10038 {
10039 if (tdesc_data != NULL)
10040 tdesc_data_cleanup (tdesc_data);
10041 return best_arch->gdbarch;
10042 }
10043
10044 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
10045 gdbarch = gdbarch_alloc (&info, tdep);
10046
10047 /* Record additional information about the architecture we are defining.
10048 These are gdbarch discriminators, like the OSABI. */
10049 tdep->arm_abi = arm_abi;
10050 tdep->fp_model = fp_model;
10051 tdep->is_m = is_m;
10052 tdep->have_fpa_registers = have_fpa_registers;
10053 tdep->have_vfp_registers = have_vfp_registers;
10054 tdep->have_vfp_pseudos = have_vfp_pseudos;
10055 tdep->have_neon_pseudos = have_neon_pseudos;
10056 tdep->have_neon = have_neon;
10057
10058 /* Breakpoints. */
10059 switch (info.byte_order_for_code)
10060 {
10061 case BFD_ENDIAN_BIG:
10062 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
10063 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
10064 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
10065 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
10066
10067 break;
10068
10069 case BFD_ENDIAN_LITTLE:
10070 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
10071 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
10072 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
10073 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
10074
10075 break;
10076
10077 default:
10078 internal_error (__FILE__, __LINE__,
10079 _("arm_gdbarch_init: bad byte order for float format"));
10080 }
10081
10082 /* On ARM targets char defaults to unsigned. */
10083 set_gdbarch_char_signed (gdbarch, 0);
10084
10085 /* Note: for displaced stepping, this includes the breakpoint, and one word
10086 of additional scratch space. This setting isn't used for anything beside
10087 displaced stepping at present. */
10088 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
10089
10090 /* This should be low enough for everything. */
10091 tdep->lowest_pc = 0x20;
10092 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
10093
10094 /* The default, for both APCS and AAPCS, is to return small
10095 structures in registers. */
10096 tdep->struct_return = reg_struct_return;
10097
10098 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
10099 set_gdbarch_frame_align (gdbarch, arm_frame_align);
10100
10101 set_gdbarch_write_pc (gdbarch, arm_write_pc);
10102
10103 /* Frame handling. */
10104 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
10105 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
10106 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
10107
10108 frame_base_set_default (gdbarch, &arm_normal_base);
10109
10110 /* Address manipulation. */
10111 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
10112 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
10113
10114 /* Advance PC across function entry code. */
10115 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
10116
10117 /* Detect whether PC is in function epilogue. */
10118 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
10119
10120 /* Skip trampolines. */
10121 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
10122
10123 /* The stack grows downward. */
10124 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
10125
10126 /* Breakpoint manipulation. */
10127 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
10128 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
10129 arm_remote_breakpoint_from_pc);
10130
10131 /* Information about registers, etc. */
10132 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
10133 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
10134 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
10135 set_gdbarch_register_type (gdbarch, arm_register_type);
10136 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
10137
10138 /* This "info float" is FPA-specific. Use the generic version if we
10139 do not have FPA. */
10140 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
10141 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
10142
10143 /* Internal <-> external register number maps. */
10144 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
10145 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
10146
10147 set_gdbarch_register_name (gdbarch, arm_register_name);
10148
10149 /* Returning results. */
10150 set_gdbarch_return_value (gdbarch, arm_return_value);
10151
10152 /* Disassembly. */
10153 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
10154
10155 /* Minsymbol frobbing. */
10156 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
10157 set_gdbarch_coff_make_msymbol_special (gdbarch,
10158 arm_coff_make_msymbol_special);
10159 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
10160
10161 /* Thumb-2 IT block support. */
10162 set_gdbarch_adjust_breakpoint_address (gdbarch,
10163 arm_adjust_breakpoint_address);
10164
10165 /* Virtual tables. */
10166 set_gdbarch_vbit_in_delta (gdbarch, 1);
10167
10168 /* Hook in the ABI-specific overrides, if they have been registered. */
10169 gdbarch_init_osabi (info, gdbarch);
10170
10171 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
10172
10173 /* Add some default predicates. */
10174 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
10175 dwarf2_append_unwinders (gdbarch);
10176 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
10177 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
10178
10179 /* Now we have tuned the configuration, set a few final things,
10180 based on what the OS ABI has told us. */
10181
10182 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
10183 binaries are always marked. */
10184 if (tdep->arm_abi == ARM_ABI_AUTO)
10185 tdep->arm_abi = ARM_ABI_APCS;
10186
10187 /* Watchpoints are not steppable. */
10188 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
10189
10190 /* We used to default to FPA for generic ARM, but almost nobody
10191 uses that now, and we now provide a way for the user to force
10192 the model. So default to the most useful variant. */
10193 if (tdep->fp_model == ARM_FLOAT_AUTO)
10194 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
10195
10196 if (tdep->jb_pc >= 0)
10197 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
10198
10199 /* Floating point sizes and format. */
10200 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
10201 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
10202 {
10203 set_gdbarch_double_format
10204 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10205 set_gdbarch_long_double_format
10206 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10207 }
10208 else
10209 {
10210 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
10211 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
10212 }
10213
10214 if (have_vfp_pseudos)
10215 {
10216 /* NOTE: These are the only pseudo registers used by
10217 the ARM target at the moment. If more are added, a
10218 little more care in numbering will be needed. */
10219
10220 int num_pseudos = 32;
10221 if (have_neon_pseudos)
10222 num_pseudos += 16;
10223 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
10224 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
10225 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
10226 }
10227
10228 if (tdesc_data)
10229 {
10230 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
10231
10232 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
10233
10234 /* Override tdesc_register_type to adjust the types of VFP
10235 registers for NEON. */
10236 set_gdbarch_register_type (gdbarch, arm_register_type);
10237 }
10238
10239 /* Add standard register aliases. We add aliases even for those
10240 names which are used by the current architecture - it's simpler,
10241 and does no harm, since nothing ever lists user registers. */
10242 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
10243 user_reg_add (gdbarch, arm_register_aliases[i].name,
10244 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
10245
10246 return gdbarch;
10247 }
10248
10249 static void
10250 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
10251 {
10252 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
10253
10254 if (tdep == NULL)
10255 return;
10256
10257 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
10258 (unsigned long) tdep->lowest_pc);
10259 }
10260
/* Declare the module initializer so -Wmissing-prototypes does not
   warn about the definition below.  */
extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
10262
10263 void
10264 _initialize_arm_tdep (void)
10265 {
10266 struct ui_file *stb;
10267 long length;
10268 struct cmd_list_element *new_set, *new_show;
10269 const char *setname;
10270 const char *setdesc;
10271 const char *const *regnames;
10272 int numregs, i, j;
10273 static char *helptext;
10274 char regdesc[1024], *rdptr = regdesc;
10275 size_t rest = sizeof (regdesc);
10276
10277 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
10278
10279 arm_objfile_data_key
10280 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
10281
10282 /* Add ourselves to objfile event chain. */
10283 observer_attach_new_objfile (arm_exidx_new_objfile);
10284 arm_exidx_data_key
10285 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
10286
10287 /* Register an ELF OS ABI sniffer for ARM binaries. */
10288 gdbarch_register_osabi_sniffer (bfd_arch_arm,
10289 bfd_target_elf_flavour,
10290 arm_elf_osabi_sniffer);
10291
10292 /* Initialize the standard target descriptions. */
10293 initialize_tdesc_arm_with_m ();
10294 initialize_tdesc_arm_with_iwmmxt ();
10295 initialize_tdesc_arm_with_vfpv2 ();
10296 initialize_tdesc_arm_with_vfpv3 ();
10297 initialize_tdesc_arm_with_neon ();
10298
10299 /* Get the number of possible sets of register names defined in opcodes. */
10300 num_disassembly_options = get_arm_regname_num_options ();
10301
10302 /* Add root prefix command for all "set arm"/"show arm" commands. */
10303 add_prefix_cmd ("arm", no_class, set_arm_command,
10304 _("Various ARM-specific commands."),
10305 &setarmcmdlist, "set arm ", 0, &setlist);
10306
10307 add_prefix_cmd ("arm", no_class, show_arm_command,
10308 _("Various ARM-specific commands."),
10309 &showarmcmdlist, "show arm ", 0, &showlist);
10310
10311 /* Sync the opcode insn printer with our register viewer. */
10312 parse_arm_disassembler_option ("reg-names-std");
10313
10314 /* Initialize the array that will be passed to
10315 add_setshow_enum_cmd(). */
10316 valid_disassembly_styles
10317 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
10318 for (i = 0; i < num_disassembly_options; i++)
10319 {
10320 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
10321 valid_disassembly_styles[i] = setname;
10322 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
10323 rdptr += length;
10324 rest -= length;
10325 /* When we find the default names, tell the disassembler to use
10326 them. */
10327 if (!strcmp (setname, "std"))
10328 {
10329 disassembly_style = setname;
10330 set_arm_regname_option (i);
10331 }
10332 }
10333 /* Mark the end of valid options. */
10334 valid_disassembly_styles[num_disassembly_options] = NULL;
10335
10336 /* Create the help text. */
10337 stb = mem_fileopen ();
10338 fprintf_unfiltered (stb, "%s%s%s",
10339 _("The valid values are:\n"),
10340 regdesc,
10341 _("The default is \"std\"."));
10342 helptext = ui_file_xstrdup (stb, NULL);
10343 ui_file_delete (stb);
10344
10345 add_setshow_enum_cmd("disassembler", no_class,
10346 valid_disassembly_styles, &disassembly_style,
10347 _("Set the disassembly style."),
10348 _("Show the disassembly style."),
10349 helptext,
10350 set_disassembly_style_sfunc,
10351 NULL, /* FIXME: i18n: The disassembly style is
10352 \"%s\". */
10353 &setarmcmdlist, &showarmcmdlist);
10354
10355 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
10356 _("Set usage of ARM 32-bit mode."),
10357 _("Show usage of ARM 32-bit mode."),
10358 _("When off, a 26-bit PC will be used."),
10359 NULL,
10360 NULL, /* FIXME: i18n: Usage of ARM 32-bit
10361 mode is %s. */
10362 &setarmcmdlist, &showarmcmdlist);
10363
10364 /* Add a command to allow the user to force the FPU model. */
10365 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
10366 _("Set the floating point type."),
10367 _("Show the floating point type."),
10368 _("auto - Determine the FP typefrom the OS-ABI.\n\
10369 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10370 fpa - FPA co-processor (GCC compiled).\n\
10371 softvfp - Software FP with pure-endian doubles.\n\
10372 vfp - VFP co-processor."),
10373 set_fp_model_sfunc, show_fp_model,
10374 &setarmcmdlist, &showarmcmdlist);
10375
10376 /* Add a command to allow the user to force the ABI. */
10377 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
10378 _("Set the ABI."),
10379 _("Show the ABI."),
10380 NULL, arm_set_abi, arm_show_abi,
10381 &setarmcmdlist, &showarmcmdlist);
10382
10383 /* Add two commands to allow the user to force the assumed
10384 execution mode. */
10385 add_setshow_enum_cmd ("fallback-mode", class_support,
10386 arm_mode_strings, &arm_fallback_mode_string,
10387 _("Set the mode assumed when symbols are unavailable."),
10388 _("Show the mode assumed when symbols are unavailable."),
10389 NULL, NULL, arm_show_fallback_mode,
10390 &setarmcmdlist, &showarmcmdlist);
10391 add_setshow_enum_cmd ("force-mode", class_support,
10392 arm_mode_strings, &arm_force_mode_string,
10393 _("Set the mode assumed even when symbols are available."),
10394 _("Show the mode assumed even when symbols are available."),
10395 NULL, NULL, arm_show_force_mode,
10396 &setarmcmdlist, &showarmcmdlist);
10397
10398 /* Debugging flag. */
10399 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
10400 _("Set ARM debugging."),
10401 _("Show ARM debugging."),
10402 _("When on, arm-specific debugging is enabled."),
10403 NULL,
10404 NULL, /* FIXME: i18n: "ARM debugging is %s. */
10405 &setdebuglist, &showdebuglist);
10406 }
This page took 0.250001 seconds and 4 git commands to generate.