Merge dg-extract-results.{sh,py} from GCC upstream
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "frame.h"
25 #include "inferior.h"
26 #include "infrun.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "dis-asm.h" /* For register styles. */
30 #include "regcache.h"
31 #include "reggroups.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "remote.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "vec.h"
56
57 #include "record.h"
58 #include "record-full.h"
59
60 #include "features/arm-with-m.c"
61 #include "features/arm-with-m-fpa-layout.c"
62 #include "features/arm-with-m-vfp-d16.c"
63 #include "features/arm-with-iwmmxt.c"
64 #include "features/arm-with-vfpv2.c"
65 #include "features/arm-with-vfpv3.c"
66 #include "features/arm-with-neon.c"
67
/* Nonzero enables ARM target debugging output; presumably toggled by
   a "set debug arm" command registered elsewhere — the registration is
   not visible in this chunk.  */
static int arm_debug;

/* Macros for setting and testing a bit in a minimal symbol that marks
   it as Thumb function.  The MSB of the minimal symbol's "info" field
   is used for this purpose.

   MSYMBOL_SET_SPECIAL	Actually sets the "special" bit.
   MSYMBOL_IS_SPECIAL	Tests the "special" bit in a minimal symbol.  */

#define MSYMBOL_SET_SPECIAL(msym)	\
	MSYMBOL_TARGET_FLAG_1 (msym) = 1

#define MSYMBOL_IS_SPECIAL(msym)	\
	MSYMBOL_TARGET_FLAG_1 (msym)

/* Per-objfile data used for mapping symbols.  The associated data is
   a struct arm_per_objfile (below).  */
static const struct objfile_data *arm_objfile_data_key;
85
/* One ARM mapping symbol.  VALUE is the symbol's offset relative to
   the start of its section; TYPE is the mapping-symbol letter
   (arm_pc_is_thumb below compares it against 't' for Thumb code).  */

struct arm_mapping_symbol
{
  bfd_vma value;
  char type;
};
typedef struct arm_mapping_symbol arm_mapping_symbol_s;
DEF_VEC_O(arm_mapping_symbol_s);

/* Per-objfile mapping-symbol data: one vector of mapping symbols per
   BFD section, indexed by the section's index (see the lookup in
   arm_find_mapping_symbol, which binary-searches each vector).  */
struct arm_per_objfile
{
  VEC(arm_mapping_symbol_s) **section_maps;
};
98
/* The list of available "set arm ..." and "show arm ..." commands.  */
static struct cmd_list_element *setarmcmdlist = NULL;
static struct cmd_list_element *showarmcmdlist = NULL;

/* The type of floating-point to use.  Keep this in sync with enum
   arm_float_model, and the help string in _initialize_arm_tdep.  */
static const char *const fp_model_strings[] =
{
  "auto",
  "softfpa",
  "fpa",
  "softvfp",
  "vfp",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
static const char *current_fp_model = "auto";

/* The ABI to use.  Keep this in sync with arm_abi_kind.  */
static const char *const arm_abi_strings[] =
{
  "auto",
  "APCS",
  "AAPCS",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
static const char *arm_abi_string = "auto";

/* The execution mode to assume.  */
static const char *const arm_mode_strings[] =
{
  "auto",
  "arm",
  "thumb",
  NULL
};

/* User-selected fallback and forced execution modes ("auto" means no
   preference).  arm_pc_is_thumb consults these, comparing against the
   literal strings "arm" and "thumb".  */
static const char *arm_fallback_mode_string = "auto";
static const char *arm_force_mode_string = "auto";

/* Internal override of the execution mode.  -1 means no override,
   0 means override to ARM mode, 1 means override to Thumb mode.
   The effect is the same as if arm_force_mode has been set by the
   user (except the internal override has precedence over a user's
   arm_force_mode override).  */
static int arm_override_mode = -1;

/* Number of different reg name sets (options).  */
static int num_disassembly_options;
/* The standard register names, and all the valid aliases for them.  Note
   that `fp', `sp' and `pc' are not added in this alias list, because they
   have been added as builtin user registers in
   std-regs.c:_initialize_frame_reg.  Each entry maps an alias NAME to
   its GDB register number REGNUM.  */
static const struct
{
  const char *name;
  int regnum;
} arm_register_aliases[] = {
  /* Basic register numbers.  */
  { "r0", 0 },
  { "r1", 1 },
  { "r2", 2 },
  { "r3", 3 },
  { "r4", 4 },
  { "r5", 5 },
  { "r6", 6 },
  { "r7", 7 },
  { "r8", 8 },
  { "r9", 9 },
  { "r10", 10 },
  { "r11", 11 },
  { "r12", 12 },
  { "r13", 13 },
  { "r14", 14 },
  { "r15", 15 },
  /* Synonyms (argument and variable registers).  */
  { "a1", 0 },
  { "a2", 1 },
  { "a3", 2 },
  { "a4", 3 },
  { "v1", 4 },
  { "v2", 5 },
  { "v3", 6 },
  { "v4", 7 },
  { "v5", 8 },
  { "v6", 9 },
  { "v7", 10 },
  { "v8", 11 },
  /* Other platform-specific names for r9.  */
  { "sb", 9 },
  { "tr", 9 },
  /* Special names.  */
  { "ip", 12 },
  { "lr", 14 },
  /* Names used by GCC (not listed in the ARM EABI).  */
  { "sl", 10 },
  /* A special name from the older ATPCS.  */
  { "wr", 7 },
};
204
/* Canonical register names, indexed by GDB's ARM register number
   (the trailing comments give each entry's index).  */
static const char *const arm_register_names[] =
{"r0",  "r1",  "r2",  "r3",	/*  0  1  2  3 */
 "r4",  "r5",  "r6",  "r7",	/*  4  5  6  7 */
 "r8",  "r9",  "r10", "r11",	/*  8  9 10 11 */
 "r12", "sp",  "lr",  "pc",	/* 12 13 14 15 */
 "f0",  "f1",  "f2",  "f3",	/* 16 17 18 19 */
 "f4",  "f5",  "f6",  "f7",	/* 20 21 22 23 */
 "fps", "cpsr" };		/* 24 25       */
213
/* Valid register name styles.  */
static const char **valid_disassembly_styles;

/* Disassembly style to use.  Default to "std" register names.  */
static const char *disassembly_style;

/* This is used to keep the bfd arch_info in sync with the disassembly
   style.  */
static void set_disassembly_style_sfunc(char *, int,
					struct cmd_list_element *);
static void set_disassembly_style (void);

/* Forward declarations; the definitions appear later in the file
   (outside this chunk).  */
static void convert_from_extended (const struct floatformat *, const void *,
				   void *, int);
static void convert_to_extended (const struct floatformat *, void *,
				 const void *, int);

static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
						struct regcache *regcache,
						int regnum, gdb_byte *buf);
static void arm_neon_quad_write (struct gdbarch *gdbarch,
				 struct regcache *regcache,
				 int regnum, const gdb_byte *buf);

/* Returns the byte length (2 or 4 is implied by Thumb encodings, but
   see the definition later in the file) of the Thumb instruction whose
   first halfword is INST1.  */
static int thumb_insn_size (unsigned short inst1);
/* Results of prologue analysis for one frame; filled in by
   arm_analyze_prologue / thumb_analyze_prologue.  */
struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

/* Forward declaration; defined later in the file (outside this
   chunk).  */
static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
				       CORE_ADDR prologue_start,
				       CORE_ADDR prologue_end,
				       struct arm_prologue_cache *cache);
/* Architecture version for displaced stepping.  This affects the behaviour of
   certain instructions, and really should not be hard-wired.  */

#define DISPLACED_STEPPING_ARCH_VERSION		5

/* Addresses for calling Thumb functions have the bit 0 set.
   Here are some macros to test, set, or clear bit 0 of addresses.  */
#define IS_THUMB_ADDR(addr)	((addr) & 1)
#define MAKE_THUMB_ADDR(addr)	((addr) | 1)
#define UNMAKE_THUMB_ADDR(addr)	((addr) & ~1)

/* Set to true if the 32-bit mode is in use.  */

int arm_apcs_32 = 1;
279
280 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
281
282 int
283 arm_psr_thumb_bit (struct gdbarch *gdbarch)
284 {
285 if (gdbarch_tdep (gdbarch)->is_m)
286 return XPSR_T;
287 else
288 return CPSR_T;
289 }
290
291 /* Determine if FRAME is executing in Thumb mode. */
292
293 int
294 arm_frame_is_thumb (struct frame_info *frame)
295 {
296 CORE_ADDR cpsr;
297 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
298
299 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
300 directly (from a signal frame or dummy frame) or by interpreting
301 the saved LR (from a prologue or DWARF frame). So consult it and
302 trust the unwinders. */
303 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
304
305 return (cpsr & t_bit) != 0;
306 }
307
308 /* Callback for VEC_lower_bound. */
309
310 static inline int
311 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
312 const struct arm_mapping_symbol *rhs)
313 {
314 return lhs->value < rhs->value;
315 }
316
/* Search for the mapping symbol covering MEMADDR.  If one is found,
   return its type.  Otherwise, return 0.  If START is non-NULL,
   set *START to the location of the mapping symbol.  */

static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_per_objfile *data;
      VEC(arm_mapping_symbol_s) *map;
      /* The search key is MEMADDR's offset within its section, since
	 the per-section vectors store section-relative values.  */
      struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
					    0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_objfile_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_mapping_symbol_s, map))
	    {
	      struct arm_mapping_symbol *map_sym;

	      idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
				     arm_compare_mapping_symbols);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 mapping symbol covers this address.  */
	      if (idx < VEC_length (arm_mapping_symbol_s, map))
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
		  if (map_sym->value == map_key.value)
		    {
		      if (start)
			*start = map_sym->value + obj_section_addr (sec);
		      return map_sym->type;
		    }
		}

	      /* Fall back to the symbol just before the insertion
		 point, if any; MEMADDR lies inside its span.  */
	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
		  if (start)
		    *start = map_sym->value + obj_section_addr (sec);
		  return map_sym->type;
		}
	    }
	}
    }

  /* No section or no mapping data covering MEMADDR.  */
  return 0;
}
375
/* Determine if the program counter specified in MEMADDR is in a Thumb
   function.  This function should be called for addresses unrelated to
   any executing frame; otherwise, prefer arm_frame_is_thumb.

   The checks below form a strict precedence chain: displaced-step
   translation, bit 0 of the address, the internal override, the
   user's forced mode, M-profile, mapping symbols, minimal-symbol
   flags, the user's fallback mode, and finally the live CPSR.
   Returns nonzero for Thumb, zero for ARM.  */

int
arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
{
  struct bound_minimal_symbol sym;
  char type;
  struct displaced_step_closure* dsc
    = get_displaced_step_closure_by_addr(memaddr);

  /* If checking the mode of displaced instruction in copy area, the mode
     should be determined by instruction on the original address.  */
  if (dsc)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: check mode of %.8lx instead of %.8lx\n",
			    (unsigned long) dsc->insn_addr,
			    (unsigned long) memaddr);
      memaddr = dsc->insn_addr;
    }

  /* If bit 0 of the address is set, assume this is a Thumb address.  */
  if (IS_THUMB_ADDR (memaddr))
    return 1;

  /* Respect internal mode override if active.  */
  if (arm_override_mode != -1)
    return arm_override_mode;

  /* If the user wants to override the symbol table, let him.  */
  if (strcmp (arm_force_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_force_mode_string, "thumb") == 0)
    return 1;

  /* ARM v6-M and v7-M are always in Thumb mode.  */
  if (gdbarch_tdep (gdbarch)->is_m)
    return 1;

  /* If there are mapping symbols, consult them.  */
  type = arm_find_mapping_symbol (memaddr, NULL);
  if (type)
    return type == 't';

  /* Thumb functions have a "special" bit set in minimal symbols.  */
  sym = lookup_minimal_symbol_by_pc (memaddr);
  if (sym.minsym)
    return (MSYMBOL_IS_SPECIAL (sym.minsym));

  /* If the user wants to override the fallback mode, let them.  */
  if (strcmp (arm_fallback_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_fallback_mode_string, "thumb") == 0)
    return 1;

  /* If we couldn't find any symbol, but we're talking to a running
     target, then trust the current value of $cpsr.  This lets
     "display/i $pc" always show the correct mode (though if there is
     a symbol table we will not reach here, so it still may not be
     displayed in the mode it will be executed).  */
  if (target_has_registers)
    return arm_frame_is_thumb (get_current_frame ());

  /* Otherwise we're out of luck; we assume ARM.  */
  return 0;
}
445
446 /* Remove useless bits from addresses in a running program. */
447 static CORE_ADDR
448 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
449 {
450 /* On M-profile devices, do not strip the low bit from EXC_RETURN
451 (the magic exception return address). */
452 if (gdbarch_tdep (gdbarch)->is_m
453 && (val & 0xfffffff0) == 0xfffffff0)
454 return val;
455
456 if (arm_apcs_32)
457 return UNMAKE_THUMB_ADDR (val);
458 else
459 return (val & 0x03fffffc);
460 }
461
/* Return 1 if PC is the start of a compiler helper function which
   can be safely ignored during prologue skipping.  IS_THUMB is true
   if the function is known to be a Thumb function due to the way it
   is being called.  */
static int
skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct bound_minimal_symbol msym;

  msym = lookup_minimal_symbol_by_pc (pc);
  if (msym.minsym != NULL
      && BMSYMBOL_VALUE_ADDRESS (msym) == pc
      && MSYMBOL_LINKAGE_NAME (msym.minsym) != NULL)
    {
      const char *name = MSYMBOL_LINKAGE_NAME (msym.minsym);

      /* The GNU linker's Thumb call stub to foo is named
	 __foo_from_thumb.  Skip the stub's leading "__" so the
	 prefix checks below see the wrapped function's name.  */
      if (strstr (name, "_from_thumb") != NULL)
	name += 2;

      /* On soft-float targets, __truncdfsf2 is called to convert promoted
	 arguments to their argument types in non-prototyped
	 functions.  */
      if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
	return 1;

      /* Internal functions related to thread-local storage.  */
      if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
	return 1;
    }
  else
    {
      /* If we run against a stripped glibc, we may be unable to identify
	 special functions by name.  Check for one important case,
	 __aeabi_read_tp, by comparing the *code* against the default
	 implementation (this is hand-written ARM assembler in glibc).  */

      if (!is_thumb
	  && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
	     == 0xe3e00a0f /* mov r0, #0xffff0fff */
	  && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
	     == 0xe240f01f) /* sub pc, r0, #31 */
	return 1;
    }

  return 0;
}
515
/* Support routines for instruction parsing.  */

/* Mask covering bits 0..X inclusive.  NOTE(review): evaluates
   1L << (X + 1), so X must be smaller than the bit width of long.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST..FN of OBJ, inclusive.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Bits ST..FN of OBJ, sign-extended from bit FN.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* Destination of an ARM-mode branch: ADDR + 8 (PC read-ahead) plus the
   24-bit signed offset in INSTR scaled by 4.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((unsigned long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))

/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)		\
   | (bits ((insn1), 10, 10) << 11)	\
   | (bits ((insn2), 12, 14) << 8)	\
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12) \
   | bits ((insn), 0, 11))
539
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.
   IMM is the 12-bit i:imm3:imm8 field; the expanded 32-bit value is
   returned.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int rot = imm >> 7;
  unsigned int byte = imm & 0xff;

  /* Rotation fields 0..7 select a byte-replication pattern rather
     than an actual rotation.  */
  if (rot < 8)
    switch (rot / 2)
      {
      case 0:			/* 00000000 00000000 00000000 abcdefgh  */
	return byte;
      case 1:			/* 00000000 abcdefgh 00000000 abcdefgh  */
	return byte | (byte << 16);
      case 2:			/* abcdefgh 00000000 abcdefgh 00000000  */
	return (byte << 8) | (byte << 24);
      default:			/* abcdefgh abcdefgh abcdefgh abcdefgh  */
	return byte | (byte << 8) | (byte << 16) | (byte << 24);
      }

  /* Otherwise rotate an 8-bit value whose top bit is forced to one
     right by ROT positions.  */
  return (0x80 | (imm & 0x7f)) << (32 - rot);
}
563
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  return ((inst & 0xff00) == 0xbd00	/* pop {rlist, pc} */
	  || (inst & 0xf000) == 0xd000	/* conditional branch */
	  || (inst & 0xf800) == 0xe000	/* unconditional branch */
	  || (inst & 0xff00) == 0x4700	/* bx REG, blx REG */
	  || (inst & 0xff87) == 0x4687	/* mov pc, REG */
	  || (inst & 0xf500) == 0xb100);	/* CBNZ or CBZ.  */
}
590
/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
   might change control flow, 0 otherwise.  INST1 is the first
   halfword, INST2 the second.  */

static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x8f00)
	{
	  /* SUBS PC, LR, #imm8 (encoding T1: 0xf3de 0x8fXX).  The
	     previous mask value 0x3f00 was unreachable: the enclosing
	     condition already requires bit 15 of INST2 to be set, so
	     this exception-return form was never recognized.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  */
      int bit7 = (inst1 >> 7) & 1;
      int bit8 = (inst1 >> 8) & 1;

      if (bit7 != bit8)
	{
	  /* LDMIA/POP (bit 7 set) or LDMDB (bit 8 set).  These change
	     the PC iff the register list includes the PC (bit 15 of
	     INST2).  */
	  return (inst2 >> 15) & 1;
	}

      /* RFEIA (both set) or RFEDB (both clear).  */
      return 1;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR with Rt == PC.  */
      if ((inst1 & 0x000f) == 15)	/* Rn == PC (literal load).  */
	return 1;
      if ((inst1 >> 7) & 1)
	return 1;
      if ((inst2 >> 11) & 1)
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)
	return 1;

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xffe0) == 0xf000)
    {
      /* TBB (bit 4 of INST2 clear) or TBH (bit 4 set).  */
      return 1;
    }

  return 0;
}
685
/* Return 1 if the 16-bit Thumb instruction INSN restores SP in
   epilogue, 0 otherwise.  */

static int
thumb_instruction_restores_sp (unsigned short insn)
{
  if (insn == 0x46bd)			/* mov sp, r7 */
    return 1;
  if ((insn & 0xff80) == 0xb000)	/* add sp, imm */
    return 1;
  if ((insn & 0xfe00) == 0xbc00)	/* pop <registers> */
    return 1;

  return 0;
}
696
697 /* Analyze a Thumb prologue, looking for a recognizable stack frame
698 and frame pointer. Scan until we encounter a store that could
699 clobber the stack frame unexpectedly, or an unknown instruction.
700 Return the last address which is definitely safe to skip for an
701 initial breakpoint. */
702
703 static CORE_ADDR
704 thumb_analyze_prologue (struct gdbarch *gdbarch,
705 CORE_ADDR start, CORE_ADDR limit,
706 struct arm_prologue_cache *cache)
707 {
708 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
709 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
710 int i;
711 pv_t regs[16];
712 struct pv_area *stack;
713 struct cleanup *back_to;
714 CORE_ADDR offset;
715 CORE_ADDR unrecognized_pc = 0;
716
717 for (i = 0; i < 16; i++)
718 regs[i] = pv_register (i, 0);
719 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
720 back_to = make_cleanup_free_pv_area (stack);
721
722 while (start < limit)
723 {
724 unsigned short insn;
725
726 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
727
728 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
729 {
730 int regno;
731 int mask;
732
733 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
734 break;
735
736 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
737 whether to save LR (R14). */
738 mask = (insn & 0xff) | ((insn & 0x100) << 6);
739
740 /* Calculate offsets of saved R0-R7 and LR. */
741 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
742 if (mask & (1 << regno))
743 {
744 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
745 -4);
746 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
747 }
748 }
749 else if ((insn & 0xff80) == 0xb080) /* sub sp, #imm */
750 {
751 offset = (insn & 0x7f) << 2; /* get scaled offset */
752 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
753 -offset);
754 }
755 else if (thumb_instruction_restores_sp (insn))
756 {
757 /* Don't scan past the epilogue. */
758 break;
759 }
760 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
761 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
762 (insn & 0xff) << 2);
763 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
764 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
765 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
766 bits (insn, 6, 8));
767 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
768 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
769 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
770 bits (insn, 0, 7));
771 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
772 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
773 && pv_is_constant (regs[bits (insn, 3, 5)]))
774 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
775 regs[bits (insn, 6, 8)]);
776 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
777 && pv_is_constant (regs[bits (insn, 3, 6)]))
778 {
779 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
780 int rm = bits (insn, 3, 6);
781 regs[rd] = pv_add (regs[rd], regs[rm]);
782 }
783 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
784 {
785 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
786 int src_reg = (insn & 0x78) >> 3;
787 regs[dst_reg] = regs[src_reg];
788 }
789 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
790 {
791 /* Handle stores to the stack. Normally pushes are used,
792 but with GCC -mtpcs-frame, there may be other stores
793 in the prologue to create the frame. */
794 int regno = (insn >> 8) & 0x7;
795 pv_t addr;
796
797 offset = (insn & 0xff) << 2;
798 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
799
800 if (pv_area_store_would_trash (stack, addr))
801 break;
802
803 pv_area_store (stack, addr, 4, regs[regno]);
804 }
805 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
806 {
807 int rd = bits (insn, 0, 2);
808 int rn = bits (insn, 3, 5);
809 pv_t addr;
810
811 offset = bits (insn, 6, 10) << 2;
812 addr = pv_add_constant (regs[rn], offset);
813
814 if (pv_area_store_would_trash (stack, addr))
815 break;
816
817 pv_area_store (stack, addr, 4, regs[rd]);
818 }
819 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
820 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
821 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
822 /* Ignore stores of argument registers to the stack. */
823 ;
824 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
825 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
826 /* Ignore block loads from the stack, potentially copying
827 parameters from memory. */
828 ;
829 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
830 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
831 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
832 /* Similarly ignore single loads from the stack. */
833 ;
834 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
835 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
836 /* Skip register copies, i.e. saves to another register
837 instead of the stack. */
838 ;
839 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
840 /* Recognize constant loads; even with small stacks these are necessary
841 on Thumb. */
842 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
843 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
844 {
845 /* Constant pool loads, for the same reason. */
846 unsigned int constant;
847 CORE_ADDR loc;
848
849 loc = start + 4 + bits (insn, 0, 7) * 4;
850 constant = read_memory_unsigned_integer (loc, 4, byte_order);
851 regs[bits (insn, 8, 10)] = pv_constant (constant);
852 }
853 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
854 {
855 unsigned short inst2;
856
857 inst2 = read_memory_unsigned_integer (start + 2, 2,
858 byte_order_for_code);
859
860 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
861 {
862 /* BL, BLX. Allow some special function calls when
863 skipping the prologue; GCC generates these before
864 storing arguments to the stack. */
865 CORE_ADDR nextpc;
866 int j1, j2, imm1, imm2;
867
868 imm1 = sbits (insn, 0, 10);
869 imm2 = bits (inst2, 0, 10);
870 j1 = bit (inst2, 13);
871 j2 = bit (inst2, 11);
872
873 offset = ((imm1 << 12) + (imm2 << 1));
874 offset ^= ((!j2) << 22) | ((!j1) << 23);
875
876 nextpc = start + 4 + offset;
877 /* For BLX make sure to clear the low bits. */
878 if (bit (inst2, 12) == 0)
879 nextpc = nextpc & 0xfffffffc;
880
881 if (!skip_prologue_function (gdbarch, nextpc,
882 bit (inst2, 12) != 0))
883 break;
884 }
885
886 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
887 { registers } */
888 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
889 {
890 pv_t addr = regs[bits (insn, 0, 3)];
891 int regno;
892
893 if (pv_area_store_would_trash (stack, addr))
894 break;
895
896 /* Calculate offsets of saved registers. */
897 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
898 if (inst2 & (1 << regno))
899 {
900 addr = pv_add_constant (addr, -4);
901 pv_area_store (stack, addr, 4, regs[regno]);
902 }
903
904 if (insn & 0x0020)
905 regs[bits (insn, 0, 3)] = addr;
906 }
907
908 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
909 [Rn, #+/-imm]{!} */
910 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
911 {
912 int regno1 = bits (inst2, 12, 15);
913 int regno2 = bits (inst2, 8, 11);
914 pv_t addr = regs[bits (insn, 0, 3)];
915
916 offset = inst2 & 0xff;
917 if (insn & 0x0080)
918 addr = pv_add_constant (addr, offset);
919 else
920 addr = pv_add_constant (addr, -offset);
921
922 if (pv_area_store_would_trash (stack, addr))
923 break;
924
925 pv_area_store (stack, addr, 4, regs[regno1]);
926 pv_area_store (stack, pv_add_constant (addr, 4),
927 4, regs[regno2]);
928
929 if (insn & 0x0020)
930 regs[bits (insn, 0, 3)] = addr;
931 }
932
933 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
934 && (inst2 & 0x0c00) == 0x0c00
935 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
936 {
937 int regno = bits (inst2, 12, 15);
938 pv_t addr = regs[bits (insn, 0, 3)];
939
940 offset = inst2 & 0xff;
941 if (inst2 & 0x0200)
942 addr = pv_add_constant (addr, offset);
943 else
944 addr = pv_add_constant (addr, -offset);
945
946 if (pv_area_store_would_trash (stack, addr))
947 break;
948
949 pv_area_store (stack, addr, 4, regs[regno]);
950
951 if (inst2 & 0x0100)
952 regs[bits (insn, 0, 3)] = addr;
953 }
954
955 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
956 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
957 {
958 int regno = bits (inst2, 12, 15);
959 pv_t addr;
960
961 offset = inst2 & 0xfff;
962 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
963
964 if (pv_area_store_would_trash (stack, addr))
965 break;
966
967 pv_area_store (stack, addr, 4, regs[regno]);
968 }
969
970 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
971 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
972 /* Ignore stores of argument registers to the stack. */
973 ;
974
975 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
976 && (inst2 & 0x0d00) == 0x0c00
977 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
978 /* Ignore stores of argument registers to the stack. */
979 ;
980
981 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
982 { registers } */
983 && (inst2 & 0x8000) == 0x0000
984 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
985 /* Ignore block loads from the stack, potentially copying
986 parameters from memory. */
987 ;
988
989 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
990 [Rn, #+/-imm] */
991 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
992 /* Similarly ignore dual loads from the stack. */
993 ;
994
995 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
996 && (inst2 & 0x0d00) == 0x0c00
997 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
998 /* Similarly ignore single loads from the stack. */
999 ;
1000
1001 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
1002 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
1003 /* Similarly ignore single loads from the stack. */
1004 ;
1005
1006 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
1007 && (inst2 & 0x8000) == 0x0000)
1008 {
1009 unsigned int imm = ((bits (insn, 10, 10) << 11)
1010 | (bits (inst2, 12, 14) << 8)
1011 | bits (inst2, 0, 7));
1012
1013 regs[bits (inst2, 8, 11)]
1014 = pv_add_constant (regs[bits (insn, 0, 3)],
1015 thumb_expand_immediate (imm));
1016 }
1017
1018 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1019 && (inst2 & 0x8000) == 0x0000)
1020 {
1021 unsigned int imm = ((bits (insn, 10, 10) << 11)
1022 | (bits (inst2, 12, 14) << 8)
1023 | bits (inst2, 0, 7));
1024
1025 regs[bits (inst2, 8, 11)]
1026 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1027 }
1028
1029 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1030 && (inst2 & 0x8000) == 0x0000)
1031 {
1032 unsigned int imm = ((bits (insn, 10, 10) << 11)
1033 | (bits (inst2, 12, 14) << 8)
1034 | bits (inst2, 0, 7));
1035
1036 regs[bits (inst2, 8, 11)]
1037 = pv_add_constant (regs[bits (insn, 0, 3)],
1038 - (CORE_ADDR) thumb_expand_immediate (imm));
1039 }
1040
1041 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1042 && (inst2 & 0x8000) == 0x0000)
1043 {
1044 unsigned int imm = ((bits (insn, 10, 10) << 11)
1045 | (bits (inst2, 12, 14) << 8)
1046 | bits (inst2, 0, 7));
1047
1048 regs[bits (inst2, 8, 11)]
1049 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1050 }
1051
1052 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1053 {
1054 unsigned int imm = ((bits (insn, 10, 10) << 11)
1055 | (bits (inst2, 12, 14) << 8)
1056 | bits (inst2, 0, 7));
1057
1058 regs[bits (inst2, 8, 11)]
1059 = pv_constant (thumb_expand_immediate (imm));
1060 }
1061
1062 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1063 {
1064 unsigned int imm
1065 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1066
1067 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1068 }
1069
1070 else if (insn == 0xea5f /* mov.w Rd,Rm */
1071 && (inst2 & 0xf0f0) == 0)
1072 {
1073 int dst_reg = (inst2 & 0x0f00) >> 8;
1074 int src_reg = inst2 & 0xf;
1075 regs[dst_reg] = regs[src_reg];
1076 }
1077
1078 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1079 {
1080 /* Constant pool loads. */
1081 unsigned int constant;
1082 CORE_ADDR loc;
1083
1084 offset = bits (inst2, 0, 11);
1085 if (insn & 0x0080)
1086 loc = start + 4 + offset;
1087 else
1088 loc = start + 4 - offset;
1089
1090 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1091 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1092 }
1093
1094 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1095 {
1096 /* Constant pool loads. */
1097 unsigned int constant;
1098 CORE_ADDR loc;
1099
1100 offset = bits (inst2, 0, 7) << 2;
1101 if (insn & 0x0080)
1102 loc = start + 4 + offset;
1103 else
1104 loc = start + 4 - offset;
1105
1106 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1107 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1108
1109 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1110 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1111 }
1112
1113 else if (thumb2_instruction_changes_pc (insn, inst2))
1114 {
1115 /* Don't scan past anything that might change control flow. */
1116 break;
1117 }
1118 else
1119 {
1120 /* The optimizer might shove anything into the prologue,
1121 so we just skip what we don't recognize. */
1122 unrecognized_pc = start;
1123 }
1124
1125 start += 2;
1126 }
1127 else if (thumb_instruction_changes_pc (insn))
1128 {
1129 /* Don't scan past anything that might change control flow. */
1130 break;
1131 }
1132 else
1133 {
1134 /* The optimizer might shove anything into the prologue,
1135 so we just skip what we don't recognize. */
1136 unrecognized_pc = start;
1137 }
1138
1139 start += 2;
1140 }
1141
1142 if (arm_debug)
1143 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1144 paddress (gdbarch, start));
1145
1146 if (unrecognized_pc == 0)
1147 unrecognized_pc = start;
1148
1149 if (cache == NULL)
1150 {
1151 do_cleanups (back_to);
1152 return unrecognized_pc;
1153 }
1154
1155 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1156 {
1157 /* Frame pointer is fp. Frame size is constant. */
1158 cache->framereg = ARM_FP_REGNUM;
1159 cache->framesize = -regs[ARM_FP_REGNUM].k;
1160 }
1161 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1162 {
1163 /* Frame pointer is r7. Frame size is constant. */
1164 cache->framereg = THUMB_FP_REGNUM;
1165 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1166 }
1167 else
1168 {
1169 /* Try the stack pointer... this is a bit desperate. */
1170 cache->framereg = ARM_SP_REGNUM;
1171 cache->framesize = -regs[ARM_SP_REGNUM].k;
1172 }
1173
1174 for (i = 0; i < 16; i++)
1175 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1176 cache->saved_regs[i].addr = offset;
1177
1178 do_cleanups (back_to);
1179 return unrecognized_pc;
1180 }
1181
1182
1183 /* Try to analyze the instructions starting from PC, which load symbol
1184 __stack_chk_guard. Return the address of instruction after loading this
1185 symbol, set the dest register number to *BASEREG, and set the size of
1186 instructions for loading symbol in OFFSET. Return 0 if instructions are
1187 not recognized. */
1188
static CORE_ADDR
arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
				 unsigned int *destreg, int *offset)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  unsigned int low, high, address;

  /* ADDRESS stays 0 unless one of the recognized load sequences below
     matches; 0 doubles as the "not recognized" return value.  */
  address = 0;
  if (is_thumb)
    {
      unsigned short insn1
	= read_memory_unsigned_integer (pc, 2, byte_order_for_code);

      if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
	{
	  /* Single-insn literal-pool load: one 16-bit insn.  */
	  *destreg = bits (insn1, 8, 10);
	  *offset = 2;
	  /* The literal address is the word-aligned PC plus 4, plus the
	     scaled 8-bit immediate.  */
	  address = (pc & 0xfffffffc) + 4 + (bits (insn1, 0, 7) << 2);
	  address = read_memory_unsigned_integer (address, 4,
						  byte_order_for_code);
	}
      else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
	{
	  unsigned short insn2
	    = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

	  /* Low half of the address from movw's split immediate.  */
	  low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);

	  /* Read the next 32-bit insn, expected to be the pairing movt.  */
	  insn1
	    = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
	  insn2
	    = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);

	  /* movt Rd, #const */
	  if ((insn1 & 0xfbc0) == 0xf2c0)
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
	      *destreg = bits (insn2, 8, 11);
	      /* movw + movt: two 32-bit (4 x 16-bit) instructions.  */
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }
  else
    {
      unsigned int insn
	= read_memory_unsigned_integer (pc, 4, byte_order_for_code);

      if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, [PC, #immed] */
	{
	  /* PC-relative load; in ARM state PC reads as insn address + 8.  */
	  address = bits (insn, 0, 11) + pc + 8;
	  address = read_memory_unsigned_integer (address, 4,
						  byte_order_for_code);

	  *destreg = bits (insn, 12, 15);
	  *offset = 4;
	}
      else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
	{
	  low = EXTRACT_MOVW_MOVT_IMM_A (insn);

	  /* Expect the pairing movt as the next instruction.  */
	  insn
	    = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);

	  if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_A (insn);
	      *destreg = bits (insn, 12, 15);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }

  return address;
}
1266
1267 /* Try to skip a sequence of instructions used for stack protector. If PC
1268 points to the first instruction of this sequence, return the address of
1269 first instruction after this sequence, otherwise, return original PC.
1270
1271 On arm, this sequence of instructions is composed of mainly three steps,
1272 Step 1: load symbol __stack_chk_guard,
1273 Step 2: load from address of __stack_chk_guard,
1274 Step 3: store it to somewhere else.
1275
1276 Usually, instructions on step 2 and step 3 are the same on various ARM
1277 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1278 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1279 instructions in step 1 vary from different ARM architectures. On ARMv7,
1280 they are,
1281
1282 movw Rn, #:lower16:__stack_chk_guard
1283 movt Rn, #:upper16:__stack_chk_guard
1284
1285 On ARMv5t, it is,
1286
1287 ldr Rn, .Label
1288 ....
   .Label:
1290 .word __stack_chk_guard
1291
1292 Since ldr/str is a very popular instruction, we can't use them as
1293 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1294 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */
1296
static CORE_ADDR
arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int basereg;
  struct bound_minimal_symbol stack_chk_guard;
  int offset;
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  CORE_ADDR addr;

  /* Try to parse the instructions in Step 1.  On success ADDR is the
     loaded address, BASEREG the destination register and OFFSET the
     byte size of the recognized sequence.  */
  addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
					   &basereg, &offset);
  if (!addr)
    return pc;

  stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
  /* ADDR must correspond to a symbol whose name is __stack_chk_guard.
     Otherwise, this sequence cannot be for stack protector.  */
  if (stack_chk_guard.minsym == NULL
      || strncmp (MSYMBOL_LINKAGE_NAME (stack_chk_guard.minsym),
		  "__stack_chk_guard",
		  strlen ("__stack_chk_guard")) != 0)
    return pc;

  if (is_thumb)
    {
      unsigned int destreg;
      unsigned short insn
	= read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding T1.  The base register
	 must be the one Step 1 loaded the guard's address into.  */
      if ((insn & 0xf800) != 0x6800)
	return pc;
      if (bits (insn, 3, 5) != basereg)
	return pc;
      destreg = bits (insn, 0, 2);

      insn = read_memory_unsigned_integer (pc + offset + 2, 2,
					   byte_order_for_code);
      /* Step 3: str Rd, [Rn, #immed], encoding T1.  The stored register
	 must be the one just loaded in Step 2.  */
      if ((insn & 0xf800) != 0x6000)
	return pc;
      if (destreg != bits (insn, 0, 2))
	return pc;
    }
  else
    {
      unsigned int destreg;
      unsigned int insn
	= read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding A1.  Base register must
	 match Step 1's destination.  */
      if ((insn & 0x0e500000) != 0x04100000)
	return pc;
      if (bits (insn, 16, 19) != basereg)
	return pc;
      destreg = bits (insn, 12, 15);
      /* Step 3: str Rd, [Rn, #immed], encoding A1.  Stored register
	 must match Step 2's destination.  */
      insn = read_memory_unsigned_integer (pc + offset + 4,
					   4, byte_order_for_code);
      if ((insn & 0x0e500000) != 0x04000000)
	return pc;
      if (bits (insn, 12, 15) != destreg)
	return pc;
    }
  /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
     on arm.  */
  if (is_thumb)
    return pc + offset + 4;
  else
    return pc + offset + 8;
}
1370
1371 /* Advance the PC across any function entry prologue instructions to
1372 reach some "real" code.
1373
1374 The APCS (ARM Procedure Call Standard) defines the following
1375 prologue:
1376
1377 mov ip, sp
1378 [stmfd sp!, {a1,a2,a3,a4}]
1379 stmfd sp!, {...,fp,ip,lr,pc}
1380 [stfe f7, [sp, #-12]!]
1381 [stfe f6, [sp, #-12]!]
1382 [stfe f5, [sp, #-12]!]
1383 [stfe f4, [sp, #-12]!]
1384 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1385
1386 static CORE_ADDR
1387 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1388 {
1389 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1390 unsigned long inst;
1391 CORE_ADDR func_addr, limit_pc;
1392
1393 /* See if we can determine the end of the prologue via the symbol table.
1394 If so, then return either PC, or the PC after the prologue, whichever
1395 is greater. */
1396 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1397 {
1398 CORE_ADDR post_prologue_pc
1399 = skip_prologue_using_sal (gdbarch, func_addr);
1400 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
1401
1402 if (post_prologue_pc)
1403 post_prologue_pc
1404 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1405
1406
1407 /* GCC always emits a line note before the prologue and another
1408 one after, even if the two are at the same address or on the
1409 same line. Take advantage of this so that we do not need to
1410 know every instruction that might appear in the prologue. We
1411 will have producer information for most binaries; if it is
1412 missing (e.g. for -gstabs), assuming the GNU tools. */
1413 if (post_prologue_pc
1414 && (cust == NULL
1415 || COMPUNIT_PRODUCER (cust) == NULL
1416 || strncmp (COMPUNIT_PRODUCER (cust), "GNU ",
1417 sizeof ("GNU ") - 1) == 0
1418 || strncmp (COMPUNIT_PRODUCER (cust), "clang ",
1419 sizeof ("clang ") - 1) == 0))
1420 return post_prologue_pc;
1421
1422 if (post_prologue_pc != 0)
1423 {
1424 CORE_ADDR analyzed_limit;
1425
1426 /* For non-GCC compilers, make sure the entire line is an
1427 acceptable prologue; GDB will round this function's
1428 return value up to the end of the following line so we
1429 can not skip just part of a line (and we do not want to).
1430
1431 RealView does not treat the prologue specially, but does
1432 associate prologue code with the opening brace; so this
1433 lets us skip the first line if we think it is the opening
1434 brace. */
1435 if (arm_pc_is_thumb (gdbarch, func_addr))
1436 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1437 post_prologue_pc, NULL);
1438 else
1439 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1440 post_prologue_pc, NULL);
1441
1442 if (analyzed_limit != post_prologue_pc)
1443 return func_addr;
1444
1445 return post_prologue_pc;
1446 }
1447 }
1448
1449 /* Can't determine prologue from the symbol table, need to examine
1450 instructions. */
1451
1452 /* Find an upper limit on the function prologue using the debug
1453 information. If the debug information could not be used to provide
1454 that bound, then use an arbitrary large number as the upper bound. */
1455 /* Like arm_scan_prologue, stop no later than pc + 64. */
1456 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1457 if (limit_pc == 0)
1458 limit_pc = pc + 64; /* Magic. */
1459
1460
1461 /* Check if this is Thumb code. */
1462 if (arm_pc_is_thumb (gdbarch, pc))
1463 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1464 else
1465 return arm_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1466 }
1467
1468 /* *INDENT-OFF* */
1469 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1470 This function decodes a Thumb function prologue to determine:
1471 1) the size of the stack frame
1472 2) which registers are saved on it
1473 3) the offsets of saved regs
1474 4) the offset from the stack pointer to the frame pointer
1475
1476 A typical Thumb function prologue would create this stack frame
1477 (offsets relative to FP)
1478 old SP -> 24 stack parameters
1479 20 LR
1480 16 R7
1481 R7 -> 0 local variables (16 bytes)
1482 SP -> -12 additional stack space (12 bytes)
1483 The frame size would thus be 36 bytes, and the frame offset would be
1484 12 bytes. The frame register is R7.
1485
1486 The comments for thumb_skip_prolog() describe the algorithm we use
1487 to detect the end of the prolog. */
1488 /* *INDENT-ON* */
1489
1490 static void
1491 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1492 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1493 {
1494 CORE_ADDR prologue_start;
1495 CORE_ADDR prologue_end;
1496
1497 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1498 &prologue_end))
1499 {
1500 /* See comment in arm_scan_prologue for an explanation of
1501 this heuristics. */
1502 if (prologue_end > prologue_start + 64)
1503 {
1504 prologue_end = prologue_start + 64;
1505 }
1506 }
1507 else
1508 /* We're in the boondocks: we have no idea where the start of the
1509 function is. */
1510 return;
1511
1512 prologue_end = min (prologue_end, prev_pc);
1513
1514 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1515 }
1516
1517 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1518
static int
arm_instruction_changes_pc (uint32_t this_instr)
{
  if (bits (this_instr, 28, 31) == INST_NV)
    /* Unconditional instructions.  Dispatch on the op1 field
       (bits 24-27).  */
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	/* Branch with Link and change to Thumb.  */
	return 1;
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  Writing the PC here is not a
	   valid encoding, so reject it outright.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	return 0;
      default:
	return 0;
      }
  else
    /* Conditional instructions.  Dispatch on the major opcode class
       (bits 25-27).  */
    switch (bits (this_instr, 25, 27))
      {
      case 0x0:
	if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
	  {
	    /* Multiplies and extra load/stores.  */
	    if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
	      /* Neither multiplies nor extension load/stores are allowed
		 to modify PC.  */
	      return 0;

	    /* Otherwise, miscellaneous instructions.  */

	    /* BX <reg>, BXJ <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff2
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      return 1;

	    /* Other miscellaneous instructions are unpredictable if they
	       modify PC.  */
	    return 0;
	  }
	/* Data processing instruction.  Fall through.  */

      case 0x1:
	/* Data processing writes the PC iff Rd (bits 12-15) is r15.  */
	if (bits (this_instr, 12, 15) == 15)
	  return 1;
	else
	  return 0;

      case 0x2:
      case 0x3:
	/* Media instructions and architecturally undefined instructions.  */
	if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
	  return 0;

	/* Stores.  */
	if (bit (this_instr, 20) == 0)
	  return 0;

	/* Loads.  A load into r15 changes the PC.  */
	if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
	  return 1;
	else
	  return 0;

      case 0x4:
	/* Load/store multiple.  Changes PC only for a load (bit 20)
	   whose register list (bit 15) includes r15.  */
	if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
	  return 1;
	else
	  return 0;

      case 0x5:
	/* Branch and branch with link.  */
	return 1;

      case 0x6:
      case 0x7:
	/* Coprocessor transfers or SWIs can not affect PC.  */
	return 0;

      default:
	internal_error (__FILE__, __LINE__, _("bad value in switch"));
      }
}
1608
1609 /* Return 1 if the ARM instruction INSN restores SP in epilogue, 0
1610 otherwise. */
1611
1612 static int
1613 arm_instruction_restores_sp (unsigned int insn)
1614 {
1615 if (bits (insn, 28, 31) != INST_NV)
1616 {
1617 if ((insn & 0x0df0f000) == 0x0080d000
1618 /* ADD SP (register or immediate). */
1619 || (insn & 0x0df0f000) == 0x0040d000
1620 /* SUB SP (register or immediate). */
1621 || (insn & 0x0ffffff0) == 0x01a0d000
1622 /* MOV SP. */
1623 || (insn & 0x0fff0000) == 0x08bd0000
1624 /* POP (LDMIA). */
1625 || (insn & 0x0fff0000) == 0x049d0000)
1626 /* POP of a single register. */
1627 return 1;
1628 }
1629
1630 return 0;
1631 }
1632
1633 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1634 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1635 fill it in. Return the first address not recognized as a prologue
1636 instruction.
1637
1638 We recognize all the instructions typically found in ARM prologues,
1639 plus harmless instructions which can be skipped (either for analysis
1640 purposes, or a more restrictive set that can be skipped when finding
1641 the end of the prologue). */
1642
1643 static CORE_ADDR
1644 arm_analyze_prologue (struct gdbarch *gdbarch,
1645 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1646 struct arm_prologue_cache *cache)
1647 {
1648 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1649 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1650 int regno;
1651 CORE_ADDR offset, current_pc;
1652 pv_t regs[ARM_FPS_REGNUM];
1653 struct pv_area *stack;
1654 struct cleanup *back_to;
1655 CORE_ADDR unrecognized_pc = 0;
1656
1657 /* Search the prologue looking for instructions that set up the
1658 frame pointer, adjust the stack pointer, and save registers.
1659
1660 Be careful, however, and if it doesn't look like a prologue,
1661 don't try to scan it. If, for instance, a frameless function
1662 begins with stmfd sp!, then we will tell ourselves there is
1663 a frame, which will confuse stack traceback, as well as "finish"
1664 and other operations that rely on a knowledge of the stack
1665 traceback. */
1666
1667 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1668 regs[regno] = pv_register (regno, 0);
1669 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1670 back_to = make_cleanup_free_pv_area (stack);
1671
1672 for (current_pc = prologue_start;
1673 current_pc < prologue_end;
1674 current_pc += 4)
1675 {
1676 unsigned int insn
1677 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1678
1679 if (insn == 0xe1a0c00d) /* mov ip, sp */
1680 {
1681 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1682 continue;
1683 }
1684 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1685 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1686 {
1687 unsigned imm = insn & 0xff; /* immediate value */
1688 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1689 int rd = bits (insn, 12, 15);
1690 imm = (imm >> rot) | (imm << (32 - rot));
1691 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1692 continue;
1693 }
1694 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1695 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1696 {
1697 unsigned imm = insn & 0xff; /* immediate value */
1698 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1699 int rd = bits (insn, 12, 15);
1700 imm = (imm >> rot) | (imm << (32 - rot));
1701 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1702 continue;
1703 }
1704 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1705 [sp, #-4]! */
1706 {
1707 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1708 break;
1709 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1710 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1711 regs[bits (insn, 12, 15)]);
1712 continue;
1713 }
1714 else if ((insn & 0xffff0000) == 0xe92d0000)
1715 /* stmfd sp!, {..., fp, ip, lr, pc}
1716 or
1717 stmfd sp!, {a1, a2, a3, a4} */
1718 {
1719 int mask = insn & 0xffff;
1720
1721 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1722 break;
1723
1724 /* Calculate offsets of saved registers. */
1725 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1726 if (mask & (1 << regno))
1727 {
1728 regs[ARM_SP_REGNUM]
1729 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1730 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1731 }
1732 }
1733 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1734 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1735 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1736 {
1737 /* No need to add this to saved_regs -- it's just an arg reg. */
1738 continue;
1739 }
1740 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1741 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1742 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1743 {
1744 /* No need to add this to saved_regs -- it's just an arg reg. */
1745 continue;
1746 }
1747 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1748 { registers } */
1749 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1750 {
1751 /* No need to add this to saved_regs -- it's just arg regs. */
1752 continue;
1753 }
1754 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1755 {
1756 unsigned imm = insn & 0xff; /* immediate value */
1757 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1758 imm = (imm >> rot) | (imm << (32 - rot));
1759 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1760 }
1761 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1762 {
1763 unsigned imm = insn & 0xff; /* immediate value */
1764 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1765 imm = (imm >> rot) | (imm << (32 - rot));
1766 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1767 }
1768 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1769 [sp, -#c]! */
1770 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1771 {
1772 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1773 break;
1774
1775 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1776 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1777 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1778 }
1779 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1780 [sp!] */
1781 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1782 {
1783 int n_saved_fp_regs;
1784 unsigned int fp_start_reg, fp_bound_reg;
1785
1786 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1787 break;
1788
1789 if ((insn & 0x800) == 0x800) /* N0 is set */
1790 {
1791 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1792 n_saved_fp_regs = 3;
1793 else
1794 n_saved_fp_regs = 1;
1795 }
1796 else
1797 {
1798 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1799 n_saved_fp_regs = 2;
1800 else
1801 n_saved_fp_regs = 4;
1802 }
1803
1804 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1805 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1806 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1807 {
1808 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1809 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1810 regs[fp_start_reg++]);
1811 }
1812 }
1813 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1814 {
1815 /* Allow some special function calls when skipping the
1816 prologue; GCC generates these before storing arguments to
1817 the stack. */
1818 CORE_ADDR dest = BranchDest (current_pc, insn);
1819
1820 if (skip_prologue_function (gdbarch, dest, 0))
1821 continue;
1822 else
1823 break;
1824 }
1825 else if ((insn & 0xf0000000) != 0xe0000000)
1826 break; /* Condition not true, exit early. */
1827 else if (arm_instruction_changes_pc (insn))
1828 /* Don't scan past anything that might change control flow. */
1829 break;
1830 else if (arm_instruction_restores_sp (insn))
1831 {
1832 /* Don't scan past the epilogue. */
1833 break;
1834 }
1835 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1836 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1837 /* Ignore block loads from the stack, potentially copying
1838 parameters from memory. */
1839 continue;
1840 else if ((insn & 0xfc500000) == 0xe4100000
1841 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1842 /* Similarly ignore single loads from the stack. */
1843 continue;
1844 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1845 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1846 register instead of the stack. */
1847 continue;
1848 else
1849 {
1850 /* The optimizer might shove anything into the prologue, if
1851 we build up cache (cache != NULL) from scanning prologue,
1852 we just skip what we don't recognize and scan further to
1853 make cache as complete as possible. However, if we skip
1854 prologue, we'll stop immediately on unrecognized
1855 instruction. */
1856 unrecognized_pc = current_pc;
1857 if (cache != NULL)
1858 continue;
1859 else
1860 break;
1861 }
1862 }
1863
1864 if (unrecognized_pc == 0)
1865 unrecognized_pc = current_pc;
1866
1867 if (cache)
1868 {
1869 int framereg, framesize;
1870
1871 /* The frame size is just the distance from the frame register
1872 to the original stack pointer. */
1873 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1874 {
1875 /* Frame pointer is fp. */
1876 framereg = ARM_FP_REGNUM;
1877 framesize = -regs[ARM_FP_REGNUM].k;
1878 }
1879 else
1880 {
1881 /* Try the stack pointer... this is a bit desperate. */
1882 framereg = ARM_SP_REGNUM;
1883 framesize = -regs[ARM_SP_REGNUM].k;
1884 }
1885
1886 cache->framereg = framereg;
1887 cache->framesize = framesize;
1888
1889 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1890 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1891 cache->saved_regs[regno].addr = offset;
1892 }
1893
1894 if (arm_debug)
1895 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1896 paddress (gdbarch, unrecognized_pc));
1897
1898 do_cleanups (back_to);
1899 return unrecognized_pc;
1900 }
1901
1902 static void
1903 arm_scan_prologue (struct frame_info *this_frame,
1904 struct arm_prologue_cache *cache)
1905 {
1906 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1907 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1908 int regno;
1909 CORE_ADDR prologue_start, prologue_end, current_pc;
1910 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1911 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1912 pv_t regs[ARM_FPS_REGNUM];
1913 struct pv_area *stack;
1914 struct cleanup *back_to;
1915 CORE_ADDR offset;
1916
1917 /* Assume there is no frame until proven otherwise. */
1918 cache->framereg = ARM_SP_REGNUM;
1919 cache->framesize = 0;
1920
1921 /* Check for Thumb prologue. */
1922 if (arm_frame_is_thumb (this_frame))
1923 {
1924 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1925 return;
1926 }
1927
1928 /* Find the function prologue. If we can't find the function in
1929 the symbol table, peek in the stack frame to find the PC. */
1930 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1931 &prologue_end))
1932 {
1933 /* One way to find the end of the prologue (which works well
1934 for unoptimized code) is to do the following:
1935
1936 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1937
1938 if (sal.line == 0)
1939 prologue_end = prev_pc;
1940 else if (sal.end < prologue_end)
1941 prologue_end = sal.end;
1942
1943 This mechanism is very accurate so long as the optimizer
1944 doesn't move any instructions from the function body into the
1945 prologue. If this happens, sal.end will be the last
1946 instruction in the first hunk of prologue code just before
1947 the first instruction that the scheduler has moved from
1948 the body to the prologue.
1949
1950 In order to make sure that we scan all of the prologue
1951 instructions, we use a slightly less accurate mechanism which
1952 may scan more than necessary. To help compensate for this
1953 lack of accuracy, the prologue scanning loop below contains
1954 several clauses which'll cause the loop to terminate early if
1955 an implausible prologue instruction is encountered.
1956
1957 The expression
1958
1959 prologue_start + 64
1960
1961 is a suitable endpoint since it accounts for the largest
1962 possible prologue plus up to five instructions inserted by
1963 the scheduler. */
1964
1965 if (prologue_end > prologue_start + 64)
1966 {
1967 prologue_end = prologue_start + 64; /* See above. */
1968 }
1969 }
1970 else
1971 {
1972 /* We have no symbol information. Our only option is to assume this
1973 function has a standard stack frame and the normal frame register.
1974 Then, we can find the value of our frame pointer on entrance to
1975 the callee (or at the present moment if this is the innermost frame).
1976 The value stored there should be the address of the stmfd + 8. */
1977 CORE_ADDR frame_loc;
1978 LONGEST return_value;
1979
1980 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1981 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1982 return;
1983 else
1984 {
1985 prologue_start = gdbarch_addr_bits_remove
1986 (gdbarch, return_value) - 8;
1987 prologue_end = prologue_start + 64; /* See above. */
1988 }
1989 }
1990
1991 if (prev_pc < prologue_end)
1992 prologue_end = prev_pc;
1993
1994 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1995 }
1996
1997 static struct arm_prologue_cache *
1998 arm_make_prologue_cache (struct frame_info *this_frame)
1999 {
2000 int reg;
2001 struct arm_prologue_cache *cache;
2002 CORE_ADDR unwound_fp;
2003
2004 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2005 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2006
2007 arm_scan_prologue (this_frame, cache);
2008
2009 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2010 if (unwound_fp == 0)
2011 return cache;
2012
2013 cache->prev_sp = unwound_fp + cache->framesize;
2014
2015 /* Calculate actual addresses of saved registers using offsets
2016 determined by arm_scan_prologue. */
2017 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2018 if (trad_frame_addr_p (cache->saved_regs, reg))
2019 cache->saved_regs[reg].addr += cache->prev_sp;
2020
2021 return cache;
2022 }
2023
2024 /* Our frame ID for a normal frame is the current function's starting PC
2025 and the caller's SP when we were called. */
2026
2027 static void
2028 arm_prologue_this_id (struct frame_info *this_frame,
2029 void **this_cache,
2030 struct frame_id *this_id)
2031 {
2032 struct arm_prologue_cache *cache;
2033 struct frame_id id;
2034 CORE_ADDR pc, func;
2035
2036 if (*this_cache == NULL)
2037 *this_cache = arm_make_prologue_cache (this_frame);
2038 cache = *this_cache;
2039
2040 /* This is meant to halt the backtrace at "_start". */
2041 pc = get_frame_pc (this_frame);
2042 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2043 return;
2044
2045 /* If we've hit a wall, stop. */
2046 if (cache->prev_sp == 0)
2047 return;
2048
2049 /* Use function start address as part of the frame ID. If we cannot
2050 identify the start address (due to missing symbol information),
2051 fall back to just using the current PC. */
2052 func = get_frame_func (this_frame);
2053 if (!func)
2054 func = pc;
2055
2056 id = frame_id_build (cache->prev_sp, func);
2057 *this_id = id;
2058 }
2059
2060 static struct value *
2061 arm_prologue_prev_register (struct frame_info *this_frame,
2062 void **this_cache,
2063 int prev_regnum)
2064 {
2065 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2066 struct arm_prologue_cache *cache;
2067
2068 if (*this_cache == NULL)
2069 *this_cache = arm_make_prologue_cache (this_frame);
2070 cache = *this_cache;
2071
2072 /* If we are asked to unwind the PC, then we need to return the LR
2073 instead. The prologue may save PC, but it will point into this
2074 frame's prologue, not the next frame's resume location. Also
2075 strip the saved T bit. A valid LR may have the low bit set, but
2076 a valid PC never does. */
2077 if (prev_regnum == ARM_PC_REGNUM)
2078 {
2079 CORE_ADDR lr;
2080
2081 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2082 return frame_unwind_got_constant (this_frame, prev_regnum,
2083 arm_addr_bits_remove (gdbarch, lr));
2084 }
2085
2086 /* SP is generally not saved to the stack, but this frame is
2087 identified by the next frame's stack pointer at the time of the call.
2088 The value was already reconstructed into PREV_SP. */
2089 if (prev_regnum == ARM_SP_REGNUM)
2090 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2091
2092 /* The CPSR may have been changed by the call instruction and by the
2093 called function. The only bit we can reconstruct is the T bit,
2094 by checking the low bit of LR as of the call. This is a reliable
2095 indicator of Thumb-ness except for some ARM v4T pre-interworking
2096 Thumb code, which could get away with a clear low bit as long as
2097 the called function did not use bx. Guess that all other
2098 bits are unchanged; the condition flags are presumably lost,
2099 but the processor status is likely valid. */
2100 if (prev_regnum == ARM_PS_REGNUM)
2101 {
2102 CORE_ADDR lr, cpsr;
2103 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2104
2105 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2106 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2107 if (IS_THUMB_ADDR (lr))
2108 cpsr |= t_bit;
2109 else
2110 cpsr &= ~t_bit;
2111 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2112 }
2113
2114 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2115 prev_regnum);
2116 }
2117
/* Prologue-analysis based unwinder for normal frames.  Registered as a
   fallback; DWARF CFI and the exception-table sniffer take precedence
   when available.  */

struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2126
/* Maintain a list of ARM exception table entries per objfile, similar to the
   list of mapping symbols.  We only cache entries for standard ARM-defined
   personality routines; the cache will contain only the frame unwinding
   instructions associated with the entry (not the descriptors).  */

static const struct objfile_data *arm_exidx_data_key;

/* One cached exception-table entry: the section-relative address of the
   start of the region it covers, and the normalized unwind instruction
   bytes (NULL when no instructions were extracted, e.g. for
   EXIDX_CANTUNWIND entries).  */

struct arm_exidx_entry
{
  bfd_vma addr;
  gdb_byte *entry;
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

/* Per-objfile cache: one vector of entries per BFD section, indexed by
   the section's index, in ascending address order.  */

struct arm_exidx_data
{
  VEC(arm_exidx_entry_s) **section_maps;
};
2146
2147 static void
2148 arm_exidx_data_free (struct objfile *objfile, void *arg)
2149 {
2150 struct arm_exidx_data *data = arg;
2151 unsigned int i;
2152
2153 for (i = 0; i < objfile->obfd->section_count; i++)
2154 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2155 }
2156
2157 static inline int
2158 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2159 const struct arm_exidx_entry *rhs)
2160 {
2161 return lhs->addr < rhs->addr;
2162 }
2163
2164 static struct obj_section *
2165 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2166 {
2167 struct obj_section *osect;
2168
2169 ALL_OBJFILE_OSECTIONS (objfile, osect)
2170 if (bfd_get_section_flags (objfile->obfd,
2171 osect->the_bfd_section) & SEC_ALLOC)
2172 {
2173 bfd_vma start, size;
2174 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2175 size = bfd_get_section_size (osect->the_bfd_section);
2176
2177 if (start <= vma && vma < start + size)
2178 return osect;
2179 }
2180
2181 return NULL;
2182 }
2183
/* Parse contents of exception table and exception index sections
   of OBJFILE, and fill in the exception table entry cache.

   For each entry that refers to a standard ARM-defined personality
   routine, extract the frame unwinding instructions (from either
   the index or the table section).  The unwinding instructions
   are normalized by:
   - extracting them from the rest of the table data
   - converting to host endianness
   - appending the implicit 0xb0 ("Finish") code

   The extracted and normalized instructions are stored for later
   retrieval by the arm_find_exidx_entry routine.  */

static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct cleanup *cleanups;
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  bfd_size_type exidx_size = 0, extab_size = 0;
  gdb_byte *exidx_data = NULL, *extab_data = NULL;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;
  cleanups = make_cleanup (null_cleanup, NULL);

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_size = bfd_get_section_size (exidx);
      exidx_data = xmalloc (exidx_size);
      make_cleanup (xfree, exidx_data);

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data, 0, exidx_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_size = bfd_get_section_size (extab);
      extab_data = xmalloc (extab_size);
      make_cleanup (xfree, extab_data);

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data, 0, extab_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  /* Allocate exception table data structure.  */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index entry is a pair of 32-bit
     words: a prel31 offset to the function start, and either an
     inline unwind description or a prel31 offset into .ARM.extab.  */
  for (i = 0; i < exidx_size / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function.  The XOR/subtract pair
	 sign-extends the 31-bit prel31 field from bit 30.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  Again a prel31
	     offset, this time relative to the second index word.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine, given as a prel31
		     offset relative to the word just read.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_size)
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   extab_data + addr - extab_vma);
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address.  */
      if (n_words)
	if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
					       n_bytes + n_words * 4 + 1);

	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }

  do_cleanups (cleanups);
}
2406
/* Search for the exception table entry covering MEMADDR.  If one is found,
   return a pointer to its data.  Otherwise, return 0.  If START is non-NULL,
   set *START to the start of the region covered by this entry.  */

static gdb_byte *
arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_exidx_data *data;
      VEC(arm_exidx_entry_s) *map;
      /* Cached entries are keyed by section-relative addresses, so
	 convert MEMADDR to that form before searching.  */
      struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_exidx_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_exidx_entry_s, map))
	    {
	      struct arm_exidx_entry *map_sym;

	      idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
				     arm_compare_exidx_entries);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 exception table entry covers this address.  */
	      if (idx < VEC_length (arm_exidx_entry_s, map))
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx);
		  if (map_sym->addr == map_key.addr)
		    {
		      if (start)
			*start = map_sym->addr + obj_section_addr (sec);
		      return map_sym->entry;
		    }
		}

	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
		  if (start)
		    *start = map_sym->addr + obj_section_addr (sec);
		  return map_sym->entry;
		}
	    }
	}
    }

  return NULL;
}
2463
/* Given the current frame THIS_FRAME, and its associated frame unwinding
   instruction list from the ARM exception table entry ENTRY, allocate and
   return a prologue cache structure describing how to unwind this frame.

   Return NULL if the unwinding instruction list contains a "spare",
   "reserved" or "refuse to unwind" instruction as defined in section
   "9.3 Frame unwinding instructions" of the "Exception Handling ABI
   for the ARM Architecture" document.  */

static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  CORE_ADDR vsp = 0;
  int vsp_valid = 0;

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.  */
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  */
      insn = *entry++;

      if ((insn & 0xc0) == 0)
	{
	  /* 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      else if ((insn & 0xc0) == 0x40)
	{
	  /* 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4.  */
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  /* 1000iiii iiiiiiii: pop under 12-bit register mask.  */
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  /* 1001nnnn: vsp = r[nnnn].  */
	  int reg = insn & 0xf;

	  /* Reserved cases.  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  /* 1010xnnn: pop r4..r[4+nnn], plus r14 when x is set.  */
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* 10110000: "Finish" -- end of the instruction list.  */
	  /* We could only have updated PC by popping into it; if so, it
	     will show up as address.  Otherwise, copy LR into PC.  */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  /* 10110001 0000iiii: pop r0..r3 under 4-bit mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2).  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  /* 10110011 sssscccc: pop VFP registers, FSTMFDX format.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  /* 10111nnn: pop D[8]..D[8+nnn], FSTMFDX format.  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if (insn == 0xc6)
	{
	  /* 11000110 sssscccc: pop iWMMXt data registers.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  /* 11000111 0000iiii: pop iWMMXt control registers.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  /* 11000nnn (nnn != 6, 7 -- caught above): pop iWMMXt
	     WR[10]..WR[10+nnn].  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  /* 11001000 sssscccc: pop VFP registers D16..D31 range.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  /* 11001001 sssscccc: pop VFP registers, FSTMFDD format.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  /* 11010nnn: pop D[8]..D[8+nnn], FSTMFDD format.  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}
2762
/* Unwinding via ARM exception table entries.  Note that the sniffer
   already computes a filled-in prologue cache, which is then used
   with the same arm_prologue_this_id and arm_prologue_prev_register
   routines also used for prologue-parsing based unwinding.

   Return 1 and store the cache in *THIS_PROLOGUE_CACHE if an exception
   table entry applies to THIS_FRAME; return 0 to let other unwinders
   have a go.  */

static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  LONGEST insn;

	  /* Check for a Thumb 'svc' just before the current PC.  */
	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
					byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  LONGEST insn;

	  /* Check for an ARM 'svc' just before the current PC.  */
	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
					byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2857
/* Exception-table based unwinder.  Shares this_id/prev_register with
   the prologue unwinder; only the sniffer (which also builds the
   cache) differs.  */

struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2866
2867 /* Recognize GCC's trampoline for thumb call-indirect. If we are in a
2868 trampoline, return the target PC. Otherwise return 0.
2869
2870 void call0a (char c, short s, int i, long l) {}
2871
2872 int main (void)
2873 {
2874 (*pointer_to_call0a) (c, s, i, l);
2875 }
2876
2877 Instead of calling a stub library function _call_via_xx (xx is
2878 the register name), GCC may inline the trampoline in the object
2879 file as below (register r2 has the address of call0a).
2880
2881 .global main
2882 .type main, %function
2883 ...
2884 bl .L1
2885 ...
2886 .size main, .-main
2887
2888 .L1:
2889 bx r2
2890
2891 The trampoline 'bx r2' doesn't belong to main. */
2892
2893 static CORE_ADDR
2894 arm_skip_bx_reg (struct frame_info *frame, CORE_ADDR pc)
2895 {
2896 /* The heuristics of recognizing such trampoline is that FRAME is
2897 executing in Thumb mode and the instruction on PC is 'bx Rm'. */
2898 if (arm_frame_is_thumb (frame))
2899 {
2900 gdb_byte buf[2];
2901
2902 if (target_read_memory (pc, buf, 2) == 0)
2903 {
2904 struct gdbarch *gdbarch = get_frame_arch (frame);
2905 enum bfd_endian byte_order_for_code
2906 = gdbarch_byte_order_for_code (gdbarch);
2907 uint16_t insn
2908 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2909
2910 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2911 {
2912 CORE_ADDR dest
2913 = get_frame_register_unsigned (frame, bits (insn, 3, 6));
2914
2915 /* Clear the LSB so that gdb core sets step-resume
2916 breakpoint at the right address. */
2917 return UNMAKE_THUMB_ADDR (dest);
2918 }
2919 }
2920 }
2921
2922 return 0;
2923 }
2924
2925 static struct arm_prologue_cache *
2926 arm_make_stub_cache (struct frame_info *this_frame)
2927 {
2928 struct arm_prologue_cache *cache;
2929
2930 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2931 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2932
2933 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2934
2935 return cache;
2936 }
2937
2938 /* Our frame ID for a stub frame is the current SP and LR. */
2939
2940 static void
2941 arm_stub_this_id (struct frame_info *this_frame,
2942 void **this_cache,
2943 struct frame_id *this_id)
2944 {
2945 struct arm_prologue_cache *cache;
2946
2947 if (*this_cache == NULL)
2948 *this_cache = arm_make_stub_cache (this_frame);
2949 cache = *this_cache;
2950
2951 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2952 }
2953
2954 static int
2955 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2956 struct frame_info *this_frame,
2957 void **this_prologue_cache)
2958 {
2959 CORE_ADDR addr_in_block;
2960 gdb_byte dummy[4];
2961 CORE_ADDR pc, start_addr;
2962 const char *name;
2963
2964 addr_in_block = get_frame_address_in_block (this_frame);
2965 pc = get_frame_pc (this_frame);
2966 if (in_plt_section (addr_in_block)
2967 /* We also use the stub winder if the target memory is unreadable
2968 to avoid having the prologue unwinder trying to read it. */
2969 || target_read_memory (pc, dummy, 4) != 0)
2970 return 1;
2971
2972 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0
2973 && arm_skip_bx_reg (this_frame, pc) != 0)
2974 return 1;
2975
2976 return 0;
2977 }
2978
/* Stub unwinder for PLT entries, unreadable code and inline Thumb
   trampolines; reuses arm_prologue_prev_register for register
   recovery.  */

struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2987
2988 /* Put here the code to store, into CACHE->saved_regs, the addresses
2989 of the saved registers of frame described by THIS_FRAME. CACHE is
2990 returned. */
2991
2992 static struct arm_prologue_cache *
2993 arm_m_exception_cache (struct frame_info *this_frame)
2994 {
2995 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2996 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2997 struct arm_prologue_cache *cache;
2998 CORE_ADDR unwound_sp;
2999 LONGEST xpsr;
3000
3001 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
3002 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
3003
3004 unwound_sp = get_frame_register_unsigned (this_frame,
3005 ARM_SP_REGNUM);
3006
3007 /* The hardware saves eight 32-bit words, comprising xPSR,
3008 ReturnAddress, LR (R14), R12, R3, R2, R1, R0. See details in
3009 "B1.5.6 Exception entry behavior" in
3010 "ARMv7-M Architecture Reference Manual". */
3011 cache->saved_regs[0].addr = unwound_sp;
3012 cache->saved_regs[1].addr = unwound_sp + 4;
3013 cache->saved_regs[2].addr = unwound_sp + 8;
3014 cache->saved_regs[3].addr = unwound_sp + 12;
3015 cache->saved_regs[12].addr = unwound_sp + 16;
3016 cache->saved_regs[14].addr = unwound_sp + 20;
3017 cache->saved_regs[15].addr = unwound_sp + 24;
3018 cache->saved_regs[ARM_PS_REGNUM].addr = unwound_sp + 28;
3019
3020 /* If bit 9 of the saved xPSR is set, then there is a four-byte
3021 aligner between the top of the 32-byte stack frame and the
3022 previous context's stack pointer. */
3023 cache->prev_sp = unwound_sp + 32;
3024 if (safe_read_memory_integer (unwound_sp + 28, 4, byte_order, &xpsr)
3025 && (xpsr & (1 << 9)) != 0)
3026 cache->prev_sp += 4;
3027
3028 return cache;
3029 }
3030
3031 /* Implementation of function hook 'this_id' in
3032 'struct frame_uwnind'. */
3033
3034 static void
3035 arm_m_exception_this_id (struct frame_info *this_frame,
3036 void **this_cache,
3037 struct frame_id *this_id)
3038 {
3039 struct arm_prologue_cache *cache;
3040
3041 if (*this_cache == NULL)
3042 *this_cache = arm_m_exception_cache (this_frame);
3043 cache = *this_cache;
3044
3045 /* Our frame ID for a stub frame is the current SP and LR. */
3046 *this_id = frame_id_build (cache->prev_sp,
3047 get_frame_pc (this_frame));
3048 }
3049
3050 /* Implementation of function hook 'prev_register' in
3051 'struct frame_uwnind'. */
3052
3053 static struct value *
3054 arm_m_exception_prev_register (struct frame_info *this_frame,
3055 void **this_cache,
3056 int prev_regnum)
3057 {
3058 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3059 struct arm_prologue_cache *cache;
3060
3061 if (*this_cache == NULL)
3062 *this_cache = arm_m_exception_cache (this_frame);
3063 cache = *this_cache;
3064
3065 /* The value was already reconstructed into PREV_SP. */
3066 if (prev_regnum == ARM_SP_REGNUM)
3067 return frame_unwind_got_constant (this_frame, prev_regnum,
3068 cache->prev_sp);
3069
3070 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
3071 prev_regnum);
3072 }
3073
3074 /* Implementation of function hook 'sniffer' in
3075 'struct frame_uwnind'. */
3076
3077 static int
3078 arm_m_exception_unwind_sniffer (const struct frame_unwind *self,
3079 struct frame_info *this_frame,
3080 void **this_prologue_cache)
3081 {
3082 CORE_ADDR this_pc = get_frame_pc (this_frame);
3083
3084 /* No need to check is_m; this sniffer is only registered for
3085 M-profile architectures. */
3086
3087 /* Exception frames return to one of these magic PCs. Other values
3088 are not defined as of v7-M. See details in "B1.5.8 Exception
3089 return behavior" in "ARMv7-M Architecture Reference Manual". */
3090 if (this_pc == 0xfffffff1 || this_pc == 0xfffffff9
3091 || this_pc == 0xfffffffd)
3092 return 1;
3093
3094 return 0;
3095 }
3096
/* Frame unwinder for M-profile exceptions.  Matches frames whose PC is
   one of the magic EXC_RETURN values recognized by the sniffer.  */

struct frame_unwind arm_m_exception_unwind =
{
  SIGTRAMP_FRAME,			/* Frame type.  */
  default_frame_unwind_stop_reason,	/* stop_reason hook.  */
  arm_m_exception_this_id,		/* this_id hook.  */
  arm_m_exception_prev_register,	/* prev_register hook.  */
  NULL,					/* No extra unwind data.  */
  arm_m_exception_unwind_sniffer	/* sniffer hook.  */
};
3108
3109 static CORE_ADDR
3110 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
3111 {
3112 struct arm_prologue_cache *cache;
3113
3114 if (*this_cache == NULL)
3115 *this_cache = arm_make_prologue_cache (this_frame);
3116 cache = *this_cache;
3117
3118 return cache->prev_sp - cache->framesize;
3119 }
3120
/* Frame base handler for normal (prologue-analyzed) frames.  The same
   address serves for the frame base, locals and arguments.  */
struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,
  arm_normal_frame_base,
  arm_normal_frame_base
};
3127
3128 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
3129 dummy frame. The frame ID's base needs to match the TOS value
3130 saved by save_dummy_frame_tos() and returned from
3131 arm_push_dummy_call, and the PC needs to match the dummy frame's
3132 breakpoint. */
3133
3134 static struct frame_id
3135 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3136 {
3137 return frame_id_build (get_frame_register_unsigned (this_frame,
3138 ARM_SP_REGNUM),
3139 get_frame_pc (this_frame));
3140 }
3141
3142 /* Given THIS_FRAME, find the previous frame's resume PC (which will
3143 be used to construct the previous frame's ID, after looking up the
3144 containing function). */
3145
3146 static CORE_ADDR
3147 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
3148 {
3149 CORE_ADDR pc;
3150 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
3151 return arm_addr_bits_remove (gdbarch, pc);
3152 }
3153
3154 static CORE_ADDR
3155 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
3156 {
3157 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
3158 }
3159
3160 static struct value *
3161 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
3162 int regnum)
3163 {
3164 struct gdbarch * gdbarch = get_frame_arch (this_frame);
3165 CORE_ADDR lr, cpsr;
3166 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3167
3168 switch (regnum)
3169 {
3170 case ARM_PC_REGNUM:
3171 /* The PC is normally copied from the return column, which
3172 describes saves of LR. However, that version may have an
3173 extra bit set to indicate Thumb state. The bit is not
3174 part of the PC. */
3175 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3176 return frame_unwind_got_constant (this_frame, regnum,
3177 arm_addr_bits_remove (gdbarch, lr));
3178
3179 case ARM_PS_REGNUM:
3180 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3181 cpsr = get_frame_register_unsigned (this_frame, regnum);
3182 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3183 if (IS_THUMB_ADDR (lr))
3184 cpsr |= t_bit;
3185 else
3186 cpsr &= ~t_bit;
3187 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3188
3189 default:
3190 internal_error (__FILE__, __LINE__,
3191 _("Unexpected register %d"), regnum);
3192 }
3193 }
3194
3195 static void
3196 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3197 struct dwarf2_frame_state_reg *reg,
3198 struct frame_info *this_frame)
3199 {
3200 switch (regnum)
3201 {
3202 case ARM_PC_REGNUM:
3203 case ARM_PS_REGNUM:
3204 reg->how = DWARF2_FRAME_REG_FN;
3205 reg->loc.fn = arm_dwarf2_prev_register;
3206 break;
3207 case ARM_SP_REGNUM:
3208 reg->how = DWARF2_FRAME_REG_CFA;
3209 break;
3210 }
3211 }
3212
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame, for Thumb
   mode.  PC is the current program counter.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without function bounds we cannot scan; assume not in epilogue.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      /* Thumb instructions are read in 16-bit halves.  */
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (thumb_instruction_restores_sp (insn))
	{
	  if ((insn & 0xff00) == 0xbd00)  /* pop <registers, PC> */
	    found_return = 1;
	}
      else if (thumb_insn_size (insn) == 4)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit encoding.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    ;
	  else
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (pc - 4 < func_start)
    return 0;
  if (target_read_memory (pc - 4, buf, 4))
    return 0;

  /* Decode the four preceding bytes both as one 32-bit Thumb-2
     instruction (insn:insn2) and, via insn2 alone, as the immediately
     preceding 16-bit instruction.  */
  insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
  insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

  if (thumb_instruction_restores_sp (insn2))
    found_stack_adjust = 1;
  else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
    found_stack_adjust = 1;
  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	   && (insn2 & 0x0fff) == 0x0b04)
    found_stack_adjust = 1;
  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	   && (insn2 & 0x0e00) == 0x0a00)
    found_stack_adjust = 1;

  return found_stack_adjust;
}
3323
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.  Dispatches
   to the Thumb variant when PC is in Thumb mode.  */

static int
arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return;
  CORE_ADDR func_start, func_end;

  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_in_function_epilogue_p (gdbarch, pc);

  /* Without function bounds we cannot scan; assume not in epilogue.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  /* Skip unconditional (NV-space) encodings; they are not returns.  */
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	       && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  if (pc < func_start + 4)
    return 0;

  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (arm_instruction_restores_sp (insn))
    return 1;

  return 0;
}
3378
3379
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  int len;			/* Size of DATA in bytes.  */
  struct stack_item *prev;	/* Item pushed before this one.  */
  void *data;			/* Heap-allocated copy of the contents.  */
};
3389
3390 static struct stack_item *
3391 push_stack_item (struct stack_item *prev, const void *contents, int len)
3392 {
3393 struct stack_item *si;
3394 si = xmalloc (sizeof (struct stack_item));
3395 si->data = xmalloc (len);
3396 si->len = len;
3397 si->prev = prev;
3398 memcpy (si->data, contents, len);
3399 return si;
3400 }
3401
3402 static struct stack_item *
3403 pop_stack_item (struct stack_item *si)
3404 {
3405 struct stack_item *dead = si;
3406 si = si->prev;
3407 xfree (dead->data);
3408 xfree (dead);
3409 return si;
3410 }
3411
3412
3413 /* Return the alignment (in bytes) of the given type. */
3414
3415 static int
3416 arm_type_align (struct type *t)
3417 {
3418 int n;
3419 int align;
3420 int falign;
3421
3422 t = check_typedef (t);
3423 switch (TYPE_CODE (t))
3424 {
3425 default:
3426 /* Should never happen. */
3427 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3428 return 4;
3429
3430 case TYPE_CODE_PTR:
3431 case TYPE_CODE_ENUM:
3432 case TYPE_CODE_INT:
3433 case TYPE_CODE_FLT:
3434 case TYPE_CODE_SET:
3435 case TYPE_CODE_RANGE:
3436 case TYPE_CODE_REF:
3437 case TYPE_CODE_CHAR:
3438 case TYPE_CODE_BOOL:
3439 return TYPE_LENGTH (t);
3440
3441 case TYPE_CODE_ARRAY:
3442 case TYPE_CODE_COMPLEX:
3443 /* TODO: What about vector types? */
3444 return arm_type_align (TYPE_TARGET_TYPE (t));
3445
3446 case TYPE_CODE_STRUCT:
3447 case TYPE_CODE_UNION:
3448 align = 1;
3449 for (n = 0; n < TYPE_NFIELDS (t); n++)
3450 {
3451 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3452 if (falign > align)
3453 align = falign;
3454 }
3455 return align;
3456 }
3457 }
3458
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,	/* Not (yet) classified.  */
  VFP_CPRC_SINGLE,	/* 32-bit single-precision float.  */
  VFP_CPRC_DOUBLE,	/* 64-bit double-precision float.  */
  VFP_CPRC_VEC64,	/* 64-bit vector.  */
  VFP_CPRC_VEC128	/* 128-bit vector.  */
};
3470
3471 /* The length of one element of base type B. */
3472
3473 static unsigned
3474 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3475 {
3476 switch (b)
3477 {
3478 case VFP_CPRC_SINGLE:
3479 return 4;
3480 case VFP_CPRC_DOUBLE:
3481 return 8;
3482 case VFP_CPRC_VEC64:
3483 return 8;
3484 case VFP_CPRC_VEC128:
3485 return 16;
3486 default:
3487 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3488 (int) b);
3489 }
3490 }
3491
3492 /* The character ('s', 'd' or 'q') for the type of VFP register used
3493 for passing base type B. */
3494
3495 static int
3496 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3497 {
3498 switch (b)
3499 {
3500 case VFP_CPRC_SINGLE:
3501 return 's';
3502 case VFP_CPRC_DOUBLE:
3503 return 'd';
3504 case VFP_CPRC_VEC64:
3505 return 'd';
3506 case VFP_CPRC_VEC128:
3507 return 'q';
3508 default:
3509 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3510 (int) b);
3511 }
3512 }
3513
/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.  If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vector types are not currently supported, matching the
   generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* A scalar float is one element of SINGLE or DOUBLE class,
	 depending on its size.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_COMPLEX:
      /* Arguments of complex T where T is one of the types float or
	 double get treated as if they are implemented as:

	 struct complexT
	 {
	   T real;
	   T imag;
	 };

	 */
      switch (TYPE_LENGTH (t))
	{
	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 2;

	case 16:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 2;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	/* An array contributes as many elements as fit in its total
	   length, once the element type has been classified.  */
	int count;
	unsigned unitlen;
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	/* A struct's element count is the sum over its fields; the
	   total length must match exactly (no padding allowed).  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	/* A union's element count is the maximum over its members.  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
3666
3667 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3668 if passed to or returned from a non-variadic function with the VFP
3669 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3670 *BASE_TYPE to the base type for T and *COUNT to the number of
3671 elements of that base type before returning. */
3672
3673 static int
3674 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3675 int *count)
3676 {
3677 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3678 int c = arm_vfp_cprc_sub_candidate (t, &b);
3679 if (c <= 0 || c > 4)
3680 return 0;
3681 *base_type = b;
3682 *count = c;
3683 return 1;
3684 }
3685
3686 /* Return 1 if the VFP ABI should be used for passing arguments to and
3687 returning values from a function of type FUNC_TYPE, 0
3688 otherwise. */
3689
3690 static int
3691 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3692 {
3693 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3694 /* Variadic functions always use the base ABI. Assume that functions
3695 without debug info are not variadic. */
3696 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3697 return 0;
3698 /* The VFP ABI is only supported as a variant of AAPCS. */
3699 if (tdep->arm_abi != ARM_ABI_AAPCS)
3700 return 0;
3701 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3702 }
3703
/* We currently only support passing parameters in integer registers, which
   conforms with GCC's default model, and VFP argument passing following
   the VFP variant of AAPCS.  Several other variants exist and
   we should probably support some of them based on the selected ABI.

   Push the NARGS values in ARGS for a call to FUNCTION, writing
   argument registers via REGCACHE and spilling the remainder below SP.
   BP_ADDR is the return breakpoint address (stored into LR);
   STRUCT_ADDR is the struct-return buffer if STRUCT_RETURN.  Returns
   the adjusted stack pointer.  */

static CORE_ADDR
arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp, int struct_return,
		     CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int argnum;
  int argreg;
  int nstack;
  struct stack_item *si = NULL;
  int use_vfp_abi;
  struct type *ftype;
  unsigned vfp_regs_free = (1 << 16) - 1;	/* Bitmask of free s-regs.  */

  /* Determine the type of this function and whether the VFP ABI
     applies.  */
  ftype = check_typedef (value_type (function));
  if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
    ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
  use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);

  /* Set the return address.  For the ARM, the return breakpoint is
     always at BP_ADDR.  */
  if (arm_pc_is_thumb (gdbarch, bp_addr))
    bp_addr |= 1;
  regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);

  /* Walk through the list of args and determine how large a temporary
     stack is required.  Need to take care here as structs may be
     passed on the stack, and we have to push them.  */
  nstack = 0;

  argreg = ARM_A1_REGNUM;
  /* NOTE(review): NSTACK was already zeroed above; this repeated
     assignment is redundant but harmless.  */
  nstack = 0;

  /* The struct_return pointer occupies the first parameter
     passing register.  */
  if (struct_return)
    {
      if (arm_debug)
	fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
			    gdbarch_register_name (gdbarch, argreg),
			    paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
      argreg++;
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      int len;
      struct type *arg_type;
      struct type *target_type;
      enum type_code typecode;
      const bfd_byte *val;
      int align;
      enum arm_vfp_cprc_base_type vfp_base_type;
      int vfp_base_count;
      int may_use_core_reg = 1;

      arg_type = check_typedef (value_type (args[argnum]));
      len = TYPE_LENGTH (arg_type);
      target_type = TYPE_TARGET_TYPE (arg_type);
      typecode = TYPE_CODE (arg_type);
      val = value_contents (args[argnum]);

      align = arm_type_align (arg_type);
      /* Round alignment up to a whole number of words.  */
      align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
      /* Different ABIs have different maximum alignments.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
	{
	  /* The APCS ABI only requires word alignment.  */
	  align = INT_REGISTER_SIZE;
	}
      else
	{
	  /* The AAPCS requires at most doubleword alignment.  */
	  if (align > INT_REGISTER_SIZE * 2)
	    align = INT_REGISTER_SIZE * 2;
	}

      if (use_vfp_abi
	  && arm_vfp_call_candidate (arg_type, &vfp_base_type,
				     &vfp_base_count))
	{
	  int regno;
	  int unit_length;
	  int shift;
	  unsigned mask;

	  /* Because this is a CPRC it cannot go in a core register or
	     cause a core register to be skipped for alignment.
	     Either it goes in VFP registers and the rest of this loop
	     iteration is skipped for this argument, or it goes on the
	     stack (and the stack alignment code is correct for this
	     case).  */
	  may_use_core_reg = 0;

	  /* Find a contiguous run of free VFP registers wide enough to
	     hold the whole CPRC.  */
	  unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
	  shift = unit_length / 4;
	  mask = (1 << (shift * vfp_base_count)) - 1;
	  for (regno = 0; regno < 16; regno += shift)
	    if (((vfp_regs_free >> regno) & mask) == mask)
	      break;

	  if (regno < 16)
	    {
	      int reg_char;
	      int reg_scaled;
	      int i;

	      vfp_regs_free &= ~(mask << regno);
	      reg_scaled = regno / shift;
	      reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
	      for (i = 0; i < vfp_base_count; i++)
		{
		  char name_buf[4];
		  int regnum;
		  if (reg_char == 'q')
		    /* Quad registers need a two-register write.  */
		    arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
					 val + i * unit_length);
		  else
		    {
		      xsnprintf (name_buf, sizeof (name_buf), "%c%d",
				 reg_char, reg_scaled + i);
		      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
							    strlen (name_buf));
		      regcache_cooked_write (regcache, regnum,
					     val + i * unit_length);
		    }
		}
	      continue;
	    }
	  else
	    {
	      /* This CPRC could not go in VFP registers, so all VFP
		 registers are now marked as used.  */
	      vfp_regs_free = 0;
	    }
	}

      /* Push stack padding for doubleword alignment.  */
      if (nstack & (align - 1))
	{
	  si = push_stack_item (si, val, INT_REGISTER_SIZE);
	  nstack += INT_REGISTER_SIZE;
	}

      /* Doubleword aligned quantities must go in even register pairs.  */
      if (may_use_core_reg
	  && argreg <= ARM_LAST_ARG_REGNUM
	  && align > INT_REGISTER_SIZE
	  && argreg & 1)
	argreg++;

      /* If the argument is a pointer to a function, and it is a
	 Thumb function, create a LOCAL copy of the value and set
	 the THUMB bit in it.  */
      if (TYPE_CODE_PTR == typecode
	  && target_type != NULL
	  && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
	{
	  CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
	  if (arm_pc_is_thumb (gdbarch, regval))
	    {
	      bfd_byte *copy = alloca (len);
	      store_unsigned_integer (copy, len, byte_order,
				      MAKE_THUMB_ADDR (regval));
	      val = copy;
	    }
	}

      /* Copy the argument to general registers or the stack in
	 register-sized pieces.  Large arguments are split between
	 registers and stack.  */
      while (len > 0)
	{
	  int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;

	  if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
	    {
	      /* The argument is being passed in a general purpose
		 register.  */
	      CORE_ADDR regval
		= extract_unsigned_integer (val, partial_len, byte_order);
	      if (byte_order == BFD_ENDIAN_BIG)
		regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
				    argnum,
				    gdbarch_register_name
				      (gdbarch, argreg),
				    phex (regval, INT_REGISTER_SIZE));
	      regcache_cooked_write_unsigned (regcache, argreg, regval);
	      argreg++;
	    }
	  else
	    {
	      /* Push the arguments onto the stack.  */
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
				    argnum, nstack);
	      si = push_stack_item (si, val, INT_REGISTER_SIZE);
	      nstack += INT_REGISTER_SIZE;
	    }

	  len -= partial_len;
	  val += partial_len;
	}
    }
  /* If we have an odd number of words to push, then decrement the stack
     by one word now, so first stack argument will be dword aligned.  */
  if (nstack & 4)
    sp -= 4;

  /* Write out the deferred stack items, deepest first.  */
  while (si)
    {
      sp -= si->len;
      write_memory (sp, si->data, si->len);
      si = pop_stack_item (si);
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);

  return sp;
}
3937
3938
3939 /* Always align the frame to an 8-byte boundary. This is required on
3940 some platforms and harmless on the rest. */
3941
3942 static CORE_ADDR
3943 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3944 {
3945 /* Align the stack to eight bytes. */
3946 return sp & ~ (CORE_ADDR) 7;
3947 }
3948
/* Print the names of the FPS exception flag bits (bits 0-4 of FLAGS)
   that are set, followed by a newline.  */

static void
print_fpu_flags (struct ui_file *file, int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int i;

  for (i = 0; i < 5; i++)
    if (flags & (1 << i))
      fputs_filtered (flag_names[i], file);
  fputc_filtered ('\n', file);
}
3964
3965 /* Print interesting information about the floating point processor
3966 (if present) or emulator. */
3967 static void
3968 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3969 struct frame_info *frame, const char *args)
3970 {
3971 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3972 int type;
3973
3974 type = (status >> 24) & 127;
3975 if (status & (1 << 31))
3976 fprintf_filtered (file, _("Hardware FPU type %d\n"), type);
3977 else
3978 fprintf_filtered (file, _("Software FPU type %d\n"), type);
3979 /* i18n: [floating point unit] mask */
3980 fputs_filtered (_("mask: "), file);
3981 print_fpu_flags (file, status >> 16);
3982 /* i18n: [floating point unit] flags */
3983 fputs_filtered (_("flags: "), file);
3984 print_fpu_flags (file, status);
3985 }
3986
3987 /* Construct the ARM extended floating point type. */
3988 static struct type *
3989 arm_ext_type (struct gdbarch *gdbarch)
3990 {
3991 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3992
3993 if (!tdep->arm_ext_type)
3994 tdep->arm_ext_type
3995 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3996 floatformats_arm_ext);
3997
3998 return tdep->arm_ext_type;
3999 }
4000
/* Build (once, cached on the tdep) the union type used to display a
   64-bit NEON D register: overlapping views as u8/u16/u32/u64 vectors
   and as f32 vector / f64 scalar.  */
static struct type *
arm_neon_double_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_double_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
			       TYPE_CODE_UNION);
      /* Each field is an alternative 64-bit-wide view of the register.  */
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", elem);
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", elem);

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_d";
      tdep->neon_double_type = t;
    }

  return tdep->neon_double_type;
}
4032
/* FIXME: The vector types are not correctly ordered on big-endian
   targets.  Just as s0 is the low bits of d0, d0[0] is also the low
   bits of d0 - regardless of what unit size is being held in d0.  So
   the offset of the first uint8 in d0 is 7, but the offset of the
   first float is 4.  This code works as-is for little-endian
   targets.  */

/* Build (once, cached on the tdep) the union type used to display a
   128-bit NEON Q register: overlapping vector views as
   u8/u16/u32/u64 and f32/f64.  */
static struct type *
arm_neon_quad_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_quad_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
			       TYPE_CODE_UNION);
      /* Each field is an alternative 128-bit-wide view of the register.  */
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 16));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", init_vector_type (elem, 2));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_q";
      tdep->neon_quad_type = t;
    }

  return tdep->neon_quad_type;
}
4071
/* Return the GDB type object for the "standard" data type of data in
   register N.  The ordering of checks below matters: pseudo-register
   ranges are handled before the target description, which in turn is
   handled before the built-in fallbacks.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* The first 32 pseudo registers are the single-precision VFP views.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* The next 16 pseudo registers are the NEON quad views.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      /* FPA registers are only meaningful when FPA is present.  */
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
4121
/* Map a DWARF register REG onto the appropriate GDB register
   number, or -1 if there is no mapping.  */

static int
arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  /* Core integer regs.  */
  if (reg >= 0 && reg <= 15)
    return reg;

  /* Legacy FPA encoding.  These were once used in a way which
     overlapped with VFP register numbering, so their use is
     discouraged, but GDB doesn't support the ARM toolchain
     which used them for VFP.  */
  if (reg >= 16 && reg <= 23)
    return ARM_F0_REGNUM + reg - 16;

  /* New assignments for the FPA registers.  */
  if (reg >= 96 && reg <= 103)
    return ARM_F0_REGNUM + reg - 96;

  /* WMMX register assignments.  */
  if (reg >= 104 && reg <= 111)
    return ARM_WCGR0_REGNUM + reg - 104;

  if (reg >= 112 && reg <= 127)
    return ARM_WR0_REGNUM + reg - 112;

  if (reg >= 192 && reg <= 199)
    return ARM_WC0_REGNUM + reg - 192;

  /* VFP v2 registers.  A double precision value is actually
     in d1 rather than s2, but the ABI only defines numbering
     for the single precision registers.  This will "just work"
     in GDB for little endian targets (we'll read eight bytes,
     starting in s0 and then progressing to s1), but will be
     reversed on big endian targets with VFP.  This won't
     be a problem for the new Neon quad registers; you're supposed
     to use DW_OP_piece for those.  */
  if (reg >= 64 && reg <= 95)
    {
      char name_buf[4];

      /* Resolve "sN" by name through the user-register machinery.  */
      xsnprintf (name_buf, sizeof (name_buf), "s%d", reg - 64);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  /* VFP v3 / Neon registers.  This range is also used for VFP v2
     registers, except that it now describes d0 instead of s0.  */
  if (reg >= 256 && reg <= 287)
    {
      char name_buf[4];

      /* Resolve "dN" by name through the user-register machinery.  */
      xsnprintf (name_buf, sizeof (name_buf), "d%d", reg - 256);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  return -1;
}
4183
4184 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4185 static int
4186 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4187 {
4188 int reg = regnum;
4189 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4190
4191 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4192 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4193
4194 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4195 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4196
4197 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4198 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4199
4200 if (reg < NUM_GREGS)
4201 return SIM_ARM_R0_REGNUM + reg;
4202 reg -= NUM_GREGS;
4203
4204 if (reg < NUM_FREGS)
4205 return SIM_ARM_FP0_REGNUM + reg;
4206 reg -= NUM_FREGS;
4207
4208 if (reg < NUM_SREGS)
4209 return SIM_ARM_FPS_REGNUM + reg;
4210 reg -= NUM_SREGS;
4211
4212 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4213 }
4214
/* NOTE: cagney/2001-08-20: Both convert_from_extended() and
   convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
   It is thought that this is the floating-point register format on
   little-endian systems.  */
4219
4220 static void
4221 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4222 void *dbl, int endianess)
4223 {
4224 DOUBLEST d;
4225
4226 if (endianess == BFD_ENDIAN_BIG)
4227 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4228 else
4229 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4230 ptr, &d);
4231 floatformat_from_doublest (fmt, &d, dbl);
4232 }
4233
4234 static void
4235 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4236 int endianess)
4237 {
4238 DOUBLEST d;
4239
4240 floatformat_to_doublest (fmt, ptr, &d);
4241 if (endianess == BFD_ENDIAN_BIG)
4242 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4243 else
4244 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4245 &d, dbl);
4246 }
4247
4248 static int
4249 condition_true (unsigned long cond, unsigned long status_reg)
4250 {
4251 if (cond == INST_AL || cond == INST_NV)
4252 return 1;
4253
4254 switch (cond)
4255 {
4256 case INST_EQ:
4257 return ((status_reg & FLAG_Z) != 0);
4258 case INST_NE:
4259 return ((status_reg & FLAG_Z) == 0);
4260 case INST_CS:
4261 return ((status_reg & FLAG_C) != 0);
4262 case INST_CC:
4263 return ((status_reg & FLAG_C) == 0);
4264 case INST_MI:
4265 return ((status_reg & FLAG_N) != 0);
4266 case INST_PL:
4267 return ((status_reg & FLAG_N) == 0);
4268 case INST_VS:
4269 return ((status_reg & FLAG_V) != 0);
4270 case INST_VC:
4271 return ((status_reg & FLAG_V) == 0);
4272 case INST_HI:
4273 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4274 case INST_LS:
4275 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4276 case INST_GE:
4277 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4278 case INST_LT:
4279 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4280 case INST_GT:
4281 return (((status_reg & FLAG_Z) == 0)
4282 && (((status_reg & FLAG_N) == 0)
4283 == ((status_reg & FLAG_V) == 0)));
4284 case INST_LE:
4285 return (((status_reg & FLAG_Z) != 0)
4286 || (((status_reg & FLAG_N) == 0)
4287 != ((status_reg & FLAG_V) == 0)));
4288 }
4289 return 1;
4290 }
4291
/* Evaluate the shifter operand of ARM data-processing instruction INST
   in the context of FRAME.  CARRY is the current carry flag (consumed
   only by RRX), PC_VAL is the address of the instruction.  STATUS_REG
   is currently unused.  Returns the 32-bit operand value.  */
static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift amount: only the low byte counts.
	 A PC read here yields the instruction address + 8.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount.  */
    shift = bits (inst, 7, 11);

  /* Rm read as PC yields address + 12 for register-specified shifts,
     address + 8 otherwise.  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Shifts of 32 or more replicate the sign bit everywhere.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	/* ROR #0 encodes RRX: rotate right one bit through carry.  */
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  return res & 0xffffffff;
}
4341
4342 /* Return number of 1-bits in VAL. */
4343
static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Examine VAL one bit at a time, accumulating the set bits.  */
  while (val != 0)
    {
      count += val & 1;
      val >>= 1;
    }
  return count;
}
4352
4353 /* Return the size in bytes of the complete Thumb instruction whose
4354 first halfword is INST1. */
4355
static int
thumb_insn_size (unsigned short inst1)
{
  /* A first halfword whose five top bits are 0b11101, 0b11110 or
     0b11111 begins a 32-bit Thumb-2 instruction; anything else is a
     16-bit instruction.  */
  return ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0) ? 4 : 2;
}
4364
static int
thumb_advance_itstate (unsigned int itstate)
{
  /* Keep IT[7:5] (the first three bits of the base condition) and
     shift the upcoming condition flags left by one.  */
  unsigned int next = (itstate & 0xe0) | ((itstate << 1) & 0x1f);

  /* A zero low nibble means the IT block has ended.  */
  if ((next & 0x0f) == 0)
    next = 0;

  return next;
}
4378
4379 /* Find the next PC after the current instruction executes. In some
4380 cases we can not statically determine the answer (see the IT state
4381 handling in this function); in that case, a breakpoint may be
4382 inserted in addition to the returned PC, which will be used to set
4383 another breakpoint by our caller. */
4384
static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;		/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  /* Addresses returned from here carry the Thumb bit set.  */
  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  /* Reassemble ITSTATE from its two CPSR chunks (bits 15:10 and 26:25).  */
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      arm_insert_single_step_breakpoint (gdbarch, aspace,
						 MAKE_THUMB_ADDR (pc));
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	/* Advance to the next instruction.  All the 32-bit
	   instructions share a common prefix.  */
	return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));

      /* Otherwise, handle the instruction normally.  */
    }

  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
	 all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)  /* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* Let the OS-specific code predict the PC after the
	     system call, when it knows how.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      offset = ((imm1 << 12) + (imm2 << 1));
	      /* J1/J2 are stored inverted relative to the sign bit.  */
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    load_pc = 0;

	  if (load_pc)
	    {
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* PC-relative: align the base down to a word boundary.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      if (bit (inst2, 10))
		{
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      /* Register offset with optional left shift.  */
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = UNMAKE_THUMB_ADDR (pc_val);
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }
  return nextpc;
}
4750
4751 /* Get the raw next address. PC is the current program counter, in
4752 FRAME, which is assumed to be executing in ARM mode.
4753
4754 The value returned has the execution state of the next instruction
4755 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4756 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4757 address. */
4758
static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  /* Condition field 0b1111 selects the unconditional instruction
     space handled here: BLX immediate and coprocessor transfers.  */
  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  Bit 24 supplies
	     bit 1 of the Thumb target address.  */
	  nextpc = BranchDest (pc, this_instr);
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:			/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only relevant when the destination register is the PC.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		return nextpc;
	      }

	    /* Multiply into PC.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    /* Rn read as PC yields the instruction address + 8.  */
	    operand1 = ((rn == ARM_PC_REGNUM)
			? (pc_val + 8)
			: get_frame_register_unsigned (frame, rn));

	    if (bit (this_instr, 25))
	      {
		/* Rotated 8-bit immediate operand.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    /* Emulate the ALU operation to find the value that will
	       be written to the PC.  */
	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* Comparison operations don't write the PC; keep the
		   default next instruction.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* byte write to PC */
		  rn = bits (this_instr, 16, 19);
		  base = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
		      (bit (this_instr, 25)
		       ? shifted_reg_val (frame, this_instr, c, pc_val, status)
		       : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
							      4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;
		  unsigned long rn_val
		    = get_frame_register_unsigned (frame,
						   bits (this_instr, 16, 19));

		  if (bit (this_instr, 23))
		    {
		      /* up */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
							      (rn_val + offset),
							      4, byte_order);
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    /* Let the OS-specific code predict the PC after the
	       system call, when it knows how.  */
	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}
5020
/* Determine next PC after current instruction executes.  Dispatches to
   either arm_get_next_pc_raw or thumb_get_next_pc_raw, depending on
   the execution state of FRAME.  */
5024
5025 CORE_ADDR
5026 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
5027 {
5028 CORE_ADDR nextpc;
5029
5030 if (arm_frame_is_thumb (frame))
5031 nextpc = thumb_get_next_pc_raw (frame, pc);
5032 else
5033 nextpc = arm_get_next_pc_raw (frame, pc);
5034
5035 return nextpc;
5036 }
5037
5038 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
5039 of the appropriate mode (as encoded in the PC value), even if this
5040 differs from what would be expected according to the symbol tables. */
5041
5042 void
5043 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
5044 struct address_space *aspace,
5045 CORE_ADDR pc)
5046 {
5047 struct cleanup *old_chain
5048 = make_cleanup_restore_integer (&arm_override_mode);
5049
5050 arm_override_mode = IS_THUMB_ADDR (pc);
5051 pc = gdbarch_addr_bits_remove (gdbarch, pc);
5052
5053 insert_single_step_breakpoint (gdbarch, aspace, pc);
5054
5055 do_cleanups (old_chain);
5056 }
5057
5058 /* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
5059 instruction and ending with a STREX{,B,H,D} instruction. If such a sequence
5060 is found, attempt to step through it. A breakpoint is placed at the end of
5061 the sequence. */
5062
/* Returns 1 if breakpoints were placed to step over the sequence,
   0 if the caller should fall back to ordinary single-stepping.  */
static int
thumb_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned short insn1, insn2;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  ULONGEST status, itstate;

  /* We currently do not support atomic sequences within an IT block.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
  if (itstate & 0x0f)
    return 0;

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.  */
  insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  if (thumb_insn_size (insn1) != 4)
    return 0;

  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  /* LDREX{,B,H,D} match either 0xe85x, or 0xe8dx with bits 7:6 == 01
     in the second halfword.  */
  if (!((insn1 & 0xfff0) == 0xe850
	|| ((insn1 & 0xfff0) == 0xe8d0 && (insn2 & 0x00c0) == 0x0040)))
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
      loc += 2;

      if (thumb_insn_size (insn1) != 4)
	{
	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf000) == 0xd000 && bits (insn1, 8, 11) != 0x0f)
	    {
	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + 2 + (sbits (insn1, 0, 7) << 1);
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb_instruction_changes_pc (insn1))
	    return 0;
	}
      else
	{
	  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
	  loc += 2;

	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf800) == 0xf000
	      && (insn2 & 0xd000) == 0x8000
	      && (insn1 & 0x0380) != 0x0380)
	    {
	      int sign, j1, j2, imm1, imm2;
	      unsigned int offset;

	      /* Decode the Thumb-2 conditional branch offset.  */
	      sign = sbits (insn1, 10, 10);
	      imm1 = bits (insn1, 0, 5);
	      imm2 = bits (insn2, 0, 10);
	      j1 = bit (insn2, 13);
	      j2 = bit (insn2, 11);

	      offset = (sign << 20) + (j2 << 19) + (j1 << 18);
	      offset += (imm1 << 12) + (imm2 << 1);

	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + offset;
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb2_instruction_changes_pc (insn1, insn2))
	    return 0;

	  /* If we find a strex{,b,h,d}, we're done.  */
	  if ((insn1 & 0xfff0) == 0xe840
	      || ((insn1 & 0xfff0) == 0xe8c0 && (insn2 & 0x00c0) == 0x0040))
	    break;
	}
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace,
				       MAKE_THUMB_ADDR (breaks[index]));

  return 1;
}
5193
/* Recognize an ARM-mode ldrex..strex atomic sequence beginning at the
   PC of FRAME, and if one is found, place single-step breakpoints so
   the whole sequence is stepped over as a unit: one breakpoint just
   past the sequence, plus at most one at the destination of a
   conditional branch inside it.  Returns 1 if breakpoints were
   placed, 0 to make the caller fall back to normal single-step.  */

static int
arm_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned int insn;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
     Note that we do not currently support conditionally executed atomic
     instructions.  */
  insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
  loc += 4;
  if ((insn & 0xff9000f0) != 0xe1900090)
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
      loc += 4;

      /* Assume that there is at most one conditional branch in the atomic
         sequence.  If a conditional branch is found, put a breakpoint in
         its destination address.  */
      if (bits (insn, 24, 27) == 0xa)
	{
          if (last_breakpoint > 0)
            return 0; /* More than one conditional branch found, fallback
                         to the standard single-step code.  */

	  breaks[1] = BranchDest (loc - 4, insn);
	  last_breakpoint++;
	}

      /* We do not support atomic sequences that use any *other* instructions
	 but conditional branches to change the PC.  Fall back to standard
	 code to avoid losing control of execution.  */
      else if (arm_instruction_changes_pc (insn))
	return 0;

      /* If we find a strex{,b,h,d}, we're done.  */
      if ((insn & 0xff9000f0) == 0xe1800090)
	break;
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
5268
/* Dispatch atomic-sequence handling to the decoder matching the
   instruction set FRAME is executing in.  Returns 1 if breakpoints
   were placed over a ldrex/strex sequence, 0 otherwise.  */

int
arm_deal_with_atomic_sequence (struct frame_info *frame)
{
  if (!arm_frame_is_thumb (frame))
    return arm_deal_with_atomic_sequence_raw (frame);

  return thumb_deal_with_atomic_sequence_raw (frame);
}
5277
5278 /* single_step() is called just before we want to resume the inferior,
5279 if we want to single-step it but there is no hardware or kernel
5280 single-step support. We find the target of the coming instruction
5281 and breakpoint it. */
5282
int
arm_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);

  /* A ldrex/strex sequence must be stepped over as a whole; if we are
     inside one, the handler has already placed the breakpoints.  */
  if (arm_deal_with_atomic_sequence (frame))
    return 1;

  /* Otherwise breakpoint the single instruction that will execute
     next.  */
  arm_insert_single_step_breakpoint (gdbarch, aspace,
				     arm_get_next_pc (frame,
						      get_frame_pc (frame)));

  return 1;
}
5298
5299 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
5300 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
5301 NULL if an error occurs. BUF is freed. */
5302
5303 static gdb_byte *
5304 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
5305 int old_len, int new_len)
5306 {
5307 gdb_byte *new_buf;
5308 int bytes_to_read = new_len - old_len;
5309
5310 new_buf = xmalloc (new_len);
5311 memcpy (new_buf + bytes_to_read, buf, old_len);
5312 xfree (buf);
5313 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
5314 {
5315 xfree (new_buf);
5316 return NULL;
5317 }
5318 return new_buf;
5319 }
5320
5321 /* An IT block is at most the 2-byte IT instruction followed by
5322 four 4-byte instructions. The furthest back we must search to
5323 find an IT block that affects the current instruction is thus
5324 2 + 3 * 4 == 14 bytes. */
5325 #define MAX_IT_BLOCK_PREFIX 14
5326
5327 /* Use a quick scan if there are more than this many bytes of
5328 code. */
5329 #define IT_SCAN_THRESHOLD 32
5330
5331 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
5332 A breakpoint in an IT block may not be hit, depending on the
5333 condition flags. */
5334 static CORE_ADDR
5335 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
5336 {
5337 gdb_byte *buf;
5338 char map_type;
5339 CORE_ADDR boundary, func_start;
5340 int buf_len;
5341 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
5342 int i, any, last_it, last_it_count;
5343
5344 /* If we are using BKPT breakpoints, none of this is necessary. */
5345 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
5346 return bpaddr;
5347
5348 /* ARM mode does not have this problem. */
5349 if (!arm_pc_is_thumb (gdbarch, bpaddr))
5350 return bpaddr;
5351
5352 /* We are setting a breakpoint in Thumb code that could potentially
5353 contain an IT block. The first step is to find how much Thumb
5354 code there is; we do not need to read outside of known Thumb
5355 sequences. */
5356 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
5357 if (map_type == 0)
5358 /* Thumb-2 code must have mapping symbols to have a chance. */
5359 return bpaddr;
5360
5361 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
5362
5363 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
5364 && func_start > boundary)
5365 boundary = func_start;
5366
5367 /* Search for a candidate IT instruction. We have to do some fancy
5368 footwork to distinguish a real IT instruction from the second
5369 half of a 32-bit instruction, but there is no need for that if
5370 there's no candidate. */
5371 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
5372 if (buf_len == 0)
5373 /* No room for an IT instruction. */
5374 return bpaddr;
5375
5376 buf = xmalloc (buf_len);
5377 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
5378 return bpaddr;
5379 any = 0;
5380 for (i = 0; i < buf_len; i += 2)
5381 {
5382 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5383 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5384 {
5385 any = 1;
5386 break;
5387 }
5388 }
5389 if (any == 0)
5390 {
5391 xfree (buf);
5392 return bpaddr;
5393 }
5394
5395 /* OK, the code bytes before this instruction contain at least one
5396 halfword which resembles an IT instruction. We know that it's
5397 Thumb code, but there are still two possibilities. Either the
5398 halfword really is an IT instruction, or it is the second half of
5399 a 32-bit Thumb instruction. The only way we can tell is to
5400 scan forwards from a known instruction boundary. */
5401 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5402 {
5403 int definite;
5404
5405 /* There's a lot of code before this instruction. Start with an
5406 optimistic search; it's easy to recognize halfwords that can
5407 not be the start of a 32-bit instruction, and use that to
5408 lock on to the instruction boundaries. */
5409 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5410 if (buf == NULL)
5411 return bpaddr;
5412 buf_len = IT_SCAN_THRESHOLD;
5413
5414 definite = 0;
5415 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5416 {
5417 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5418 if (thumb_insn_size (inst1) == 2)
5419 {
5420 definite = 1;
5421 break;
5422 }
5423 }
5424
5425 /* At this point, if DEFINITE, BUF[I] is the first place we
5426 are sure that we know the instruction boundaries, and it is far
5427 enough from BPADDR that we could not miss an IT instruction
5428 affecting BPADDR. If ! DEFINITE, give up - start from a
5429 known boundary. */
5430 if (! definite)
5431 {
5432 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5433 bpaddr - boundary);
5434 if (buf == NULL)
5435 return bpaddr;
5436 buf_len = bpaddr - boundary;
5437 i = 0;
5438 }
5439 }
5440 else
5441 {
5442 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5443 if (buf == NULL)
5444 return bpaddr;
5445 buf_len = bpaddr - boundary;
5446 i = 0;
5447 }
5448
5449 /* Scan forwards. Find the last IT instruction before BPADDR. */
5450 last_it = -1;
5451 last_it_count = 0;
5452 while (i < buf_len)
5453 {
5454 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5455 last_it_count--;
5456 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5457 {
5458 last_it = i;
5459 if (inst1 & 0x0001)
5460 last_it_count = 4;
5461 else if (inst1 & 0x0002)
5462 last_it_count = 3;
5463 else if (inst1 & 0x0004)
5464 last_it_count = 2;
5465 else
5466 last_it_count = 1;
5467 }
5468 i += thumb_insn_size (inst1);
5469 }
5470
5471 xfree (buf);
5472
5473 if (last_it == -1)
5474 /* There wasn't really an IT instruction after all. */
5475 return bpaddr;
5476
5477 if (last_it_count < 1)
5478 /* It was too far away. */
5479 return bpaddr;
5480
5481 /* This really is a trouble spot. Move the breakpoint to the IT
5482 instruction. */
5483 return bpaddr - buf_len + last_it;
5484 }
5485
5486 /* ARM displaced stepping support.
5487
5488 Generally ARM displaced stepping works as follows:
5489
5490 1. When an instruction is to be single-stepped, it is first decoded by
5491 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5492 Depending on the type of instruction, it is then copied to a scratch
5493 location, possibly in a modified form. The copy_* set of functions
5494 performs such modification, as necessary. A breakpoint is placed after
5495 the modified instruction in the scratch space to return control to GDB.
5496 Note in particular that instructions which modify the PC will no longer
5497 do so after modification.
5498
5499 2. The instruction is single-stepped, by setting the PC to the scratch
5500 location address, and resuming. Control returns to GDB when the
5501 breakpoint is hit.
5502
5503 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5504 function used for the current instruction. This function's job is to
5505 put the CPU/memory state back to what it would have been if the
5506 instruction had been executed unmodified in its original location. */
5507
5508 /* NOP instruction (mov r0, r0). */
5509 #define ARM_NOP 0xe1a00000
5510 #define THUMB_NOP 0x4600
5511
5512 /* Helper for register reads for displaced stepping. In particular, this
5513 returns the PC as it would be seen by the instruction at its original
5514 location. */
5515
5516 ULONGEST
5517 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5518 int regno)
5519 {
5520 ULONGEST ret;
5521 CORE_ADDR from = dsc->insn_addr;
5522
5523 if (regno == ARM_PC_REGNUM)
5524 {
5525 /* Compute pipeline offset:
5526 - When executing an ARM instruction, PC reads as the address of the
5527 current instruction plus 8.
5528 - When executing a Thumb instruction, PC reads as the address of the
5529 current instruction plus 4. */
5530
5531 if (!dsc->is_thumb)
5532 from += 8;
5533 else
5534 from += 4;
5535
5536 if (debug_displaced)
5537 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5538 (unsigned long) from);
5539 return (ULONGEST) from;
5540 }
5541 else
5542 {
5543 regcache_cooked_read_unsigned (regs, regno, &ret);
5544 if (debug_displaced)
5545 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5546 regno, (unsigned long) ret);
5547 return ret;
5548 }
5549 }
5550
5551 static int
5552 displaced_in_arm_mode (struct regcache *regs)
5553 {
5554 ULONGEST ps;
5555 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5556
5557 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5558
5559 return (ps & t_bit) == 0;
5560 }
5561
5562 /* Write to the PC as from a branch instruction. */
5563
5564 static void
5565 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5566 ULONGEST val)
5567 {
5568 if (!dsc->is_thumb)
5569 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5570 architecture versions < 6. */
5571 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5572 val & ~(ULONGEST) 0x3);
5573 else
5574 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5575 val & ~(ULONGEST) 0x1);
5576 }
5577
5578 /* Write to the PC as from a branch-exchange instruction. */
5579
5580 static void
5581 bx_write_pc (struct regcache *regs, ULONGEST val)
5582 {
5583 ULONGEST ps;
5584 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5585
5586 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5587
5588 if ((val & 1) == 1)
5589 {
5590 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5591 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5592 }
5593 else if ((val & 2) == 0)
5594 {
5595 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5596 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5597 }
5598 else
5599 {
5600 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5601 mode, align dest to 4 bytes). */
5602 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5603 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5604 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5605 }
5606 }
5607
5608 /* Write to the PC as if from a load instruction. */
5609
5610 static void
5611 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5612 ULONGEST val)
5613 {
5614 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5615 bx_write_pc (regs, val);
5616 else
5617 branch_write_pc (regs, dsc, val);
5618 }
5619
5620 /* Write to the PC as if from an ALU instruction. */
5621
5622 static void
5623 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5624 ULONGEST val)
5625 {
5626 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5627 bx_write_pc (regs, val);
5628 else
5629 branch_write_pc (regs, dsc, val);
5630 }
5631
5632 /* Helper for writing to registers for displaced stepping. Writing to the PC
5633 has a varying effects depending on the instruction which does the write:
5634 this is controlled by the WRITE_PC argument. */
5635
5636 void
5637 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5638 int regno, ULONGEST val, enum pc_write_style write_pc)
5639 {
5640 if (regno == ARM_PC_REGNUM)
5641 {
5642 if (debug_displaced)
5643 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5644 (unsigned long) val);
5645 switch (write_pc)
5646 {
5647 case BRANCH_WRITE_PC:
5648 branch_write_pc (regs, dsc, val);
5649 break;
5650
5651 case BX_WRITE_PC:
5652 bx_write_pc (regs, val);
5653 break;
5654
5655 case LOAD_WRITE_PC:
5656 load_write_pc (regs, dsc, val);
5657 break;
5658
5659 case ALU_WRITE_PC:
5660 alu_write_pc (regs, dsc, val);
5661 break;
5662
5663 case CANNOT_WRITE_PC:
5664 warning (_("Instruction wrote to PC in an unexpected way when "
5665 "single-stepping"));
5666 break;
5667
5668 default:
5669 internal_error (__FILE__, __LINE__,
5670 _("Invalid argument to displaced_write_reg"));
5671 }
5672
5673 dsc->wrote_to_pc = 1;
5674 }
5675 else
5676 {
5677 if (debug_displaced)
5678 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5679 regno, (unsigned long) val);
5680 regcache_cooked_write_unsigned (regs, regno, val);
5681 }
5682 }
5683
5684 /* This function is used to concisely determine if an instruction INSN
5685 references PC. Register fields of interest in INSN should have the
5686 corresponding fields of BITMASK set to 0b1111. The function
5687 returns return 1 if any of these fields in INSN reference the PC
5688 (also 0b1111, r15), else it returns 0. */
5689
/* Return 1 if any 4-bit register field of INSN marked by a 0b1111
   nibble in BITMASK holds the value 0b1111 (r15, the PC), else 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t remaining = bitmask;

  while (remaining != 0)
    {
      /* The lowest set bit of the mask marks the LSB of the next
	 register field; widen it to the full 4-bit field.  */
      uint32_t low = remaining & -remaining;
      uint32_t field = low * 0xf;

      if ((insn & field) == field)
	return 1;

      remaining &= ~field;
    }

  return 0;
}
5715
5716 /* The simplest copy function. Many instructions have the same effect no
5717 matter what address they are executed at: in those cases, use this. */
5718
5719 static int
5720 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5721 const char *iname, struct displaced_step_closure *dsc)
5722 {
5723 if (debug_displaced)
5724 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5725 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5726 iname);
5727
5728 dsc->modinsn[0] = insn;
5729
5730 return 0;
5731 }
5732
5733 static int
5734 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
5735 uint16_t insn2, const char *iname,
5736 struct displaced_step_closure *dsc)
5737 {
5738 if (debug_displaced)
5739 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
5740 "opcode/class '%s' unmodified\n", insn1, insn2,
5741 iname);
5742
5743 dsc->modinsn[0] = insn1;
5744 dsc->modinsn[1] = insn2;
5745 dsc->numinsns = 2;
5746
5747 return 0;
5748 }
5749
5750 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
5751 modification. */
5752 static int
5753 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
5754 const char *iname,
5755 struct displaced_step_closure *dsc)
5756 {
5757 if (debug_displaced)
5758 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
5759 "opcode/class '%s' unmodified\n", insn,
5760 iname);
5761
5762 dsc->modinsn[0] = insn;
5763
5764 return 0;
5765 }
5766
5767 /* Preload instructions with immediate offset. */
5768
5769 static void
5770 cleanup_preload (struct gdbarch *gdbarch,
5771 struct regcache *regs, struct displaced_step_closure *dsc)
5772 {
5773 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5774 if (!dsc->u.preload.immed)
5775 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5776 }
5777
5778 static void
5779 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5780 struct displaced_step_closure *dsc, unsigned int rn)
5781 {
5782 ULONGEST rn_val;
5783 /* Preload instructions:
5784
5785 {pli/pld} [rn, #+/-imm]
5786 ->
5787 {pli/pld} [r0, #+/-imm]. */
5788
5789 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5790 rn_val = displaced_read_reg (regs, dsc, rn);
5791 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5792 dsc->u.preload.immed = 1;
5793
5794 dsc->cleanup = &cleanup_preload;
5795 }
5796
5797 static int
5798 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5799 struct displaced_step_closure *dsc)
5800 {
5801 unsigned int rn = bits (insn, 16, 19);
5802
5803 if (!insn_references_pc (insn, 0x000f0000ul))
5804 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5805
5806 if (debug_displaced)
5807 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5808 (unsigned long) insn);
5809
5810 dsc->modinsn[0] = insn & 0xfff0ffff;
5811
5812 install_preload (gdbarch, regs, dsc, rn);
5813
5814 return 0;
5815 }
5816
static int
thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
		     struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int u_bit = bit (insn1, 7);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  /* Preloads based on a register other than the PC are
     position-independent and can execute unmodified.  */
  if (rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);

  /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
     PLD (literal) Encoding T1.  */
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
			imm12);

  /* The U bit selects the sign of the immediate offset.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction {pli/pld} PC imm12 into:
     Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12

     {pli/pld} [r0, r1]

     Cleanup: r0 <- tmp[0], r1 <- tmp[1].  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);

  /* displaced_read_reg yields the PC as it would read at the original
     instruction address, pipeline offset included.  */
  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
  /* Register-offset form: cleanup_preload must restore r1 too.  */
  dsc->u.preload.immed = 0;

  /* {pli/pld} [r0, r1] */
  dsc->modinsn[0] = insn1 & 0xfff0;
  dsc->modinsn[1] = 0xf001;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_preload;
  return 0;
}
5864
5865 /* Preload instructions with register offset. */
5866
5867 static void
5868 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
5869 struct displaced_step_closure *dsc, unsigned int rn,
5870 unsigned int rm)
5871 {
5872 ULONGEST rn_val, rm_val;
5873
5874 /* Preload register-offset instructions:
5875
5876 {pli/pld} [rn, rm {, shift}]
5877 ->
5878 {pli/pld} [r0, r1 {, shift}]. */
5879
5880 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5881 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5882 rn_val = displaced_read_reg (regs, dsc, rn);
5883 rm_val = displaced_read_reg (regs, dsc, rm);
5884 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5885 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5886 dsc->u.preload.immed = 0;
5887
5888 dsc->cleanup = &cleanup_preload;
5889 }
5890
5891 static int
5892 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5893 struct regcache *regs,
5894 struct displaced_step_closure *dsc)
5895 {
5896 unsigned int rn = bits (insn, 16, 19);
5897 unsigned int rm = bits (insn, 0, 3);
5898
5899
5900 if (!insn_references_pc (insn, 0x000f000ful))
5901 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5902
5903 if (debug_displaced)
5904 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5905 (unsigned long) insn);
5906
5907 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5908
5909 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5910 return 0;
5911 }
5912
5913 /* Copy/cleanup coprocessor load and store instructions. */
5914
5915 static void
5916 cleanup_copro_load_store (struct gdbarch *gdbarch,
5917 struct regcache *regs,
5918 struct displaced_step_closure *dsc)
5919 {
5920 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5921
5922 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5923
5924 if (dsc->u.ldst.writeback)
5925 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5926 }
5927
5928 static void
5929 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5930 struct displaced_step_closure *dsc,
5931 int writeback, unsigned int rn)
5932 {
5933 ULONGEST rn_val;
5934
5935 /* Coprocessor load/store instructions:
5936
5937 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5938 ->
5939 {stc/stc2} [r0, #+/-imm].
5940
5941 ldc/ldc2 are handled identically. */
5942
5943 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5944 rn_val = displaced_read_reg (regs, dsc, rn);
5945 /* PC should be 4-byte aligned. */
5946 rn_val = rn_val & 0xfffffffc;
5947 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5948
5949 dsc->u.ldst.writeback = writeback;
5950 dsc->u.ldst.rn = rn;
5951
5952 dsc->cleanup = &cleanup_copro_load_store;
5953 }
5954
5955 static int
5956 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5957 struct regcache *regs,
5958 struct displaced_step_closure *dsc)
5959 {
5960 unsigned int rn = bits (insn, 16, 19);
5961
5962 if (!insn_references_pc (insn, 0x000f0000ul))
5963 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5964
5965 if (debug_displaced)
5966 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5967 "load/store insn %.8lx\n", (unsigned long) insn);
5968
5969 dsc->modinsn[0] = insn & 0xfff0ffff;
5970
5971 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5972
5973 return 0;
5974 }
5975
5976 static int
5977 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
5978 uint16_t insn2, struct regcache *regs,
5979 struct displaced_step_closure *dsc)
5980 {
5981 unsigned int rn = bits (insn1, 0, 3);
5982
5983 if (rn != ARM_PC_REGNUM)
5984 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
5985 "copro load/store", dsc);
5986
5987 if (debug_displaced)
5988 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5989 "load/store insn %.4x%.4x\n", insn1, insn2);
5990
5991 dsc->modinsn[0] = insn1 & 0xfff0;
5992 dsc->modinsn[1] = insn2;
5993 dsc->numinsns = 2;
5994
5995 /* This function is called for copying instruction LDC/LDC2/VLDR, which
5996 doesn't support writeback, so pass 0. */
5997 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
5998
5999 return 0;
6000 }
6001
6002 /* Clean up branch instructions (actually perform the branch, by setting
6003 PC). */
6004
6005 static void
6006 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
6007 struct displaced_step_closure *dsc)
6008 {
6009 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6010 int branch_taken = condition_true (dsc->u.branch.cond, status);
6011 enum pc_write_style write_pc = dsc->u.branch.exchange
6012 ? BX_WRITE_PC : BRANCH_WRITE_PC;
6013
6014 if (!branch_taken)
6015 return;
6016
6017 if (dsc->u.branch.link)
6018 {
6019 /* The value of LR should be the next insn of current one. In order
6020 not to confuse logic hanlding later insn `bx lr', if current insn mode
6021 is Thumb, the bit 0 of LR value should be set to 1. */
6022 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
6023
6024 if (dsc->is_thumb)
6025 next_insn_addr |= 0x1;
6026
6027 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
6028 CANNOT_WRITE_PC);
6029 }
6030
6031 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
6032 }
6033
6034 /* Copy B/BL/BLX instructions with immediate destinations. */
6035
6036 static void
6037 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
6038 struct displaced_step_closure *dsc,
6039 unsigned int cond, int exchange, int link, long offset)
6040 {
6041 /* Implement "BL<cond> <label>" as:
6042
6043 Preparation: cond <- instruction condition
6044 Insn: mov r0, r0 (nop)
6045 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
6046
6047 B<cond> similar, but don't set r14 in cleanup. */
6048
6049 dsc->u.branch.cond = cond;
6050 dsc->u.branch.link = link;
6051 dsc->u.branch.exchange = exchange;
6052
6053 dsc->u.branch.dest = dsc->insn_addr;
6054 if (link && exchange)
6055 /* For BLX, offset is computed from the Align (PC, 4). */
6056 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
6057
6058 if (dsc->is_thumb)
6059 dsc->u.branch.dest += 4 + offset;
6060 else
6061 dsc->u.branch.dest += 8 + offset;
6062
6063 dsc->cleanup = &cleanup_branch;
6064 }
/* Copy an ARM-state B/BL/BLX with immediate destination.  */
static int
arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
		   struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  /* A 0xf condition field identifies BLX (always unconditional).  */
  int exchange = (cond == 0xf);
  int link = exchange || bit (insn, 24);
  long offset;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
			(unsigned long) insn);
  if (exchange)
    /* For BLX, set bit 0 of the destination.  The cleanup_branch function
       will then arrange the switch into Thumb mode.  For BLX, bit 24
       (the H bit) supplies the halfword offset, giving the
       destination halfword resolution.  */
    offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
  else
    offset = bits (insn, 0, 23) << 2;

  /* Sign-extend the offset: after the shift above, bit 25 is the sign
     bit of the 26-bit branch offset.  */
  if (bit (offset, 25))
    offset = offset | ~0x3ffffff;

  /* The scratch area runs a NOP; cleanup_branch performs the actual
     (possibly conditional) branch.  */
  dsc->modinsn[0] = ARM_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
6093
/* Copy a 32-bit Thumb B/BL/BLX with immediate destination.  */
static int
thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
		      uint16_t insn2, struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  int link = bit (insn2, 14);
  int exchange = link && !bit (insn2, 12);
  int cond = INST_AL;
  long offset = 0;
  int j1 = bit (insn2, 13);
  int j2 = bit (insn2, 11);
  /* S is the sign bit; sbits yields 0 or -1.  */
  int s = sbits (insn1, 10, 10);
  /* I1 = NOT (J1 XOR S), I2 = NOT (J2 XOR S), per the Thumb-2 branch
     encodings.  */
  int i1 = !(j1 ^ bit (insn1, 10));
  int i2 = !(j2 ^ bit (insn1, 10));

  if (!link && !exchange) /* B */
    {
      offset = (bits (insn2, 0, 10) << 1);
      if (bit (insn2, 12)) /* Encoding T4 */
	{
	  /* 24-bit offset from S:I1:I2:imm10:imm11:'0'; the S term
	     (0 or -1) sign-extends the result.  */
	  offset |= (bits (insn1, 0, 9) << 12)
		    | (i2 << 22)
		    | (i1 << 23)
		    | (s << 24);
	  cond = INST_AL;
	}
      else /* Encoding T3 */
	{
	  /* 20-bit offset from S:J2:J1:imm6:imm11:'0'; the condition
	     comes from the instruction itself.  */
	  offset |= (bits (insn1, 0, 5) << 12)
		    | (j1 << 18)
		    | (j2 << 19)
		    | (s << 20);
	  cond = bits (insn1, 6, 9);
	}
    }
  else
    {
      /* BL or BLX: S:I1:I2:imm10 high part; BLX drops the low bit of
	 the second halfword's immediate for word alignment.  */
      offset = (bits (insn1, 0, 9) << 12);
      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
      offset |= exchange ?
	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
			"%.4x %.4x with offset %.8lx\n",
			link ? (exchange) ? "blx" : "bl" : "b",
			insn1, insn2, offset);

  /* Execute a NOP in the scratch area; cleanup_branch performs the
     branch itself.  */
  dsc->modinsn[0] = THUMB_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
6148
6149 /* Copy B Thumb instructions. */
6150 static int
6151 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
6152 struct displaced_step_closure *dsc)
6153 {
6154 unsigned int cond = 0;
6155 int offset = 0;
6156 unsigned short bit_12_15 = bits (insn, 12, 15);
6157 CORE_ADDR from = dsc->insn_addr;
6158
6159 if (bit_12_15 == 0xd)
6160 {
6161 /* offset = SignExtend (imm8:0, 32) */
6162 offset = sbits ((insn << 1), 0, 8);
6163 cond = bits (insn, 8, 11);
6164 }
6165 else if (bit_12_15 == 0xe) /* Encoding T2 */
6166 {
6167 offset = sbits ((insn << 1), 0, 11);
6168 cond = INST_AL;
6169 }
6170
6171 if (debug_displaced)
6172 fprintf_unfiltered (gdb_stdlog,
6173 "displaced: copying b immediate insn %.4x "
6174 "with offset %d\n", insn, offset);
6175
6176 dsc->u.branch.cond = cond;
6177 dsc->u.branch.link = 0;
6178 dsc->u.branch.exchange = 0;
6179 dsc->u.branch.dest = from + 4 + offset;
6180
6181 dsc->modinsn[0] = THUMB_NOP;
6182
6183 dsc->cleanup = &cleanup_branch;
6184
6185 return 0;
6186 }
6187
6188 /* Copy BX/BLX with register-specified destinations. */
6189
6190 static void
6191 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
6192 struct displaced_step_closure *dsc, int link,
6193 unsigned int cond, unsigned int rm)
6194 {
6195 /* Implement {BX,BLX}<cond> <reg>" as:
6196
6197 Preparation: cond <- instruction condition
6198 Insn: mov r0, r0 (nop)
6199 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
6200
6201 Don't set r14 in cleanup for BX. */
6202
6203 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
6204
6205 dsc->u.branch.cond = cond;
6206 dsc->u.branch.link = link;
6207
6208 dsc->u.branch.exchange = 1;
6209
6210 dsc->cleanup = &cleanup_branch;
6211 }
6212
6213 static int
6214 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
6215 struct regcache *regs, struct displaced_step_closure *dsc)
6216 {
6217 unsigned int cond = bits (insn, 28, 31);
6218 /* BX: x12xxx1x
6219 BLX: x12xxx3x. */
6220 int link = bit (insn, 5);
6221 unsigned int rm = bits (insn, 0, 3);
6222
6223 if (debug_displaced)
6224 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
6225 (unsigned long) insn);
6226
6227 dsc->modinsn[0] = ARM_NOP;
6228
6229 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
6230 return 0;
6231 }
6232
6233 static int
6234 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
6235 struct regcache *regs,
6236 struct displaced_step_closure *dsc)
6237 {
6238 int link = bit (insn, 7);
6239 unsigned int rm = bits (insn, 3, 6);
6240
6241 if (debug_displaced)
6242 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
6243 (unsigned short) insn);
6244
6245 dsc->modinsn[0] = THUMB_NOP;
6246
6247 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
6248
6249 return 0;
6250 }
6251
6252
6253 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
6254
6255 static void
6256 cleanup_alu_imm (struct gdbarch *gdbarch,
6257 struct regcache *regs, struct displaced_step_closure *dsc)
6258 {
6259 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6260 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6261 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6262 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6263 }
6264
/* Copy an ARM data-processing (immediate) instruction, remapping any PC
   operands onto scratch registers so it can run out of line.  */

static int
arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		  struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  ULONGEST rd_val, rn_val;

  /* If neither Rd nor Rn (bits 12-19) is the PC, the instruction can be
     executed out of line unchanged.  */
  if (!insn_references_pc (insn, 0x000ff000ul))
    return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
			"%.8lx\n", is_mov ? "move" : "ALU",
			(unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  /* Save scratch registers, then load the original operand values into
     them; the scratch saves must happen before the overwrites.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Clear the Rd and Rn fields (bits 12-19).  MOV has no Rn operand;
     for other opcodes point Rn at scratch register r1.  */
  if (is_mov)
    dsc->modinsn[0] = insn & 0xfff00fff;
  else
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
6312
/* Copy a 32-bit Thumb-2 data-processing (immediate) instruction.  Only
   called for MOV (see the assert below); PC operands are remapped onto
   scratch registers r0/r1.  */

static int
thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
		     uint16_t insn2, struct regcache *regs,
		     struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn, rm, rd;
  ULONGEST rd_val, rn_val;

  rn = bits (insn1, 0, 3); /* Rn */
  rm = bits (insn2, 0, 3); /* Rm */
  rd = bits (insn2, 8, 11); /* Rd */

  /* This routine is only called for instruction MOV.  */
  gdb_assert (op == 0x2 && rn == 0xf);

  /* No PC involvement means the insn can run out of line unchanged.  */
  if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
			"ALU", insn1, insn2);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  /* Save the scratch registers before overwriting them with the original
     operand values.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Keep the first halfword; in the second, clear the Rd field and point
     it at scratch register r0 (0x1 in the low Rm nibble keeps the
     immediate encoding intact — see the Thumb-2 encoding).  */
  dsc->modinsn[0] = insn1;
  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
6364
6365 /* Copy/cleanup arithmetic/logic insns with register RHS. */
6366
6367 static void
6368 cleanup_alu_reg (struct gdbarch *gdbarch,
6369 struct regcache *regs, struct displaced_step_closure *dsc)
6370 {
6371 ULONGEST rd_val;
6372 int i;
6373
6374 rd_val = displaced_read_reg (regs, dsc, 0);
6375
6376 for (i = 0; i < 3; i++)
6377 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6378
6379 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6380 }
6381
/* Set up out-of-line execution of an ALU instruction with register
   operands RD, RN, RM, remapping them onto scratch registers r0-r2.
   The caller has already built the modified instruction.  */

static void
install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
		 struct displaced_step_closure *dsc,
		 unsigned int rd, unsigned int rn, unsigned int rm)
{
  ULONGEST rd_val, rn_val, rm_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm [, <shift>]

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
		  r0, r1, r2 <- rd, rn, rm
     Insn: <op><cond> r0, r1, r2 [, <shift>]
     Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
  */

  /* Save the scratch registers first; they must be read before being
     overwritten with the original operand values below.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  dsc->cleanup = &cleanup_alu_reg;
}
6414
/* Copy an ARM data-processing (register) instruction, remapping any PC
   operands onto scratch registers r0-r2.  */

static int
arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		  struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);

  /* If none of Rd, Rn, Rm (bits 12-19 and 0-3) is the PC, run the insn
     out of line unchanged.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
			is_mov ? "move" : "ALU", (unsigned long) insn);

  /* Clear the Rd/Rn/Rm fields and point them at scratch registers:
     Rm <- r2, and (for non-MOV, which has an Rn operand) Rn <- r1;
     Rd becomes r0 in both cases.  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
  else
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;

  install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
		   bits (insn, 0, 3));
  return 0;
}
6438
6439 static int
6440 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
6441 struct regcache *regs,
6442 struct displaced_step_closure *dsc)
6443 {
6444 unsigned rn, rm, rd;
6445
6446 rd = bits (insn, 3, 6);
6447 rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
6448 rm = 2;
6449
6450 if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6451 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
6452
6453 if (debug_displaced)
6454 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
6455 "ALU", (unsigned short) insn);
6456
6457 dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
6458
6459 install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
6460
6461 return 0;
6462 }
6463
6464 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6465
6466 static void
6467 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
6468 struct regcache *regs,
6469 struct displaced_step_closure *dsc)
6470 {
6471 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6472 int i;
6473
6474 for (i = 0; i < 4; i++)
6475 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6476
6477 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6478 }
6479
/* Set up out-of-line execution of an ALU instruction with a
   register-shifted register operand, remapping RD, RN, RM, RS onto
   scratch registers r0-r3.  The caller builds the modified insn.  */

static void
install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
			 struct displaced_step_closure *dsc,
			 unsigned int rd, unsigned int rn, unsigned int rm,
			 unsigned rs)
{
  int i;
  ULONGEST rd_val, rn_val, rm_val, rs_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm, <shift> rs

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
		  r0, r1, r2, r3 <- rd, rn, rm, rs
     Insn: <op><cond> r0, r1, r2, <shift> r3
     Cleanup: tmp5 <- r0
	      r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
	      rd <- tmp5
  */

  /* Save the scratch registers before they are overwritten below.  */
  for (i = 0; i < 4; i++)
    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  rs_val = displaced_read_reg (regs, dsc, rs);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
  dsc->rd = rd;
  dsc->cleanup = &cleanup_alu_shifted_reg;
}
6517
/* Copy an ARM data-processing instruction with register-shifted register
   operand, remapping any PC operands onto scratch registers r0-r3.  */

static int
arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  unsigned int rd, rn, rm, rs;

  /* If none of Rd, Rn, Rm, Rs (bits 0-3, 8-19) is the PC, the insn can
     run out of line unchanged.  */
  if (!insn_references_pc (insn, 0x000fff0ful))
    return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
			"%.8lx\n", is_mov ? "move" : "ALU",
			(unsigned long) insn);

  rn = bits (insn, 16, 19);
  rm = bits (insn, 0, 3);
  rs = bits (insn, 8, 11);
  rd = bits (insn, 12, 15);

  /* Clear the register fields and substitute scratch registers:
     Rs <- r3, Rm <- r2, and (for non-MOV) Rn <- r1; Rd becomes r0.  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
  else
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;

  install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);

  return 0;
}
6549
/* Clean up load instructions.  The out-of-line copy left the loaded data
   in r0 (and r1 for doubleword transfers) and the possibly-updated base
   address in r2; move everything back where it belongs.  */

static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* Fetch the results before restoring the scratch registers.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers (r3 only if a register offset was
     remapped into it).  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  Loading into the PC is allowed here and
     redirects the displaced step (LOAD_WRITE_PC).  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
6578
/* Clean up store instructions.  Restore the scratch registers used by
   the out-of-line copy and apply base-register writeback.  */

static void
cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
	       struct displaced_step_closure *dsc)
{
  /* r2 holds the (possibly auto-updated) base address.  */
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore scratch registers: r1 only for doubleword transfers, r3 only
     when a register offset was remapped into it, r4 only when it was
     used for the PC-store fixup sequence.  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.restore_r4)
    displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);

  /* Writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
}
6600
6601 /* Copy "extra" load/store instructions. These are halfword/doubleword
6602 transfers, which have a different encoding to byte/word transfers. */
6603
6604 static int
6605 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
6606 struct regcache *regs, struct displaced_step_closure *dsc)
6607 {
6608 unsigned int op1 = bits (insn, 20, 24);
6609 unsigned int op2 = bits (insn, 5, 6);
6610 unsigned int rt = bits (insn, 12, 15);
6611 unsigned int rn = bits (insn, 16, 19);
6612 unsigned int rm = bits (insn, 0, 3);
6613 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6614 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6615 int immed = (op1 & 0x4) != 0;
6616 int opcode;
6617 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
6618
6619 if (!insn_references_pc (insn, 0x000ff00ful))
6620 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
6621
6622 if (debug_displaced)
6623 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
6624 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
6625 (unsigned long) insn);
6626
6627 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
6628
6629 if (opcode < 0)
6630 internal_error (__FILE__, __LINE__,
6631 _("copy_extra_ld_st: instruction decode error"));
6632
6633 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6634 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6635 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6636 if (!immed)
6637 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6638
6639 rt_val = displaced_read_reg (regs, dsc, rt);
6640 if (bytesize[opcode] == 8)
6641 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
6642 rn_val = displaced_read_reg (regs, dsc, rn);
6643 if (!immed)
6644 rm_val = displaced_read_reg (regs, dsc, rm);
6645
6646 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6647 if (bytesize[opcode] == 8)
6648 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
6649 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6650 if (!immed)
6651 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6652
6653 dsc->rd = rt;
6654 dsc->u.ldst.xfersize = bytesize[opcode];
6655 dsc->u.ldst.rn = rn;
6656 dsc->u.ldst.immed = immed;
6657 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
6658 dsc->u.ldst.restore_r4 = 0;
6659
6660 if (immed)
6661 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6662 ->
6663 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6664 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6665 else
6666 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6667 ->
6668 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6669 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6670
6671 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
6672
6673 return 0;
6674 }
6675
/* Copy byte/half word/word loads and stores.  Sets up the scratch
   registers (Rt -> r0, Rn -> r2, Rm -> r3) for an out-of-line copy of a
   load/store and records the ldst state the cleanup routine needs.  The
   caller supplies the modified instruction(s).  */

static void
install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
		    struct displaced_step_closure *dsc, int load,
		    int immed, int writeback, int size, int usermode,
		    int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Save scratch registers before overwriting them (r3 only when a
     register offset is in use; r4 only for stores, which may need it
     for the PC-store fixup sequence described below).  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rt;
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from dispalced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop  {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc   r4 = addr(Insn1) + offset - pc
                             = addr(Insn1) + offset - addr(Insn3) - 8
                             = offset - 16
     Insn4: add r4, r4, #8   r4 = offset - 8
     Insn5: add r0, r0, r4   r0 = from + 8 + offset - 8
                             = from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
6731
6732
/* Copy a Thumb-2 PC-relative (literal) load, LDR<width> Rt, [PC, #imm12].
   The PC base and the offset are materialized into scratch registers r2
   and r3, and the insn is replaced by a register-offset LDR.  */

static int
thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc, int size)
{
  unsigned int u_bit = bit (insn1, 7);
  unsigned int rt = bits (insn2, 12, 15);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
			imm12);

  /* U bit clear means the offset is subtracted from the base.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction LDR Rt imm12 into:

     Prepare: tmp[0] <- r0, tmp[1] <- r2, tmp[2] <- r3, r2 <- pc, r3 <- imm12

     LDR R0, R2, R3,

     Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[1], r3 <- tmp[2].  */


  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  /* The literal base is Align(PC, 4).  */
  pc_val = pc_val & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);

  dsc->rd = rt;

  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  /* LDR R0, R2, R3 */
  dsc->modinsn[0] = 0xf852;
  dsc->modinsn[1] = 0x3;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_load;

  return 0;
}
6788
/* Copy a Thumb-2 load with register or immediate offset, remapping PC
   operands onto scratch registers (Rt -> r0, Rn -> r2, Rm -> r3).  */

static int
thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc,
			  int writeback, int immed)
{
  unsigned int rt = bits (insn2, 12, 15);
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
  /* In LDR (register), there is also a register Rm, which is not allowed to
     be PC, so we don't have to check it.  */

  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
					dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
			rt, rn, insn1, insn2);

  install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
		      0, rt, rm, rn);

  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* ldr[b]<cond> rt, [rn, #imm], etc.
       ->
       ldr[b]<cond> r0, [r2, #imm].  */
    {
      /* Point the Rn field at r2; keep the immediate unchanged.  */
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = insn2 & 0x0fff;
    }
  else
    /* ldr[b]<cond> rt, [rn, rm], etc.
       ->
       ldr[b]<cond> r0, [r2, r3].  */
    {
      /* Point the Rn field at r2 and the Rm field at r3.  */
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
    }

  dsc->numinsns = 2;

  return 0;
}
6836
6837
/* Copy an ARM byte/word load or store, remapping PC operands onto
   scratch registers.  Storing the PC needs a 6-instruction fixup
   sequence (see the comment in install_load_store) because the stored
   PC offset is implementation-defined.  */

static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    struct displaced_step_closure *dsc,
			    int load, int size, int usermode)
{
  int immed = !bit (insn, 25);
  /* Writeback: post-indexed (P clear) or W-bit set.  */
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  /* If none of Rt, Rn, Rm references the PC, run the insn unchanged.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
			load ? (size == 1 ? "ldrb" : "ldr")
			     : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
			rt, rn,
			(unsigned long) insn);

  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
		      usermode, rt, rm, rn);

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
6902
/* Cleanup LDM instructions with fully-populated register list.  This is an
   unfortunate corner case: it's impossible to implement correctly by modifying
   the instruction.  The issue is as follows: we have an instruction,

   ldm rN, {r0-r15}

   which we must rewrite to avoid loading PC.  A possible solution would be to
   do the load in two halves, something like (with suitable cleanup
   afterwards):

   mov r8, rN
   ldm[id][ab] r8!, {r0-r7}
   str r7, <temp>
   ldm[id][ab] r8, {r7-r14}
   <bkpt>

   but at present there's no suitable place for <temp>, since the scratch space
   is overwritten before the cleanup routine is called.  For now, we simply
   emulate the instruction.  */

static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  /* Pre-indexed forms bump the address before each transfer; post-indexed
     forms bump it after.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Walk registers in ascending order for increment, descending for
     decrement, to match the memory order of the transfer.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  while (regmask)
    {
      uint32_t memword;

      /* Find the next register in the transfer list.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Emulate writeback into the base register.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
6981
/* Clean up an STM which included the PC in the register list.  The copy
   ran out of line and stored the (architecture-dependent) PC value of
   the scratch-area copy; detect the offset it used and patch the stored
   word to the value the original instruction would have written.  */

static void
cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int store_executed = condition_true (dsc->u.block.cond, status);
  CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
  CORE_ADDR stm_insn_addr;
  uint32_t pc_val;
  long offset;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* If condition code fails, there's nothing else to do.  */
  if (!store_executed)
    return;

  /* STM stores registers lowest-numbered at the lowest address, so the
     PC (r15) is always the last/highest word of the transfer.  */
  if (dsc->u.block.increment)
    {
      pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;

      if (dsc->u.block.before)
	pc_stored_at += 4;
    }
  else
    {
      pc_stored_at = dsc->u.block.xfer_addr;

      if (dsc->u.block.before)
	pc_stored_at -= 4;
    }

  pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
  stm_insn_addr = dsc->scratch_base;
  /* OFFSET captures the implementation-defined store offset (PC+8 or
     PC+12).  NOTE(review): printed with %lx though declared signed long
     — same representation, but pulongest-style formatting would be
     cleaner; confirm against current upstream.  */
  offset = pc_val - stm_insn_addr;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
			"STM instruction\n", offset);

  /* Rewrite the stored PC to the proper value for the non-displaced original
     instruction.  */
  write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
				 dsc->insn_addr + offset);
}
7028
/* Clean up an LDM which includes the PC in the register list.  We clumped all
   the registers in the transferred list into a contiguous range r0...rX (to
   avoid loading PC directly and losing control of the debugged program), so we
   must undo that here.  */

static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int load_executed = condition_true (dsc->u.block.cond, status);
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = bitcount (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  if (!load_executed)
    return;

  /* CLOBBERED tracks which of the low registers r0..rX still hold loaded
     values that have not yet been moved to their real destination.  */
  clobbered = (1 << num_to_shuffle) - 1;

  /* Walk destinations from highest (PC) downwards; the value for the
     highest remaining destination sits in the highest still-unconsumed
     low register (num_to_shuffle - 1).  */
  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      if (debug_displaced)
		fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
				    "loaded register r%d to r%d\n"), read_reg,
				    write_reg);
	    }
	  else if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
				"r%d already in the right place\n"),
				write_reg);

	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
				"clobbered register r%d\n"), write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually.  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}
7110
/* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
   in user-level code (in particular exception return, ldm rn, {...pc}^).  */

static int
arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
		     struct regcache *regs,
		     struct displaced_step_closure *dsc)
{
  int load = bit (insn, 20);
  int user = bit (insn, 22);
  int increment = bit (insn, 23);
  int before = bit (insn, 24);
  int writeback = bit (insn, 21);
  int rn = bits (insn, 16, 19);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
    return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
			"%.8lx\n", (unsigned long) insn);

  /* Record everything the cleanup routines need to emulate or fix up the
     transfer.  */
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = user;
  dsc->u.block.increment = increment;
  dsc->u.block.before = before;
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = bits (insn, 28, 31);

  dsc->u.block.regmask = insn & 0xffff;

  if (load)
    {
      if ((insn & 0xffff) == 0xffff)
	{
	  /* LDM with a fully-populated register list.  This case is
	     particularly tricky.  Implement for now by fully emulating the
	     instruction (which might not behave perfectly in all cases, but
	     these instructions should be rare enough for that not to matter
	     too much).  */
	  dsc->modinsn[0] = ARM_NOP;

	  dsc->cleanup = &cleanup_block_load_all;
	}
      else
	{
	  /* LDM of a list of registers which includes PC.  Implement by
	     rewriting the list of registers to be transferred into a
	     contiguous chunk r0...rX before doing the transfer, then shuffling
	     registers into the correct places in the cleanup routine.  */
	  unsigned int regmask = insn & 0xffff;
	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
	  unsigned int to = 0, from = 0, i, new_rn;

	  /* Save the low registers the rewritten list will clobber.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Writeback makes things complicated.  We need to avoid clobbering
	     the base register with one of the registers in our modified
	     register list, but just using a different register can't work in
	     all cases, e.g.:

	     ldm r14!, {r0-r13,pc}

	     which would need to be rewritten as:

	     ldm rN!, {r0-r14}

	     but that can't work, because there's no free register for N.

	     Solve this by turning off the writeback bit, and emulating
	     writeback manually in the cleanup routine.  */

	  if (writeback)
	    insn &= ~(1 << 21);

	  new_regmask = (1 << num_in_list) - 1;

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
				"{..., pc}: original reg list %.4x, modified "
				"list %.4x\n"), rn, writeback ? "!" : "",
				(int) insn & 0xffff, new_regmask);

	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      /* STM of a list of registers which includes PC.  Run the instruction
	 as-is, but out of line: this will store the wrong value for the PC,
	 so we must manually fix up the memory in the cleanup routine.
	 Doing things this way has the advantage that we can auto-detect
	 the offset of the PC write (which is architecture-dependent) in
	 the cleanup routine.  */
      dsc->modinsn[0] = insn;

      dsc->cleanup = &cleanup_block_store_pc;
    }

  return 0;
}
7227
7228 static int
7229 thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7230 struct regcache *regs,
7231 struct displaced_step_closure *dsc)
7232 {
7233 int rn = bits (insn1, 0, 3);
7234 int load = bit (insn1, 4);
7235 int writeback = bit (insn1, 5);
7236
7237 /* Block transfers which don't mention PC can be run directly
7238 out-of-line. */
7239 if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
7240 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
7241
7242 if (rn == ARM_PC_REGNUM)
7243 {
7244 warning (_("displaced: Unpredictable LDM or STM with "
7245 "base register r15"));
7246 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7247 "unpredictable ldm/stm", dsc);
7248 }
7249
7250 if (debug_displaced)
7251 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
7252 "%.4x%.4x\n", insn1, insn2);
7253
7254 /* Clear bit 13, since it should be always zero. */
7255 dsc->u.block.regmask = (insn2 & 0xdfff);
7256 dsc->u.block.rn = rn;
7257
7258 dsc->u.block.load = load;
7259 dsc->u.block.user = 0;
7260 dsc->u.block.increment = bit (insn1, 7);
7261 dsc->u.block.before = bit (insn1, 8);
7262 dsc->u.block.writeback = writeback;
7263 dsc->u.block.cond = INST_AL;
7264 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
7265
7266 if (load)
7267 {
7268 if (dsc->u.block.regmask == 0xffff)
7269 {
7270 /* This branch is impossible to happen. */
7271 gdb_assert (0);
7272 }
7273 else
7274 {
7275 unsigned int regmask = dsc->u.block.regmask;
7276 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
7277 unsigned int to = 0, from = 0, i, new_rn;
7278
7279 for (i = 0; i < num_in_list; i++)
7280 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7281
7282 if (writeback)
7283 insn1 &= ~(1 << 5);
7284
7285 new_regmask = (1 << num_in_list) - 1;
7286
7287 if (debug_displaced)
7288 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
7289 "{..., pc}: original reg list %.4x, modified "
7290 "list %.4x\n"), rn, writeback ? "!" : "",
7291 (int) dsc->u.block.regmask, new_regmask);
7292
7293 dsc->modinsn[0] = insn1;
7294 dsc->modinsn[1] = (new_regmask & 0xffff);
7295 dsc->numinsns = 2;
7296
7297 dsc->cleanup = &cleanup_block_load_pc;
7298 }
7299 }
7300 else
7301 {
7302 dsc->modinsn[0] = insn1;
7303 dsc->modinsn[1] = insn2;
7304 dsc->numinsns = 2;
7305 dsc->cleanup = &cleanup_block_store_pc;
7306 }
7307 return 0;
7308 }
7309
7310 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
7311 for Linux, where some SVC instructions must be treated specially. */
7312
7313 static void
7314 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
7315 struct displaced_step_closure *dsc)
7316 {
7317 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
7318
7319 if (debug_displaced)
7320 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
7321 "%.8lx\n", (unsigned long) resume_addr);
7322
7323 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
7324 }
7325
7326
/* Common copy routine for svc instruction.  */
7328
7329 static int
7330 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
7331 struct displaced_step_closure *dsc)
7332 {
7333 /* Preparation: none.
7334 Insn: unmodified svc.
7335 Cleanup: pc <- insn_addr + insn_size. */
7336
7337 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
7338 instruction. */
7339 dsc->wrote_to_pc = 1;
7340
7341 /* Allow OS-specific code to override SVC handling. */
7342 if (dsc->u.svc.copy_svc_os)
7343 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
7344 else
7345 {
7346 dsc->cleanup = &cleanup_svc;
7347 return 0;
7348 }
7349 }
7350
7351 static int
7352 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
7353 struct regcache *regs, struct displaced_step_closure *dsc)
7354 {
7355
7356 if (debug_displaced)
7357 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
7358 (unsigned long) insn);
7359
7360 dsc->modinsn[0] = insn;
7361
7362 return install_svc (gdbarch, regs, dsc);
7363 }
7364
7365 static int
7366 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
7367 struct regcache *regs, struct displaced_step_closure *dsc)
7368 {
7369
7370 if (debug_displaced)
7371 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
7372 insn);
7373
7374 dsc->modinsn[0] = insn;
7375
7376 return install_svc (gdbarch, regs, dsc);
7377 }
7378
7379 /* Copy undefined instructions. */
7380
7381 static int
7382 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
7383 struct displaced_step_closure *dsc)
7384 {
7385 if (debug_displaced)
7386 fprintf_unfiltered (gdb_stdlog,
7387 "displaced: copying undefined insn %.8lx\n",
7388 (unsigned long) insn);
7389
7390 dsc->modinsn[0] = insn;
7391
7392 return 0;
7393 }
7394
7395 static int
7396 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7397 struct displaced_step_closure *dsc)
7398 {
7399
7400 if (debug_displaced)
7401 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
7402 "%.4x %.4x\n", (unsigned short) insn1,
7403 (unsigned short) insn2);
7404
7405 dsc->modinsn[0] = insn1;
7406 dsc->modinsn[1] = insn2;
7407 dsc->numinsns = 2;
7408
7409 return 0;
7410 }
7411
7412 /* Copy unpredictable instructions. */
7413
7414 static int
7415 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
7416 struct displaced_step_closure *dsc)
7417 {
7418 if (debug_displaced)
7419 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
7420 "%.8lx\n", (unsigned long) insn);
7421
7422 dsc->modinsn[0] = insn;
7423
7424 return 0;
7425 }
7426
7427 /* The decode_* functions are instruction decoding helpers. They mostly follow
7428 the presentation in the ARM ARM. */
7429
/* Decode miscellaneous instructions, memory hints and Advanced SIMD
   (Neon) instructions in the unconditional instruction space.  OP1 is
   insn bits [26:20] and OP2 is bits [7:4]; the cascade of tests below
   mirrors the corresponding decode table in the ARM ARM.  Nearly all of
   these can run out of line unmodified; only the preload forms
   (pli/pld/pldw) need PC-relative handling via arm_copy_preload*.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    /* Barrier and synchronization hints.  */
    switch (op2)
      {
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    /* Register-offset preload forms; bit 7 of OP1 is ignored here.  */
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
        /* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
7489
/* Decode instructions in the ARM "unconditional" space (condition field
   0b1111).  Bit 27 clear selects the misc/memhint/Neon group; otherwise
   dispatch on a 4-bit key built from insn bits [26:24] and [20].  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      /* Store coprocessor / mcrr group; distinguish on bits [23:21].  */
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* Load coprocessor / mrrc group; validity of the immediate and
	   literal forms depends on whether Rn is the PC.  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7572
7573 /* Decode miscellaneous instructions in dp/misc encoding space. */
7574
/* Decode miscellaneous instructions in the dp/misc encoding space.
   OP2 is insn bits [6:4] and OP is bits [22:21].  Only bx/blx need PC
   handling (via arm_copy_bx_blx_reg); everything else is copied
   unmodified or treated as undefined.  */

static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
        /* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
        /* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      /* Fall through: other OP values are undefined.  */

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7625
/* Decode data-processing / miscellaneous instructions.  Bit 25 selects
   the immediate forms; otherwise OP1 (bits [24:20]) and OP2 (bits
   [7:4]) distinguish register/shifted-register ALU operations,
   multiplies, synchronization primitives and extra load/stores.  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7671
/* Decode load/store word and unsigned byte instructions.  A is insn
   bit 25 (register vs. immediate offset) and B is bit 4; OP1 is bits
   [24:20].  The trailing three arguments to arm_copy_ldr_str_ldrb_strb
   appear to be (load, size, usermode) -- TODO confirm against its
   definition.  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
7709
/* Decode media instructions.  Every recognized encoding here is copied
   unmodified; unallocated encodings are treated as undefined.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
			      "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  /* usad8 has Ra (bits [15:12]) == 0xf; otherwise usada8.  */
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  /* bfc has Rn (bits [3:0]) == 0xf; otherwise bfi.  */
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7765
/* Decode branch (b/bl/blx immediate, bit 25 set) versus block transfer
   (ldm/stm, bit 25 clear) and dispatch accordingly.  */

static int
arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
			struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  return (bit (insn, 25)
	  ? arm_copy_b_bl_blx (gdbarch, insn, regs, dsc)
	  : arm_copy_block_xfer (gdbarch, insn, regs, dsc));
}
7776
/* Decode VFP/Neon extension register load/store instructions (ARM
   mode).  OPCODE is insn bits [24:20].  Only vstr/vldr go through the
   coprocessor load/store copier (they may be PC-relative); the rest run
   unmodified.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7807
7808 /* Decode shifted register instructions. */
7809
/* Decode Thumb-2 data-processing (shifted register) instructions.  The
   PC is only allowed to be used in instruction MOV, so everything else
   is copied unmodified.  */

static int
thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
			    uint16_t insn2, struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn = bits (insn1, 0, 3);

  if (op != 0x2 || rn != 0xf)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"dp (shift reg)", dsc);

  /* MOV.  */
  return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
}
7826
7827
7828 /* Decode extension register load/store. Exactly the same as
7829 arm_decode_ext_reg_ld_st. */
7830
static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
			     uint16_t insn2, struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  /* OPCODE is bits [8:4] of the first halfword.  */
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vmov", dsc);

    case 0x08: case 0x0c: /* 01x00 */
    case 0x0a: case 0x0e: /* 01x10 */
    case 0x12: case 0x16: /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d: /* 01x01 */
    case 0x0b: case 0x0f: /* 01x11 */
    case 0x13: case 0x17: /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* vldr may be a PC-relative (literal) load, so it goes through the
	 full coprocessor load/store copier.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7866
7867 static int
7868 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
7869 struct regcache *regs, struct displaced_step_closure *dsc)
7870 {
7871 unsigned int op1 = bits (insn, 20, 25);
7872 int op = bit (insn, 4);
7873 unsigned int coproc = bits (insn, 8, 11);
7874 unsigned int rn = bits (insn, 16, 19);
7875
7876 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
7877 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
7878 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
7879 && (coproc & 0xe) != 0xa)
7880 /* stc/stc2. */
7881 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7882 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
7883 && (coproc & 0xe) != 0xa)
7884 /* ldc/ldc2 imm/lit. */
7885 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7886 else if ((op1 & 0x3e) == 0x00)
7887 return arm_copy_undef (gdbarch, insn, dsc);
7888 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
7889 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
7890 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
7891 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7892 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
7893 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7894 else if ((op1 & 0x30) == 0x20 && !op)
7895 {
7896 if ((coproc & 0xe) == 0xa)
7897 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
7898 else
7899 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7900 }
7901 else if ((op1 & 0x30) == 0x20 && op)
7902 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
7903 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
7904 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7905 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
7906 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7907 else if ((op1 & 0x30) == 0x30)
7908 return arm_copy_svc (gdbarch, insn, regs, dsc);
7909 else
7910 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
7911 }
7912
/* Decode Thumb-2 coprocessor/SIMD instructions.  Dispatch on bit 9 and
   bits [8:5] of the first halfword, and on the coprocessor number
   (bits [11:8] of the second halfword); 101x coprocessors are
   VFP/Neon.  */

static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
			 uint16_t insn2, struct regcache *regs,
			 struct displaced_step_closure *dsc)
{
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
					    dsc);
      else if (bit_5_8 == 0)  /* UNDEFINED.  */
	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
	{
	  /* Coproc is 101x: SIMD/VFP, ext registers load/store.  */
	  if ((coproc & 0xe) == 0xa)
	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
						dsc);
	  else /* coproc is not 101x.  */
	    {
	      if (bit_4 == 0)  /* STC/STC2.  */
		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						    "stc/stc2", dsc);
	      else  /* LDC/LDC2 {literal, immediate}.  */
		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
						     regs, dsc);
	    }
	}
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  return 0;
}
7955
7956 static void
7957 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
7958 struct displaced_step_closure *dsc, int rd)
7959 {
7960 /* ADR Rd, #imm
7961
7962 Rewrite as:
7963
7964 Preparation: Rd <- PC
7965 Insn: ADD Rd, #imm
7966 Cleanup: Null.
7967 */
7968
7969 /* Rd <- PC */
7970 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7971 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
7972 }
7973
7974 static int
7975 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
7976 struct displaced_step_closure *dsc,
7977 int rd, unsigned int imm)
7978 {
7979
7980 /* Encoding T2: ADDS Rd, #imm */
7981 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
7982
7983 install_pc_relative (gdbarch, regs, dsc, rd);
7984
7985 return 0;
7986 }
7987
7988 static int
7989 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
7990 struct regcache *regs,
7991 struct displaced_step_closure *dsc)
7992 {
7993 unsigned int rd = bits (insn, 8, 10);
7994 unsigned int imm8 = bits (insn, 0, 7);
7995
7996 if (debug_displaced)
7997 fprintf_unfiltered (gdb_stdlog,
7998 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
7999 rd, imm8, insn);
8000
8001 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
8002 }
8003
/* Copy a 32-bit Thumb ADR: rewrite as ADD/SUB Rd, Rd, #imm with Rd
   preloaded with the PC value by install_pc_relative.  Bit 7 of the
   first halfword distinguishes the SUB (encoding T2) and ADD (encoding
   T3) forms.  */

static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
			      uint16_t insn2, struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn2, 8, 11);
  /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
     extract raw immediate encoding rather than computing immediate.  When
     generating ADD or SUB instruction, we can simply perform OR operation to
     set immediate into ADD.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;
  unsigned int imm_i = insn1 & 0x0400; /* Clear all bits except bit 10.  */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
			rd, imm_i, imm_3_8, insn1, insn2);

  if (bit (insn1, 7)) /* Encoding T2 */
    {
      /* Encoding T2: SUB Rd, Rd, #imm */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else /* Encoding T3 */
    {
      /* Encoding T3: ADD Rd, Rd, #imm */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  dsc->numinsns = 2;

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
8040
8041 static int
8042 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
8043 struct regcache *regs,
8044 struct displaced_step_closure *dsc)
8045 {
8046 unsigned int rt = bits (insn1, 8, 10);
8047 unsigned int pc;
8048 int imm8 = (bits (insn1, 0, 7) << 2);
8049 CORE_ADDR from = dsc->insn_addr;
8050
8051 /* LDR Rd, #imm8
8052
8053 Rwrite as:
8054
8055 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
8056
8057 Insn: LDR R0, [R2, R3];
8058 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
8059
8060 if (debug_displaced)
8061 fprintf_unfiltered (gdb_stdlog,
8062 "displaced: copying thumb ldr r%d [pc #%d]\n"
8063 , rt, imm8);
8064
8065 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
8066 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
8067 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
8068 pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
8069 /* The assembler calculates the required value of the offset from the
8070 Align(PC,4) value of this instruction to the label. */
8071 pc = pc & 0xfffffffc;
8072
8073 displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
8074 displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
8075
8076 dsc->rd = rt;
8077 dsc->u.ldst.xfersize = 4;
8078 dsc->u.ldst.rn = 0;
8079 dsc->u.ldst.immed = 0;
8080 dsc->u.ldst.writeback = 0;
8081 dsc->u.ldst.restore_r4 = 0;
8082
8083 dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
8084
8085 dsc->cleanup = &cleanup_load;
8086
8087 return 0;
8088 }
8089
/* Copy Thumb cbnz/cbz instruction.  */
8091
static int
thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
		     struct regcache *regs,
		     struct displaced_step_closure *dsc)
{
  int non_zero = bit (insn1, 11);
  /* Branch offset: i:imm5:'0' (bit 9 is i, bits [7:3] are imm5).  */
  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
  CORE_ADDR from = dsc->insn_addr;
  int rn = bits (insn1, 0, 2);
  int rn_val = displaced_read_reg (regs, dsc, rn);

  /* Branch taken iff (CBNZ and Rn != 0) or (CBZ and Rn == 0).  */
  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
     condition is false, let it be, cleanup_branch will do nothing.  */
  if (dsc->u.branch.cond)
    {
      dsc->u.branch.cond = INST_AL;
      dsc->u.branch.dest = from + 4 + imm5;
    }
  else
      dsc->u.branch.dest = from + 2;

  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
			rn, rn_val, insn1, dsc->u.branch.dest);

  /* Execute a NOP out of line; cleanup_branch performs the actual
     (conditional) PC update.  */
  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;
  return 0;
}
8128
8129 /* Copy Table Branch Byte/Halfword */
static int
thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  ULONGEST rn_val, rm_val;
  int is_tbh = bit (insn2, 4);
  CORE_ADDR halfwords = 0;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* Rn (table base) is in the first halfword, Rm (index) in the
     second.  */
  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));

  /* NOTE(review): the return value of target_read_memory is ignored
     below; a failed read leaves BUF undefined -- consider checking.  */
  if (is_tbh)
    {
      gdb_byte buf[2];

      target_read_memory (rn_val + 2 * rm_val, buf, 2);
      halfwords = extract_unsigned_integer (buf, 2, byte_order);
    }
  else
    {
      gdb_byte buf[1];

      target_read_memory (rn_val + rm_val, buf, 1);
      halfwords = extract_unsigned_integer (buf, 1, byte_order);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
			" offset 0x%x\n", is_tbh ? "tbh" : "tbb",
			(unsigned int) rn_val, (unsigned int) rm_val,
			(unsigned int) halfwords);

  /* The branch target is PC (insn_addr + 4) plus twice the table entry;
     cleanup_branch performs the PC write.  */
  dsc->u.branch.cond = INST_AL;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
8173
8174 static void
8175 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
8176 struct displaced_step_closure *dsc)
8177 {
8178 /* PC <- r7 */
8179 int val = displaced_read_reg (regs, dsc, 7);
8180 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
8181
8182 /* r7 <- r8 */
8183 val = displaced_read_reg (regs, dsc, 8);
8184 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
8185
8186 /* r8 <- tmp[0] */
8187 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
8188
8189 }
8190
8191 static int
8192 thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
8193 struct regcache *regs,
8194 struct displaced_step_closure *dsc)
8195 {
8196 dsc->u.block.regmask = insn1 & 0x00ff;
8197
8198 /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
8199 to :
8200
8201 (1) register list is full, that is, r0-r7 are used.
8202 Prepare: tmp[0] <- r8
8203
8204 POP {r0, r1, ...., r6, r7}; remove PC from reglist
8205 MOV r8, r7; Move value of r7 to r8;
8206 POP {r7}; Store PC value into r7.
8207
8208 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
8209
8210 (2) register list is not full, supposing there are N registers in
8211 register list (except PC, 0 <= N <= 7).
8212 Prepare: for each i, 0 - N, tmp[i] <- ri.
8213
8214 POP {r0, r1, ...., rN};
8215
8216 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
8217 from tmp[] properly.
8218 */
8219 if (debug_displaced)
8220 fprintf_unfiltered (gdb_stdlog,
8221 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
8222 dsc->u.block.regmask, insn1);
8223
8224 if (dsc->u.block.regmask == 0xff)
8225 {
8226 dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
8227
8228 dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
8229 dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
8230 dsc->modinsn[2] = 0xbc80; /* POP {r7} */
8231
8232 dsc->numinsns = 3;
8233 dsc->cleanup = &cleanup_pop_pc_16bit_all;
8234 }
8235 else
8236 {
8237 unsigned int num_in_list = bitcount (dsc->u.block.regmask);
8238 unsigned int new_regmask, bit = 1;
8239 unsigned int to = 0, from = 0, i, new_rn;
8240
8241 for (i = 0; i < num_in_list + 1; i++)
8242 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
8243
8244 new_regmask = (1 << (num_in_list + 1)) - 1;
8245
8246 if (debug_displaced)
8247 fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
8248 "{..., pc}: original reg list %.4x,"
8249 " modified list %.4x\n"),
8250 (int) dsc->u.block.regmask, new_regmask);
8251
8252 dsc->u.block.regmask |= 0x8000;
8253 dsc->u.block.writeback = 0;
8254 dsc->u.block.cond = INST_AL;
8255
8256 dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
8257
8258 dsc->cleanup = &cleanup_block_load_pc;
8259 }
8260
8261 return 0;
8262 }
8263
/* Decode the 16-bit Thumb instruction INSN1 and prepare DSC for displaced
   stepping, dispatching on the major opcode in bits 12-15 (and, where
   needed, bits 10-11).  Instructions that cannot read or write the PC are
   copied unmodified; PC-relative ones get dedicated handlers.  Calls
   internal_error if the instruction cannot be decoded.  */
static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions. */
  switch (op_bit_12_15)
    {
      /* Shift (imme), add, subtract, move and compare. */
    case 0: case 1: case 2: case 3:
      /* These only touch low registers, never the PC.  */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0: /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1: /* Special data instructions and branch and exchange. */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7) /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers. */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default: /* LDR (literal) */
	  /* PC-relative load; needs fixup of the literal address.  */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13: /* POP */
	    if (bit (insn1, 8)) /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15: /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction. */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
8366
/* Decode a 32-bit Thumb-2 instruction from the "load byte/halfword/word
   and memory hints" group (halfwords INSN1 and INSN2) and prepare DSC
   for displaced stepping.  Only PC-relative loads (literal forms, Rn ==
   PC) need modification; everything else is copied unmodified.  Returns
   the result of the chosen copy helper (0 on success).

   The unused local `err' of the original code has been removed; every
   path returns directly.  */
static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
				 uint16_t insn1, uint16_t insn2,
				 struct regcache *regs,
				 struct displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);	/* Destination register.  */
  int rn = bits (insn1, 0, 3);		/* Base register; 0xf means PC.  */
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
	{
	  if (rn == 0xf)
	    /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"pli/pld", dsc);
	}
      else
	{
	  if (rn == 0xf) /* LDRB/LDRSB (literal) */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     1);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrb{reg, immediate}/ldrbt",
						dsc);
	}

    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "pld/unalloc memhint", dsc);
      else
	{
	  if (rn == 0xf)
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     2);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrh/ldrht", dsc);
	}

    case 2: /* Load word */
      {
	int insn2_bit_8_11 = bits (insn2, 8, 11);

	if (rn == 0xf)
	  return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
	else if (op1 == 0x1) /* Encoding T3 */
	  return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
					   0, 1);
	else /* op1 == 0x0 */
	  {
	    if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
	      /* LDR (immediate) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, bit (insn2, 8), 1);
	    else if (insn2_bit_8_11 == 0xe) /* LDRT */
	      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						  "ldrt", dsc);
	    else
	      /* LDR (register) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, 0, 0);
	  }
      }

    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
    }

  /* Not reached; all switch arms return.  */
  return 0;
}
8447
/* Decode the 32-bit Thumb-2 instruction formed by halfwords INSN1 and
   INSN2 and prepare DSC for displaced stepping, dispatching on the major
   opcode fields op1 (insn1 bits 11-12) and op (insn2 bit 15).  Calls
   internal_error if the instruction cannot be decoded.  */
static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
	switch (bits (insn1, 9, 10))
	  {
	  case 0:
	    if (bit (insn1, 6))
	      {
		/* Load/store {dual, exclusive}, table branch.  */
		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
		    && bits (insn2, 5, 7) == 0)
		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
						  dsc);
		else
		  /* PC is not allowed to use in load/store {dual, exclusive}
		     instructions.  */
		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						     "load/store dual/ex", dsc);
	      }
	    else /* load/store multiple */
	      {
		switch (bits (insn1, 7, 8))
		  {
		  case 0: case 3: /* SRS, RFE */
		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						       "srs/rfe", dsc);
		    break;
		  case 1: case 2: /* LDM/STM/PUSH/POP */
		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
		    break;
		  }
	      }
	    break;

	  case 1:
	    /* Data-processing (shift register).  */
	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
					      dsc);
	    break;
	  default: /* Coprocessor instructions.  */
	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	    break;
	  }
	break;
      }
    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
	{
	  if (bit (insn2, 14)  /* BLX/BL */
	      || bit (insn2, 12) /* Unconditional branch */
	      || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
	  else
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "misc ctrl", dsc);
	}
      else
	{
	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
	    {
	      /* Renamed from `op' to avoid shadowing the outer OP.  */
	      int dp_op = bits (insn1, 4, 8);
	      int rn = bits (insn1, 0, 3);
	      if ((dp_op == 0 || dp_op == 0xa) && rn == 0xf)
		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
						    regs, dsc);
	      else
		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						   "dp/pb", dsc);
	    }
	  else /* Data processing (modified immediate) */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "dp/mi", dsc);
	}
      break;
    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
	{
	case 0:
	  if (bit (insn1, 4))
	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
						   regs, dsc);
	  else /* NEON Load/Store and Store single data item */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "neon elt/struct load/store",
					       dsc);
	  break;
	case 1: /* op1 = 3, bits (9, 10) == 1 */
	  switch (bits (insn1, 7, 8))
	    {
	    case 0: case 1: /* Data processing (register) */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "dp(reg)", dsc);
	      break;
	    case 2: /* Multiply and absolute difference */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "mul/mua/diff", dsc);
	      break;
	    case 3: /* Long multiply and divide */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "lmul/lmua", dsc);
	      break;
	    }
	  break;
	default: /* Coprocessor instructions */
	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	  break;
	}
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));
}
8575
8576 static void
8577 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8578 CORE_ADDR to, struct regcache *regs,
8579 struct displaced_step_closure *dsc)
8580 {
8581 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8582 uint16_t insn1
8583 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
8584
8585 if (debug_displaced)
8586 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
8587 "at %.8lx\n", insn1, (unsigned long) from);
8588
8589 dsc->is_thumb = 1;
8590 dsc->insn_size = thumb_insn_size (insn1);
8591 if (thumb_insn_size (insn1) == 4)
8592 {
8593 uint16_t insn2
8594 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
8595 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
8596 }
8597 else
8598 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
8599 }
8600
8601 void
8602 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8603 CORE_ADDR to, struct regcache *regs,
8604 struct displaced_step_closure *dsc)
8605 {
8606 int err = 0;
8607 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8608 uint32_t insn;
8609
8610 /* Most displaced instructions use a 1-instruction scratch space, so set this
8611 here and override below if/when necessary. */
8612 dsc->numinsns = 1;
8613 dsc->insn_addr = from;
8614 dsc->scratch_base = to;
8615 dsc->cleanup = NULL;
8616 dsc->wrote_to_pc = 0;
8617
8618 if (!displaced_in_arm_mode (regs))
8619 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
8620
8621 dsc->is_thumb = 0;
8622 dsc->insn_size = 4;
8623 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
8624 if (debug_displaced)
8625 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
8626 "at %.8lx\n", (unsigned long) insn,
8627 (unsigned long) from);
8628
8629 if ((insn & 0xf0000000) == 0xf0000000)
8630 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
8631 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
8632 {
8633 case 0x0: case 0x1: case 0x2: case 0x3:
8634 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
8635 break;
8636
8637 case 0x4: case 0x5: case 0x6:
8638 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
8639 break;
8640
8641 case 0x7:
8642 err = arm_decode_media (gdbarch, insn, dsc);
8643 break;
8644
8645 case 0x8: case 0x9: case 0xa: case 0xb:
8646 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
8647 break;
8648
8649 case 0xc: case 0xd: case 0xe: case 0xf:
8650 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
8651 break;
8652 }
8653
8654 if (err)
8655 internal_error (__FILE__, __LINE__,
8656 _("arm_process_displaced_insn: Instruction decode error"));
8657 }
8658
/* Actually set up the scratch space for a displaced instruction.  Writes
   the modified instruction(s) recorded in DSC->modinsn to the scratch
   area at TO, followed by a breakpoint instruction (16-bit Thumb or
   32-bit ARM, per DSC->is_thumb) that stops the inferior after the
   single step.  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct displaced_step_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Each modified instruction slot is one halfword in Thumb mode, one
     word in ARM mode.  NOTE(review): 32-bit Thumb-2 instructions appear
     to be stored as two 16-bit entries here — confirm against the
     decoders that fill modinsn.  */
  int size = dsc->is_thumb? 2 : 4;
  const gdb_byte *bkp_insn;

  offset = 0;
  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
	  if (size == 4)
	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
				dsc->modinsn[i]);
	  else if (size == 2)
	    fprintf_unfiltered (gdb_stdlog, "%.4x",
				(unsigned short)dsc->modinsn[i]);

	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
			      (unsigned long) to + offset);

	}
      write_memory_unsigned_integer (to + offset, size,
				     byte_order_for_code,
				     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			paddress (gdbarch, from), paddress (gdbarch, to));
}
8714
8715 /* Entry point for copying an instruction into scratch space for displaced
8716 stepping. */
8717
8718 struct displaced_step_closure *
8719 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
8720 CORE_ADDR from, CORE_ADDR to,
8721 struct regcache *regs)
8722 {
8723 struct displaced_step_closure *dsc
8724 = xmalloc (sizeof (struct displaced_step_closure));
8725 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
8726 arm_displaced_init_closure (gdbarch, from, to, dsc);
8727
8728 return dsc;
8729 }
8730
8731 /* Entry point for cleaning things up after a displaced instruction has been
8732 single-stepped. */
8733
8734 void
8735 arm_displaced_step_fixup (struct gdbarch *gdbarch,
8736 struct displaced_step_closure *dsc,
8737 CORE_ADDR from, CORE_ADDR to,
8738 struct regcache *regs)
8739 {
8740 if (dsc->cleanup)
8741 dsc->cleanup (gdbarch, regs, dsc);
8742
8743 if (!dsc->wrote_to_pc)
8744 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
8745 dsc->insn_addr + dsc->insn_size);
8746
8747 }
8748
8749 #include "bfd-in2.h"
8750 #include "libcoff.h"
8751
/* Disassembler callback: print the instruction at MEMADDR.  If the
   address is a Thumb address, install a fake COFF Thumb symbol into
   INFO->symbols so the opcodes disassembler switches to Thumb decoding;
   otherwise clear INFO->symbols.  Returns the length of the decoded
   instruction, as reported by print_insn_{big,little}_arm.  */
static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Lazily-built, cached fake symbol; shared across calls (not
	 thread-safe, like the rest of this static state).  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm() 
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit before handing the address to opcodes.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
8793
8794 /* The following define instruction sequences that will cause ARM
8795 cpu's to take an undefined instruction trap. These are used to
8796 signal a breakpoint to GDB.
8797
8798 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8799 modes. A different instruction is required for each mode. The ARM
8800 cpu's can also be big or little endian. Thus four different
8801 instructions are needed to support all cases.
8802
8803 Note: ARMv4 defines several new instructions that will take the
8804 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8805 not in fact add the new instructions. The new undefined
8806 instructions in ARMv4 are all instructions that had no defined
8807 behaviour in earlier chips. There is no guarantee that they will
8808 raise an exception, but may be treated as NOP's. In practice, it
8809 may only safe to rely on instructions matching:
8810
8811 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8812 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8813 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8814
   Even this may only be true if the condition predicate is true.  The
8816 following use a condition predicate of ALWAYS so it is always TRUE.
8817
8818 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8819 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.  This can be handled by the
8821 abi-specific code during establishment of the gdbarch vector. */
8822
/* Breakpoint instruction byte sequences for each mode/endianness (see
   the comment above).  Note the 16-bit Thumb breakpoint happens to be
   the same byte sequence in either byte order.  */
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

/* Default breakpoint byte sequences — presumably installed into the
   gdbarch_tdep breakpoint fields during gdbarch setup; confirm against
   arm_gdbarch_init.  */
static const gdb_byte arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const gdb_byte arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const gdb_byte arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const gdb_byte arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8832
8833 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8834 the program counter value to determine whether a 16-bit or 32-bit
8835 breakpoint should be used. It returns a pointer to a string of
8836 bytes that encode a breakpoint instruction, stores the length of
8837 the string to *lenptr, and adjusts the program counter (if
8838 necessary) to point to the actual memory location where the
8839 breakpoint should be inserted. */
8840
static const unsigned char *
arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  if (arm_pc_is_thumb (gdbarch, *pcptr))
    {
      /* Clear the Thumb bit so the breakpoint is written to the real
	 instruction address.  */
      *pcptr = UNMAKE_THUMB_ADDR (*pcptr);

      /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
	 check whether we are replacing a 32-bit instruction.  */
      if (tdep->thumb2_breakpoint != NULL)
	{
	  gdb_byte buf[2];
	  if (target_read_memory (*pcptr, buf, 2) == 0)
	    {
	      unsigned short inst1;
	      inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
	      if (thumb_insn_size (inst1) == 4)
		{
		  *lenptr = tdep->thumb2_breakpoint_size;
		  return tdep->thumb2_breakpoint;
		}
	    }
	  /* On a read failure we fall through to the 16-bit breakpoint.  */
	}

      *lenptr = tdep->thumb_breakpoint_size;
      return tdep->thumb_breakpoint;
    }
  else
    {
      *lenptr = tdep->arm_breakpoint_size;
      return tdep->arm_breakpoint;
    }
}
8877
8878 static void
8879 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
8880 int *kindptr)
8881 {
8882 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
8883
8884 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
8885 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8886 that this is not confused with a 32-bit ARM breakpoint. */
8887 *kindptr = 3;
8888 }
8889
8890 /* Extract from an array REGBUF containing the (raw) register state a
8891 function return value of type TYPE, and copy that, in virtual
8892 format, into VALBUF. */
8893
static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
	    convert_from_extended (floatformat_from_type (type), tmpbuf,
				   valbuf, gdbarch_byte_order (gdbarch));
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Float comes back in r0, with r1 holding the second word of
	     values larger than four bytes.  */
	  regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
				  valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf, 
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, tmpbuf);
	  memcpy (valbuf, tmpbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
8982
8983
8984 /* Will a function return an aggregate type in memory or in a
8985 register? Return 0 if an aggregate type can be returned in a
8986 register, 1 if it must be returned in memory. */
8987
static int
arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  /* Return code: 0 = in registers, 1 = in memory.  */
  int nRc;
  enum type_code code;

  CHECK_TYPEDEF (type);

  /* In the ARM ABI, "integer" like aggregate types are returned in
     registers.  For an aggregate type to be integer like, its size
     must be less than or equal to INT_REGISTER_SIZE and the
     offset of each addressable subfield must be zero.  Note that bit
     fields are not addressable, and all addressable subfields of
     unions always start at offset zero.

     This function is based on the behaviour of GCC 2.95.1.
     See: gcc/arm.c: arm_return_in_memory() for details.

     Note: All versions of GCC before GCC 2.95.2 do not set up the
     parameters correctly for a function returning the following
     structure: struct { float f;}; This should be returned in memory,
     not a register.  Richard Earnshaw sent me a patch, but I do not
     know of any way to detect if a function like the above has been
     compiled with the correct calling convention.  */

  /* All aggregate types that won't fit in a register must be returned
     in memory.  */
  if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
    {
      return 1;
    }

  /* The AAPCS says all aggregates not larger than a word are returned
     in a register.  */
  if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
    return 0;

  /* Past this point we are using the older APCS rules.  */

  /* The only aggregate types that can be returned in a register are
     structs and unions.  Arrays must be returned in memory.  */
  code = TYPE_CODE (type);
  if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
    {
      return 1;
    }

  /* Assume all other aggregate types can be returned in a register.
     Run a check for structures, unions and arrays.  */
  nRc = 0;

  if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
    {
      int i;
      /* Need to check if this struct/union is "integer" like.  For
	 this to be true, its size must be less than or equal to
	 INT_REGISTER_SIZE and the offset of each addressable
	 subfield must be zero.  Note that bit fields are not
	 addressable, and unions always start at offset zero.  If any
	 of the subfields is a floating point type, the struct/union
	 cannot be an integer type.  */

      /* For each field in the object, check:
	 1) Is it FP? --> yes, nRc = 1;
	 2) Is it addressable (bitpos != 0) and
	 not packed (bitsize == 0)?
	 --> yes, nRc = 1
       */

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  enum type_code field_type_code;
	  field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
								       i)));

	  /* Is it a floating point type field?  */
	  if (field_type_code == TYPE_CODE_FLT)
	    {
	      nRc = 1;
	      break;
	    }

	  /* If bitpos != 0, then we have to care about it.  */
	  if (TYPE_FIELD_BITPOS (type, i) != 0)
	    {
	      /* Bitfields are not addressable.  If the field bitsize is 
		 zero, then the field is not packed.  Hence it cannot be
		 a bitfield or any other packed type.  */
	      if (TYPE_FIELD_BITSIZE (type, i) == 0)
		{
		  nRc = 1;
		  break;
		}
	    }
	}
    }

  return nRc;
}
9085
9086 /* Write into appropriate registers a function return value of type
9087 TYPE, given in virtual format. */
9088
static void
arm_store_return_value (struct type *type, struct regcache *regs,
			const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      gdb_byte buf[MAX_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  /* Convert to FPA extended format and place in F0.  */
	  convert_to_extended (floatformat_from_type (type), buf, valbuf,
			       gdbarch_byte_order (gdbarch));
	  regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Value goes in r0, with r1 holding the second word of values
	     larger than four bytes.  */
	  regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_write (regs, ARM_A1_REGNUM + 1, 
				   valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_store_return_value: Floating "
			    "point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[INT_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in consecutive
	     registers starting with r0.  This will always be a multiple of
	     the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = ARM_A1_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= INT_REGISTER_SIZE;
	      valbuf += INT_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
9179
9180
/* Handle function return values.

   Implements the gdbarch return_value method: decide how a value of
   type VALTYPE is returned from FUNCTION, and (if READBUF/WRITEBUF is
   non-NULL) fetch/store the value via REGCACHE.  */

static enum return_value_convention
arm_return_value (struct gdbarch *gdbarch, struct value *function,
                  struct type *valtype, struct regcache *regcache,
                  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct type *func_type = function ? value_type (function) : NULL;
  enum arm_vfp_cprc_base_type vfp_base_type;
  int vfp_base_count;

  /* VFP "co-processor register candidates" (homogeneous float/vector
     aggregates) are returned directly in VFP registers under the VFP
     calling convention.  */
  if (arm_vfp_abi_for_function (gdbarch, func_type)
      && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
    {
      int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
      int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
      int i;
      for (i = 0; i < vfp_base_count; i++)
        {
          if (reg_char == 'q')
            {
              /* Quad registers are accessed as pairs of doubles.  */
              if (writebuf)
                arm_neon_quad_write (gdbarch, regcache, i,
                                     writebuf + i * unit_length);

              if (readbuf)
                arm_neon_quad_read (gdbarch, regcache, i,
                                    readbuf + i * unit_length);
            }
          else
            {
              /* Single/double registers are looked up by name
                 (s<i> or d<i>).  */
              char name_buf[4];
              int regnum;

              xsnprintf (name_buf, sizeof (name_buf), "%c%d", reg_char, i);
              regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
                                                    strlen (name_buf));
              if (writebuf)
                regcache_cooked_write (regcache, regnum,
                                       writebuf + i * unit_length);
              if (readbuf)
                regcache_cooked_read (regcache, regnum,
                                      readbuf + i * unit_length);
            }
        }
      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  /* Aggregates may have to be returned in memory, depending on ABI.  */
  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (tdep->struct_return == pcc_struct_return
          || arm_return_in_memory (gdbarch, valtype))
        return RETURN_VALUE_STRUCT_CONVENTION;
    }

  /* AAPCS returns complex types longer than a register in memory.  */
  if (tdep->arm_abi != ARM_ABI_APCS
      && TYPE_CODE (valtype) == TYPE_CODE_COMPLEX
      && TYPE_LENGTH (valtype) > INT_REGISTER_SIZE)
    return RETURN_VALUE_STRUCT_CONVENTION;

  /* Everything else goes through the core registers.  */
  if (writebuf)
    arm_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    arm_extract_return_value (valtype, regcache, readbuf);

  return RETURN_VALUE_REGISTER_CONVENTION;
}
9253
9254
9255 static int
9256 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
9257 {
9258 struct gdbarch *gdbarch = get_frame_arch (frame);
9259 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9260 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
9261 CORE_ADDR jb_addr;
9262 gdb_byte buf[INT_REGISTER_SIZE];
9263
9264 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
9265
9266 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
9267 INT_REGISTER_SIZE))
9268 return 0;
9269
9270 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
9271 return 1;
9272 }
9273
/* Recognize GCC and GNU ld's trampolines.  If we are in a trampoline,
   return the target PC.  Otherwise return 0.  */

CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  const char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    {
      /* Trampoline 'bx reg' doesn't belong to any functions.  Do the
         check here.  */
      start_addr = arm_skip_bx_reg (frame, pc);
      if (start_addr != 0)
        return start_addr;

      return 0;
    }

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (strncmp (name, "_call_via_", 10) == 0
      || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
    {
      /* Use the name suffix to determine which register contains the
         target PC.  */
      static char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      /* The last two characters of the name are the register name.  */
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
        if (strcmp (&name[offset], table[regno]) == 0)
          return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
           && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
                       strlen ("_from_thumb")) == 0)
          || (namelen > 2 + strlen ("_from_arm")
              && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
                          strlen ("_from_arm")) == 0)))
    {
      char *target_name;
      int target_len = namelen - 2;
      struct bound_minimal_symbol minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Strip the leading "__" and the trailing "_from_thumb"/"_from_arm"
         to recover the target symbol name "foo".  The final character
         distinguishes the two suffixes ('b' only ends "_from_thumb").  */
      if (name[namelen - 1] == 'b')
        target_len -= strlen ("_from_thumb");
      else
        target_len -= strlen ("_from_arm");

      target_name = alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Prefer a symbol from the same objfile as the stub itself.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym.minsym != NULL)
        return BMSYMBOL_VALUE_ADDRESS (minsym);
      else
        return 0;
    }

  return 0;			/* not a stub */
}
9356
9357 static void
9358 set_arm_command (char *args, int from_tty)
9359 {
9360 printf_unfiltered (_("\
9361 \"set arm\" must be followed by an apporpriate subcommand.\n"));
9362 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
9363 }
9364
/* Implement the "show arm" prefix command: list the current values of
   all "show arm" subcommands.  */

static void
show_arm_command (char *args, int from_tty)
{
  cmd_show_list (showarmcmdlist, from_tty, "");
}
9370
9371 static void
9372 arm_update_current_architecture (void)
9373 {
9374 struct gdbarch_info info;
9375
9376 /* If the current architecture is not ARM, we have nothing to do. */
9377 if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_arm)
9378 return;
9379
9380 /* Update the architecture. */
9381 gdbarch_info_init (&info);
9382
9383 if (!gdbarch_update_p (info))
9384 internal_error (__FILE__, __LINE__, _("could not update architecture"));
9385 }
9386
9387 static void
9388 set_fp_model_sfunc (char *args, int from_tty,
9389 struct cmd_list_element *c)
9390 {
9391 enum arm_float_model fp_model;
9392
9393 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
9394 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
9395 {
9396 arm_fp_model = fp_model;
9397 break;
9398 }
9399
9400 if (fp_model == ARM_FLOAT_LAST)
9401 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
9402 current_fp_model);
9403
9404 arm_update_current_architecture ();
9405 }
9406
/* Implement "show arm fp-model": report the configured float model;
   when it is "auto" and the current architecture is ARM, also report
   the model actually in effect.  */

static void
show_fp_model (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());

  if (arm_fp_model == ARM_FLOAT_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
                      fp_model_strings[tdep->fp_model]);
  else
    fprintf_filtered (file, _("\
The current ARM floating point model is \"%s\".\n"),
                      fp_model_strings[arm_fp_model]);
}
9423
9424 static void
9425 arm_set_abi (char *args, int from_tty,
9426 struct cmd_list_element *c)
9427 {
9428 enum arm_abi_kind arm_abi;
9429
9430 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
9431 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
9432 {
9433 arm_abi_global = arm_abi;
9434 break;
9435 }
9436
9437 if (arm_abi == ARM_ABI_LAST)
9438 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
9439 arm_abi_string);
9440
9441 arm_update_current_architecture ();
9442 }
9443
/* Implement "show arm abi": report the configured ABI; when it is
   "auto" and the current architecture is ARM, also report the ABI
   actually in effect.  */

static void
arm_show_abi (struct ui_file *file, int from_tty,
              struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());

  if (arm_abi_global == ARM_ABI_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM ABI is \"auto\" (currently \"%s\").\n"),
                      arm_abi_strings[tdep->arm_abi]);
  else
    fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
                      arm_abi_string);
}
9459
/* Implement "show arm fallback-mode": report which mode (ARM or Thumb)
   is assumed when no symbol information is available.  */

static void
arm_show_fallback_mode (struct ui_file *file, int from_tty,
                        struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("The current execution mode assumed "
                      "(when symbols are unavailable) is \"%s\".\n"),
                    arm_fallback_mode_string);
}
9469
9470 static void
9471 arm_show_force_mode (struct ui_file *file, int from_tty,
9472 struct cmd_list_element *c, const char *value)
9473 {
9474 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9475
9476 fprintf_filtered (file,
9477 _("The current execution mode assumed "
9478 "(even when symbols are available) is \"%s\".\n"),
9479 arm_force_mode_string);
9480 }
9481
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
                             struct cmd_list_element *c)
{
  /* Propagate the new style to the opcodes library.  */
  set_disassembly_style ();
}
9493 \f
/* Return the ARM register name corresponding to register I.

   Raw register names come from arm_register_names (possibly overridden
   by a target description); VFP single-precision (s0-s31) and NEON
   quad (q0-q15) pseudo registers, when present, are numbered directly
   after the raw registers.  */
static const char *
arm_register_name (struct gdbarch *gdbarch, int i)
{
  const int num_regs = gdbarch_num_regs (gdbarch);

  /* The first 32 pseudo registers are the VFP singles.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && i >= num_regs && i < num_regs + 32)
    {
      static const char *const vfp_pseudo_names[] = {
	"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
	"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
	"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
	"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
      };

      return vfp_pseudo_names[i - num_regs];
    }

  /* The next 16 pseudo registers are the NEON quads.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && i >= num_regs + 32 && i < num_regs + 32 + 16)
    {
      static const char *const neon_pseudo_names[] = {
	"q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
	"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
      };

      return neon_pseudo_names[i - num_regs - 32];
    }

  if (i >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return "";

  return arm_register_names[i];
}
9531
/* Push the user's chosen register-name style (the `disassembly_style'
   global) down into the opcodes disassembler.  */

static void
set_disassembly_style (void)
{
  int current;

  /* Find the style that the user wants.  */
  for (current = 0; current < num_disassembly_options; current++)
    if (disassembly_style == valid_disassembly_styles[current])
      break;
  /* The setting is constrained to the table, so it must be found.  */
  gdb_assert (current < num_disassembly_options);

  /* Synchronize the disassembler.  */
  set_arm_regname_option (current);
}
9546
9547 /* Test whether the coff symbol specific value corresponds to a Thumb
9548 function. */
9549
9550 static int
9551 coff_sym_is_thumb (int val)
9552 {
9553 return (val == C_THUMBEXT
9554 || val == C_THUMBSTAT
9555 || val == C_THUMBEXTFUNC
9556 || val == C_THUMBSTATFUNC
9557 || val == C_THUMBLABEL);
9558 }
9559
/* arm_coff_make_msymbol_special()
   arm_elf_make_msymbol_special()

   These functions test whether the COFF or ELF symbol corresponds to
   an address in thumb code, and set a "special" bit in a minimal
   symbol to indicate that it does.  */

static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  /* ELF symbols carry a branch-type annotation; ST_BRANCH_TO_THUMB
     marks a Thumb entry point.  */
  if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
      == ST_BRANCH_TO_THUMB)
    MSYMBOL_SET_SPECIAL (msym);
}
9574
/* Mark MSYM as Thumb when the COFF symbol storage class VAL denotes a
   Thumb symbol.  */

static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
9581
/* Free the per-objfile mapping-symbol vectors when OBJFILE's
   arm_per_objfile data (ARG) is destroyed.  The arm_per_objfile struct
   itself lives on the objfile obstack and needs no explicit free.  */

static void
arm_objfile_data_free (struct objfile *objfile, void *arg)
{
  struct arm_per_objfile *data = arg;
  unsigned int i;

  for (i = 0; i < objfile->obfd->section_count; i++)
    VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
}
9591
/* Record an ARM mapping symbol ($a, $t or $d) from SYM into OBJFILE's
   per-section, address-sorted mapping-symbol vectors.  These are used
   elsewhere to distinguish ARM code, Thumb code and data.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
                           asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  VEC(arm_mapping_symbol_s) **map_p;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  /* Only $a, $t and $d mapping symbols are of interest here.  */
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Lazily create the per-objfile data: one vector per BFD section.  */
  data = objfile_data (objfile, arm_objfile_data_key);
  if (data == NULL)
    {
      data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
                             struct arm_per_objfile);
      set_objfile_data (objfile, arm_objfile_data_key, data);
      data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
                                           objfile->obfd->section_count,
                                           VEC(arm_mapping_symbol_s) *);
    }
  map_p = &data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  if (!VEC_empty (arm_mapping_symbol_s, *map_p))
    {
      struct arm_mapping_symbol *prev_map_sym;

      prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
      if (prev_map_sym->value >= sym->value)
        {
          /* Out of order: fall back to a sorted insertion to keep the
             vector ordered by address.  */
          unsigned int idx;
          idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
                                 arm_compare_mapping_symbols);
          VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
          return;
        }
    }

  /* Common case: append at the end.  */
  VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
}
9640
9641 static void
9642 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
9643 {
9644 struct gdbarch *gdbarch = get_regcache_arch (regcache);
9645 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
9646
9647 /* If necessary, set the T bit. */
9648 if (arm_apcs_32)
9649 {
9650 ULONGEST val, t_bit;
9651 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
9652 t_bit = arm_psr_thumb_bit (gdbarch);
9653 if (arm_pc_is_thumb (gdbarch, pc))
9654 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9655 val | t_bit);
9656 else
9657 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9658 val & ~t_bit);
9659 }
9660 }
9661
9662 /* Read the contents of a NEON quad register, by reading from two
9663 double registers. This is used to implement the quad pseudo
9664 registers, and for argument passing in case the quad registers are
9665 missing; vectors are passed in quad registers when using the VFP
9666 ABI, even if a NEON unit is not present. REGNUM is the index of
9667 the quad register, in [0, 15]. */
9668
9669 static enum register_status
9670 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
9671 int regnum, gdb_byte *buf)
9672 {
9673 char name_buf[4];
9674 gdb_byte reg_buf[8];
9675 int offset, double_regnum;
9676 enum register_status status;
9677
9678 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
9679 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9680 strlen (name_buf));
9681
9682 /* d0 is always the least significant half of q0. */
9683 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9684 offset = 8;
9685 else
9686 offset = 0;
9687
9688 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9689 if (status != REG_VALID)
9690 return status;
9691 memcpy (buf + offset, reg_buf, 8);
9692
9693 offset = 8 - offset;
9694 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
9695 if (status != REG_VALID)
9696 return status;
9697 memcpy (buf + offset, reg_buf, 8);
9698
9699 return REG_VALID;
9700 }
9701
/* Implement the gdbarch pseudo_register_read method for the VFP/NEON
   pseudo registers: s0-s31 (pseudo numbers 0-31 after the raw
   registers) are halves of d0-d15; q0-q15 (pseudo numbers 32-47) are
   pairs of doubles.  */

static enum register_status
arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
                 int regnum, gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gdb_assert (regnum >= num_regs);
  /* Rebase to a pseudo-register index.  */
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
  else
    {
      enum register_status status;

      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* s<N> lives in d<N/2>; look the double up by name.  */
      xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      status = regcache_raw_read (regcache, double_regnum, reg_buf);
      if (status == REG_VALID)
	memcpy (buf, reg_buf + offset, 4);
      return status;
    }
}
9740
9741 /* Store the contents of BUF to a NEON quad register, by writing to
9742 two double registers. This is used to implement the quad pseudo
9743 registers, and for argument passing in case the quad registers are
9744 missing; vectors are passed in quad registers when using the VFP
9745 ABI, even if a NEON unit is not present. REGNUM is the index
9746 of the quad register, in [0, 15]. */
9747
9748 static void
9749 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
9750 int regnum, const gdb_byte *buf)
9751 {
9752 char name_buf[4];
9753 int offset, double_regnum;
9754
9755 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
9756 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9757 strlen (name_buf));
9758
9759 /* d0 is always the least significant half of q0. */
9760 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9761 offset = 8;
9762 else
9763 offset = 0;
9764
9765 regcache_raw_write (regcache, double_regnum, buf + offset);
9766 offset = 8 - offset;
9767 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
9768 }
9769
/* Implement the gdbarch pseudo_register_write method for the VFP/NEON
   pseudo registers; see arm_pseudo_read for the numbering scheme.
   Writing a single s<N> is a read-modify-write of the containing
   double register.  */

static void
arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
                  int regnum, const gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gdb_assert (regnum >= num_regs);
  /* Rebase to a pseudo-register index.  */
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
  else
    {
      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* s<N> lives in d<N/2>; look the double up by name.  */
      xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      /* NOTE(review): the read's register_status is ignored here; if the
	 double register is unavailable the untouched half of reg_buf is
	 whatever was on the stack — confirm whether this matters for the
	 write path.  */
      regcache_raw_read (regcache, double_regnum, reg_buf);
      memcpy (reg_buf + offset, buf, 4);
      regcache_raw_write (regcache, double_regnum, reg_buf);
    }
}
9805
/* Callback for user_reg_add: BATON points at the register number to
   read; return its value in FRAME.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_p = baton;

  return value_of_register (*regnum_p, frame);
}
9812 \f
9813 static enum gdb_osabi
9814 arm_elf_osabi_sniffer (bfd *abfd)
9815 {
9816 unsigned int elfosabi;
9817 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
9818
9819 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
9820
9821 if (elfosabi == ELFOSABI_ARM)
9822 /* GNU tools use this value. Check note sections in this case,
9823 as well. */
9824 bfd_map_over_sections (abfd,
9825 generic_elf_osabi_sniff_abi_tag_sections,
9826 &osabi);
9827
9828 /* Anything else will be handled by the generic ELF sniffer. */
9829 return osabi;
9830 }
9831
9832 static int
9833 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
9834 struct reggroup *group)
9835 {
9836 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9837 this, FPS register belongs to save_regroup, restore_reggroup, and
9838 all_reggroup, of course. */
9839 if (regnum == ARM_FPS_REGNUM)
9840 return (group == float_reggroup
9841 || group == save_reggroup
9842 || group == restore_reggroup
9843 || group == all_reggroup);
9844 else
9845 return default_register_reggroup_p (gdbarch, regnum, group);
9846 }
9847
9848 \f
/* For backward-compatibility we allow two 'g' packet lengths with
   the remote protocol depending on whether FPA registers are
   supplied.  M-profile targets do not have FPA registers, but some
   stubs already exist in the wild which use a 'g' packet which
   supplies them albeit with dummy values.  The packet format which
   includes FPA registers should be considered deprecated for
   M-profile targets.  */

static void
arm_register_g_packet_guesses (struct gdbarch *gdbarch)
{
  if (gdbarch_tdep (gdbarch)->is_m)
    {
      /* If we know from the executable this is an M-profile target,
	 cater for remote targets whose register set layout is the
	 same as the FPA layout.  */
      register_remote_g_packet_guess (gdbarch,
				      /* r0-r12,sp,lr,pc; f0-f7; fps,xpsr */
				      (16 * INT_REGISTER_SIZE)
				      + (8 * FP_REGISTER_SIZE)
				      + (2 * INT_REGISTER_SIZE),
				      tdesc_arm_with_m_fpa_layout);

      /* The regular M-profile layout.  */
      register_remote_g_packet_guess (gdbarch,
				      /* r0-r12,sp,lr,pc; xpsr */
				      (16 * INT_REGISTER_SIZE)
				      + INT_REGISTER_SIZE,
				      tdesc_arm_with_m);

      /* M-profile plus M4F VFP.  */
      register_remote_g_packet_guess (gdbarch,
				      /* r0-r12,sp,lr,pc; d0-d15; fpscr,xpsr */
				      (16 * INT_REGISTER_SIZE)
				      + (16 * VFP_REGISTER_SIZE)
				      + (2 * INT_REGISTER_SIZE),
				      tdesc_arm_with_m_vfp_d16);
    }

  /* Otherwise we don't have a useful guess.  */
}
9890
9891 \f
9892 /* Initialize the current architecture based on INFO. If possible,
9893 re-use an architecture from ARCHES, which is a list of
9894 architectures already created during this debugging session.
9895
9896 Called e.g. at program startup, when reading a core file, and when
9897 reading a binary file. */
9898
9899 static struct gdbarch *
9900 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
9901 {
9902 struct gdbarch_tdep *tdep;
9903 struct gdbarch *gdbarch;
9904 struct gdbarch_list *best_arch;
9905 enum arm_abi_kind arm_abi = arm_abi_global;
9906 enum arm_float_model fp_model = arm_fp_model;
9907 struct tdesc_arch_data *tdesc_data = NULL;
9908 int i, is_m = 0;
9909 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
9910 int have_neon = 0;
9911 int have_fpa_registers = 1;
9912 const struct target_desc *tdesc = info.target_desc;
9913
9914 /* If we have an object to base this architecture on, try to determine
9915 its ABI. */
9916
9917 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
9918 {
9919 int ei_osabi, e_flags;
9920
9921 switch (bfd_get_flavour (info.abfd))
9922 {
9923 case bfd_target_aout_flavour:
9924 /* Assume it's an old APCS-style ABI. */
9925 arm_abi = ARM_ABI_APCS;
9926 break;
9927
9928 case bfd_target_coff_flavour:
9929 /* Assume it's an old APCS-style ABI. */
9930 /* XXX WinCE? */
9931 arm_abi = ARM_ABI_APCS;
9932 break;
9933
9934 case bfd_target_elf_flavour:
9935 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
9936 e_flags = elf_elfheader (info.abfd)->e_flags;
9937
9938 if (ei_osabi == ELFOSABI_ARM)
9939 {
9940 /* GNU tools used to use this value, but do not for EABI
9941 objects. There's nowhere to tag an EABI version
9942 anyway, so assume APCS. */
9943 arm_abi = ARM_ABI_APCS;
9944 }
9945 else if (ei_osabi == ELFOSABI_NONE)
9946 {
9947 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
9948 int attr_arch, attr_profile;
9949
9950 switch (eabi_ver)
9951 {
9952 case EF_ARM_EABI_UNKNOWN:
9953 /* Assume GNU tools. */
9954 arm_abi = ARM_ABI_APCS;
9955 break;
9956
9957 case EF_ARM_EABI_VER4:
9958 case EF_ARM_EABI_VER5:
9959 arm_abi = ARM_ABI_AAPCS;
9960 /* EABI binaries default to VFP float ordering.
9961 They may also contain build attributes that can
9962 be used to identify if the VFP argument-passing
9963 ABI is in use. */
9964 if (fp_model == ARM_FLOAT_AUTO)
9965 {
9966 #ifdef HAVE_ELF
9967 switch (bfd_elf_get_obj_attr_int (info.abfd,
9968 OBJ_ATTR_PROC,
9969 Tag_ABI_VFP_args))
9970 {
9971 case 0:
9972 /* "The user intended FP parameter/result
9973 passing to conform to AAPCS, base
9974 variant". */
9975 fp_model = ARM_FLOAT_SOFT_VFP;
9976 break;
9977 case 1:
9978 /* "The user intended FP parameter/result
9979 passing to conform to AAPCS, VFP
9980 variant". */
9981 fp_model = ARM_FLOAT_VFP;
9982 break;
9983 case 2:
9984 /* "The user intended FP parameter/result
9985 passing to conform to tool chain-specific
9986 conventions" - we don't know any such
9987 conventions, so leave it as "auto". */
9988 break;
9989 default:
9990 /* Attribute value not mentioned in the
9991 October 2008 ABI, so leave it as
9992 "auto". */
9993 break;
9994 }
9995 #else
9996 fp_model = ARM_FLOAT_SOFT_VFP;
9997 #endif
9998 }
9999 break;
10000
10001 default:
10002 /* Leave it as "auto". */
10003 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
10004 break;
10005 }
10006
10007 #ifdef HAVE_ELF
10008 /* Detect M-profile programs. This only works if the
10009 executable file includes build attributes; GCC does
10010 copy them to the executable, but e.g. RealView does
10011 not. */
10012 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
10013 Tag_CPU_arch);
10014 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
10015 OBJ_ATTR_PROC,
10016 Tag_CPU_arch_profile);
10017 /* GCC specifies the profile for v6-M; RealView only
10018 specifies the profile for architectures starting with
10019 V7 (as opposed to architectures with a tag
10020 numerically greater than TAG_CPU_ARCH_V7). */
10021 if (!tdesc_has_registers (tdesc)
10022 && (attr_arch == TAG_CPU_ARCH_V6_M
10023 || attr_arch == TAG_CPU_ARCH_V6S_M
10024 || attr_profile == 'M'))
10025 is_m = 1;
10026 #endif
10027 }
10028
10029 if (fp_model == ARM_FLOAT_AUTO)
10030 {
10031 int e_flags = elf_elfheader (info.abfd)->e_flags;
10032
10033 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
10034 {
10035 case 0:
10036 /* Leave it as "auto". Strictly speaking this case
10037 means FPA, but almost nobody uses that now, and
10038 many toolchains fail to set the appropriate bits
10039 for the floating-point model they use. */
10040 break;
10041 case EF_ARM_SOFT_FLOAT:
10042 fp_model = ARM_FLOAT_SOFT_FPA;
10043 break;
10044 case EF_ARM_VFP_FLOAT:
10045 fp_model = ARM_FLOAT_VFP;
10046 break;
10047 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
10048 fp_model = ARM_FLOAT_SOFT_VFP;
10049 break;
10050 }
10051 }
10052
10053 if (e_flags & EF_ARM_BE8)
10054 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
10055
10056 break;
10057
10058 default:
10059 /* Leave it as "auto". */
10060 break;
10061 }
10062 }
10063
10064 /* Check any target description for validity. */
10065 if (tdesc_has_registers (tdesc))
10066 {
10067 /* For most registers we require GDB's default names; but also allow
10068 the numeric names for sp / lr / pc, as a convenience. */
10069 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
10070 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
10071 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
10072
10073 const struct tdesc_feature *feature;
10074 int valid_p;
10075
10076 feature = tdesc_find_feature (tdesc,
10077 "org.gnu.gdb.arm.core");
10078 if (feature == NULL)
10079 {
10080 feature = tdesc_find_feature (tdesc,
10081 "org.gnu.gdb.arm.m-profile");
10082 if (feature == NULL)
10083 return NULL;
10084 else
10085 is_m = 1;
10086 }
10087
10088 tdesc_data = tdesc_data_alloc ();
10089
10090 valid_p = 1;
10091 for (i = 0; i < ARM_SP_REGNUM; i++)
10092 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
10093 arm_register_names[i]);
10094 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10095 ARM_SP_REGNUM,
10096 arm_sp_names);
10097 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10098 ARM_LR_REGNUM,
10099 arm_lr_names);
10100 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10101 ARM_PC_REGNUM,
10102 arm_pc_names);
10103 if (is_m)
10104 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10105 ARM_PS_REGNUM, "xpsr");
10106 else
10107 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10108 ARM_PS_REGNUM, "cpsr");
10109
10110 if (!valid_p)
10111 {
10112 tdesc_data_cleanup (tdesc_data);
10113 return NULL;
10114 }
10115
10116 feature = tdesc_find_feature (tdesc,
10117 "org.gnu.gdb.arm.fpa");
10118 if (feature != NULL)
10119 {
10120 valid_p = 1;
10121 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
10122 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
10123 arm_register_names[i]);
10124 if (!valid_p)
10125 {
10126 tdesc_data_cleanup (tdesc_data);
10127 return NULL;
10128 }
10129 }
10130 else
10131 have_fpa_registers = 0;
10132
10133 feature = tdesc_find_feature (tdesc,
10134 "org.gnu.gdb.xscale.iwmmxt");
10135 if (feature != NULL)
10136 {
10137 static const char *const iwmmxt_names[] = {
10138 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
10139 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
10140 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
10141 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
10142 };
10143
10144 valid_p = 1;
10145 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
10146 valid_p
10147 &= tdesc_numbered_register (feature, tdesc_data, i,
10148 iwmmxt_names[i - ARM_WR0_REGNUM]);
10149
10150 /* Check for the control registers, but do not fail if they
10151 are missing. */
10152 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
10153 tdesc_numbered_register (feature, tdesc_data, i,
10154 iwmmxt_names[i - ARM_WR0_REGNUM]);
10155
10156 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
10157 valid_p
10158 &= tdesc_numbered_register (feature, tdesc_data, i,
10159 iwmmxt_names[i - ARM_WR0_REGNUM]);
10160
10161 if (!valid_p)
10162 {
10163 tdesc_data_cleanup (tdesc_data);
10164 return NULL;
10165 }
10166 }
10167
10168 /* If we have a VFP unit, check whether the single precision registers
10169 are present. If not, then we will synthesize them as pseudo
10170 registers. */
10171 feature = tdesc_find_feature (tdesc,
10172 "org.gnu.gdb.arm.vfp");
10173 if (feature != NULL)
10174 {
10175 static const char *const vfp_double_names[] = {
10176 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
10177 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
10178 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
10179 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
10180 };
10181
10182 /* Require the double precision registers. There must be either
10183 16 or 32. */
10184 valid_p = 1;
10185 for (i = 0; i < 32; i++)
10186 {
10187 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10188 ARM_D0_REGNUM + i,
10189 vfp_double_names[i]);
10190 if (!valid_p)
10191 break;
10192 }
10193 if (!valid_p && i == 16)
10194 valid_p = 1;
10195
10196 /* Also require FPSCR. */
10197 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10198 ARM_FPSCR_REGNUM, "fpscr");
10199 if (!valid_p)
10200 {
10201 tdesc_data_cleanup (tdesc_data);
10202 return NULL;
10203 }
10204
10205 if (tdesc_unnumbered_register (feature, "s0") == 0)
10206 have_vfp_pseudos = 1;
10207
10208 have_vfp_registers = 1;
10209
10210 /* If we have VFP, also check for NEON. The architecture allows
10211 NEON without VFP (integer vector operations only), but GDB
10212 does not support that. */
10213 feature = tdesc_find_feature (tdesc,
10214 "org.gnu.gdb.arm.neon");
10215 if (feature != NULL)
10216 {
10217 /* NEON requires 32 double-precision registers. */
10218 if (i != 32)
10219 {
10220 tdesc_data_cleanup (tdesc_data);
10221 return NULL;
10222 }
10223
10224 /* If there are quad registers defined by the stub, use
10225 their type; otherwise (normally) provide them with
10226 the default type. */
10227 if (tdesc_unnumbered_register (feature, "q0") == 0)
10228 have_neon_pseudos = 1;
10229
10230 have_neon = 1;
10231 }
10232 }
10233 }
10234
10235 /* If there is already a candidate, use it. */
10236 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
10237 best_arch != NULL;
10238 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
10239 {
10240 if (arm_abi != ARM_ABI_AUTO
10241 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
10242 continue;
10243
10244 if (fp_model != ARM_FLOAT_AUTO
10245 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
10246 continue;
10247
10248 /* There are various other properties in tdep that we do not
10249 need to check here: those derived from a target description,
10250 since gdbarches with a different target description are
10251 automatically disqualified. */
10252
10253 /* Do check is_m, though, since it might come from the binary. */
10254 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
10255 continue;
10256
10257 /* Found a match. */
10258 break;
10259 }
10260
10261 if (best_arch != NULL)
10262 {
10263 if (tdesc_data != NULL)
10264 tdesc_data_cleanup (tdesc_data);
10265 return best_arch->gdbarch;
10266 }
10267
10268 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
10269 gdbarch = gdbarch_alloc (&info, tdep);
10270
10271 /* Record additional information about the architecture we are defining.
10272 These are gdbarch discriminators, like the OSABI. */
10273 tdep->arm_abi = arm_abi;
10274 tdep->fp_model = fp_model;
10275 tdep->is_m = is_m;
10276 tdep->have_fpa_registers = have_fpa_registers;
10277 tdep->have_vfp_registers = have_vfp_registers;
10278 tdep->have_vfp_pseudos = have_vfp_pseudos;
10279 tdep->have_neon_pseudos = have_neon_pseudos;
10280 tdep->have_neon = have_neon;
10281
10282 arm_register_g_packet_guesses (gdbarch);
10283
10284 /* Breakpoints. */
10285 switch (info.byte_order_for_code)
10286 {
10287 case BFD_ENDIAN_BIG:
10288 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
10289 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
10290 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
10291 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
10292
10293 break;
10294
10295 case BFD_ENDIAN_LITTLE:
10296 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
10297 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
10298 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
10299 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
10300
10301 break;
10302
10303 default:
10304 internal_error (__FILE__, __LINE__,
10305 _("arm_gdbarch_init: bad byte order for float format"));
10306 }
10307
10308 /* On ARM targets char defaults to unsigned. */
10309 set_gdbarch_char_signed (gdbarch, 0);
10310
10311 /* Note: for displaced stepping, this includes the breakpoint, and one word
10312 of additional scratch space. This setting isn't used for anything beside
10313 displaced stepping at present. */
10314 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
10315
10316 /* This should be low enough for everything. */
10317 tdep->lowest_pc = 0x20;
10318 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
10319
10320 /* The default, for both APCS and AAPCS, is to return small
10321 structures in registers. */
10322 tdep->struct_return = reg_struct_return;
10323
10324 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
10325 set_gdbarch_frame_align (gdbarch, arm_frame_align);
10326
10327 set_gdbarch_write_pc (gdbarch, arm_write_pc);
10328
10329 /* Frame handling. */
10330 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
10331 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
10332 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
10333
10334 frame_base_set_default (gdbarch, &arm_normal_base);
10335
10336 /* Address manipulation. */
10337 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
10338
10339 /* Advance PC across function entry code. */
10340 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
10341
10342 /* Detect whether PC is in function epilogue. */
10343 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
10344
10345 /* Skip trampolines. */
10346 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
10347
10348 /* The stack grows downward. */
10349 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
10350
10351 /* Breakpoint manipulation. */
10352 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
10353 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
10354 arm_remote_breakpoint_from_pc);
10355
10356 /* Information about registers, etc. */
10357 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
10358 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
10359 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
10360 set_gdbarch_register_type (gdbarch, arm_register_type);
10361 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
10362
10363 /* This "info float" is FPA-specific. Use the generic version if we
10364 do not have FPA. */
10365 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
10366 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
10367
10368 /* Internal <-> external register number maps. */
10369 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
10370 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
10371
10372 set_gdbarch_register_name (gdbarch, arm_register_name);
10373
10374 /* Returning results. */
10375 set_gdbarch_return_value (gdbarch, arm_return_value);
10376
10377 /* Disassembly. */
10378 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
10379
10380 /* Minsymbol frobbing. */
10381 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
10382 set_gdbarch_coff_make_msymbol_special (gdbarch,
10383 arm_coff_make_msymbol_special);
10384 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
10385
10386 /* Thumb-2 IT block support. */
10387 set_gdbarch_adjust_breakpoint_address (gdbarch,
10388 arm_adjust_breakpoint_address);
10389
10390 /* Virtual tables. */
10391 set_gdbarch_vbit_in_delta (gdbarch, 1);
10392
10393 /* Hook in the ABI-specific overrides, if they have been registered. */
10394 gdbarch_init_osabi (info, gdbarch);
10395
10396 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
10397
10398 /* Add some default predicates. */
10399 if (is_m)
10400 frame_unwind_append_unwinder (gdbarch, &arm_m_exception_unwind);
10401 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
10402 dwarf2_append_unwinders (gdbarch);
10403 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
10404 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
10405
10406 /* Now we have tuned the configuration, set a few final things,
10407 based on what the OS ABI has told us. */
10408
10409 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
10410 binaries are always marked. */
10411 if (tdep->arm_abi == ARM_ABI_AUTO)
10412 tdep->arm_abi = ARM_ABI_APCS;
10413
10414 /* Watchpoints are not steppable. */
10415 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
10416
10417 /* We used to default to FPA for generic ARM, but almost nobody
10418 uses that now, and we now provide a way for the user to force
10419 the model. So default to the most useful variant. */
10420 if (tdep->fp_model == ARM_FLOAT_AUTO)
10421 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
10422
10423 if (tdep->jb_pc >= 0)
10424 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
10425
10426 /* Floating point sizes and format. */
10427 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
10428 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
10429 {
10430 set_gdbarch_double_format
10431 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10432 set_gdbarch_long_double_format
10433 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10434 }
10435 else
10436 {
10437 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
10438 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
10439 }
10440
10441 if (have_vfp_pseudos)
10442 {
10443 /* NOTE: These are the only pseudo registers used by
10444 the ARM target at the moment. If more are added, a
10445 little more care in numbering will be needed. */
10446
10447 int num_pseudos = 32;
10448 if (have_neon_pseudos)
10449 num_pseudos += 16;
10450 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
10451 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
10452 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
10453 }
10454
10455 if (tdesc_data)
10456 {
10457 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
10458
10459 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
10460
10461 /* Override tdesc_register_type to adjust the types of VFP
10462 registers for NEON. */
10463 set_gdbarch_register_type (gdbarch, arm_register_type);
10464 }
10465
10466 /* Add standard register aliases. We add aliases even for those
   names which are used by the current architecture - it's simpler,
10468 and does no harm, since nothing ever lists user registers. */
10469 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
10470 user_reg_add (gdbarch, arm_register_aliases[i].name,
10471 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
10472
10473 return gdbarch;
10474 }
10475
10476 static void
10477 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
10478 {
10479 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
10480
10481 if (tdep == NULL)
10482 return;
10483
10484 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
10485 (unsigned long) tdep->lowest_pc);
10486 }
10487
10488 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
10489
10490 void
10491 _initialize_arm_tdep (void)
10492 {
10493 struct ui_file *stb;
10494 long length;
10495 struct cmd_list_element *new_set, *new_show;
10496 const char *setname;
10497 const char *setdesc;
10498 const char *const *regnames;
10499 int numregs, i, j;
10500 static char *helptext;
10501 char regdesc[1024], *rdptr = regdesc;
10502 size_t rest = sizeof (regdesc);
10503
10504 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
10505
10506 arm_objfile_data_key
10507 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
10508
10509 /* Add ourselves to objfile event chain. */
10510 observer_attach_new_objfile (arm_exidx_new_objfile);
10511 arm_exidx_data_key
10512 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
10513
10514 /* Register an ELF OS ABI sniffer for ARM binaries. */
10515 gdbarch_register_osabi_sniffer (bfd_arch_arm,
10516 bfd_target_elf_flavour,
10517 arm_elf_osabi_sniffer);
10518
10519 /* Initialize the standard target descriptions. */
10520 initialize_tdesc_arm_with_m ();
10521 initialize_tdesc_arm_with_m_fpa_layout ();
10522 initialize_tdesc_arm_with_m_vfp_d16 ();
10523 initialize_tdesc_arm_with_iwmmxt ();
10524 initialize_tdesc_arm_with_vfpv2 ();
10525 initialize_tdesc_arm_with_vfpv3 ();
10526 initialize_tdesc_arm_with_neon ();
10527
10528 /* Get the number of possible sets of register names defined in opcodes. */
10529 num_disassembly_options = get_arm_regname_num_options ();
10530
10531 /* Add root prefix command for all "set arm"/"show arm" commands. */
10532 add_prefix_cmd ("arm", no_class, set_arm_command,
10533 _("Various ARM-specific commands."),
10534 &setarmcmdlist, "set arm ", 0, &setlist);
10535
10536 add_prefix_cmd ("arm", no_class, show_arm_command,
10537 _("Various ARM-specific commands."),
10538 &showarmcmdlist, "show arm ", 0, &showlist);
10539
10540 /* Sync the opcode insn printer with our register viewer. */
10541 parse_arm_disassembler_option ("reg-names-std");
10542
10543 /* Initialize the array that will be passed to
10544 add_setshow_enum_cmd(). */
10545 valid_disassembly_styles
10546 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
10547 for (i = 0; i < num_disassembly_options; i++)
10548 {
10549 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
10550 valid_disassembly_styles[i] = setname;
10551 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
10552 rdptr += length;
10553 rest -= length;
10554 /* When we find the default names, tell the disassembler to use
10555 them. */
10556 if (!strcmp (setname, "std"))
10557 {
10558 disassembly_style = setname;
10559 set_arm_regname_option (i);
10560 }
10561 }
10562 /* Mark the end of valid options. */
10563 valid_disassembly_styles[num_disassembly_options] = NULL;
10564
10565 /* Create the help text. */
10566 stb = mem_fileopen ();
10567 fprintf_unfiltered (stb, "%s%s%s",
10568 _("The valid values are:\n"),
10569 regdesc,
10570 _("The default is \"std\"."));
10571 helptext = ui_file_xstrdup (stb, NULL);
10572 ui_file_delete (stb);
10573
10574 add_setshow_enum_cmd("disassembler", no_class,
10575 valid_disassembly_styles, &disassembly_style,
10576 _("Set the disassembly style."),
10577 _("Show the disassembly style."),
10578 helptext,
10579 set_disassembly_style_sfunc,
10580 NULL, /* FIXME: i18n: The disassembly style is
10581 \"%s\". */
10582 &setarmcmdlist, &showarmcmdlist);
10583
10584 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
10585 _("Set usage of ARM 32-bit mode."),
10586 _("Show usage of ARM 32-bit mode."),
10587 _("When off, a 26-bit PC will be used."),
10588 NULL,
10589 NULL, /* FIXME: i18n: Usage of ARM 32-bit
10590 mode is %s. */
10591 &setarmcmdlist, &showarmcmdlist);
10592
10593 /* Add a command to allow the user to force the FPU model. */
10594 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
10595 _("Set the floating point type."),
10596 _("Show the floating point type."),
10597 _("auto - Determine the FP typefrom the OS-ABI.\n\
10598 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10599 fpa - FPA co-processor (GCC compiled).\n\
10600 softvfp - Software FP with pure-endian doubles.\n\
10601 vfp - VFP co-processor."),
10602 set_fp_model_sfunc, show_fp_model,
10603 &setarmcmdlist, &showarmcmdlist);
10604
10605 /* Add a command to allow the user to force the ABI. */
10606 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
10607 _("Set the ABI."),
10608 _("Show the ABI."),
10609 NULL, arm_set_abi, arm_show_abi,
10610 &setarmcmdlist, &showarmcmdlist);
10611
10612 /* Add two commands to allow the user to force the assumed
10613 execution mode. */
10614 add_setshow_enum_cmd ("fallback-mode", class_support,
10615 arm_mode_strings, &arm_fallback_mode_string,
10616 _("Set the mode assumed when symbols are unavailable."),
10617 _("Show the mode assumed when symbols are unavailable."),
10618 NULL, NULL, arm_show_fallback_mode,
10619 &setarmcmdlist, &showarmcmdlist);
10620 add_setshow_enum_cmd ("force-mode", class_support,
10621 arm_mode_strings, &arm_force_mode_string,
10622 _("Set the mode assumed even when symbols are available."),
10623 _("Show the mode assumed even when symbols are available."),
10624 NULL, NULL, arm_show_force_mode,
10625 &setarmcmdlist, &showarmcmdlist);
10626
10627 /* Debugging flag. */
10628 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
10629 _("Set ARM debugging."),
10630 _("Show ARM debugging."),
10631 _("When on, arm-specific debugging is enabled."),
10632 NULL,
10633 NULL, /* FIXME: i18n: "ARM debugging is %s. */
10634 &setdebuglist, &showdebuglist);
10635 }
10636
/* ARM-reversible process record data structures.  */

/* Byte sizes of the instruction encodings the recorder decodes.  */
#define ARM_INSN_SIZE_BYTES 4
#define THUMB_INSN_SIZE_BYTES 2
#define THUMB2_INSN_SIZE_BYTES 4


/* Position of the bit within a 32-bit ARM instruction
   that defines whether the instruction is a load or store.  */
#define INSN_S_L_BIT_NUM 20

/* Copy LENGTH register numbers from the uint32_t array RECORD_BUF into
   a freshly allocated array assigned to REGS.  REGS is left unassigned
   when LENGTH is zero; otherwise the caller is responsible for freeing
   the allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Copy LENGTH (length, address) uint32_t pairs from RECORD_BUF into a
   freshly allocated struct arm_mem_r array assigned to MEMS; each pair
   fills one struct arm_mem_r.  MEMS is left unassigned when LENGTH is
   zero; otherwise the caller is responsible for freeing the
   allocation.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct arm_mem_r, mem_len); \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct arm_mem_r) * LENGTH); \
              } \
          } \
        while (0)
10672
/* Checks whether insn is already recorded or yet to be decoded:
   true when ARM_RECORD already carries register or memory entries.
   (Boolean expression.)  */
#define INSN_RECORDED(ARM_RECORD) \
        (0 != (ARM_RECORD)->reg_rec_count || 0 != (ARM_RECORD)->mem_rec_count)

/* ARM memory record structure: one (length, address) span of memory
   that an instruction is about to modify.  */
struct arm_mem_r
{
  uint32_t len;    /* Record length.  */
  uint32_t addr;   /* Memory address.  */
};

/* ARM instruction record contains opcode of current insn
   and execution state (before entry to decode_insn()),
   contains list of to-be-modified registers and
   memory blocks (on return from decode_insn()).  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;          /* Address of the insn being decoded.  */
  uint32_t arm_insn;            /* Should accommodate thumb.  */
  uint32_t cond;                /* Condition code.  */
  uint32_t opcode;              /* Insn opcode.  */
  uint32_t decode;              /* Insn decode bits.  */
  uint32_t mem_rec_count;       /* No of mem records.  */
  uint32_t reg_rec_count;       /* No of reg records.  */
  uint32_t *arm_regs;           /* Registers to be saved for this record;
                                   filled in via REG_ALLOC.  */
  struct arm_mem_r *arm_mems;   /* Memory to be saved for this record;
                                   filled in via MEM_ALLOC.  */
} insn_decode_record;
10703
10704
/* Checks an ARM SBO ("should be one") / SBZ ("should be zero")
   mandatory field of INSN.  The field is LEN bits wide and starts at
   bit number BIT_NUM, counted from 1 (so BIT_NUM == 1 is bit 0 of
   INSN).  When SBO is non-zero every bit of the field must be one;
   when SBO is zero every bit of the field must be zero.  Return 1 when
   the field is valid, 0 otherwise.  An empty field (LEN == 0) is
   trivially valid.  */

static int
sbo_sbz (uint32_t insn, uint32_t bit_num, uint32_t len, uint32_t sbo)
{
  uint32_t field, mask;

  if (!len)
    return 1;

  /* Mask covering the low LEN bits; written to avoid the undefined
     behavior of a 32-bit shift when LEN == 32.  */
  mask = (len < 32) ? ((1u << len) - 1u) : 0xffffffffu;

  /* Extract the LEN-bit field starting at bit (BIT_NUM - 1).  */
  field = (insn >> (bit_num - 1)) & mask;

  /* For an SBZ field, flip the bits so that in both cases a valid
     field reads as all ones.  (The previous implementation tested
     "ones & sbo" inside the loop — always zero when SBO is zero, so
     SBZ checks could never succeed — and its "while (ones)" loop
     stopped at the most significant set bit, letting SBO fields with
     leading zero bits pass.)  */
  if (!sbo)
    field ^= mask;

  return field == mask;
}
10728
/* Overall result of a record attempt.  */
enum arm_record_result
{
  ARM_RECORD_SUCCESS = 0,
  ARM_RECORD_FAILURE = 1
};

/* Store flavour being decoded by arm_record_strx.  */
typedef enum
{
  ARM_RECORD_STRH=1,
  ARM_RECORD_STRD
} arm_record_strx_t;

/* Instruction set variant the insn being recorded belongs to.  */
typedef enum
{
  ARM_RECORD=1,
  THUMB_RECORD,
  THUMB2_RECORD
} record_type_t;
10747
10748
10749 static int
10750 arm_record_strx (insn_decode_record *arm_insn_r, uint32_t *record_buf,
10751 uint32_t *record_buf_mem, arm_record_strx_t str_type)
10752 {
10753
10754 struct regcache *reg_cache = arm_insn_r->regcache;
10755 ULONGEST u_regval[2]= {0};
10756
10757 uint32_t reg_src1 = 0, reg_src2 = 0;
10758 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
10759 uint32_t opcode1 = 0;
10760
10761 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10762 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10763 opcode1 = bits (arm_insn_r->arm_insn, 20, 24);
10764
10765
10766 if (14 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
10767 {
10768 /* 1) Handle misc store, immediate offset. */
10769 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10770 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10771 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10772 regcache_raw_read_unsigned (reg_cache, reg_src1,
10773 &u_regval[0]);
10774 if (ARM_PC_REGNUM == reg_src1)
10775 {
10776 /* If R15 was used as Rn, hence current PC+8. */
10777 u_regval[0] = u_regval[0] + 8;
10778 }
10779 offset_8 = (immed_high << 4) | immed_low;
10780 /* Calculate target store address. */
10781 if (14 == arm_insn_r->opcode)
10782 {
10783 tgt_mem_addr = u_regval[0] + offset_8;
10784 }
10785 else
10786 {
10787 tgt_mem_addr = u_regval[0] - offset_8;
10788 }
10789 if (ARM_RECORD_STRH == str_type)
10790 {
10791 record_buf_mem[0] = 2;
10792 record_buf_mem[1] = tgt_mem_addr;
10793 arm_insn_r->mem_rec_count = 1;
10794 }
10795 else if (ARM_RECORD_STRD == str_type)
10796 {
10797 record_buf_mem[0] = 4;
10798 record_buf_mem[1] = tgt_mem_addr;
10799 record_buf_mem[2] = 4;
10800 record_buf_mem[3] = tgt_mem_addr + 4;
10801 arm_insn_r->mem_rec_count = 2;
10802 }
10803 }
10804 else if (12 == arm_insn_r->opcode || 8 == arm_insn_r->opcode)
10805 {
10806 /* 2) Store, register offset. */
10807 /* Get Rm. */
10808 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10809 /* Get Rn. */
10810 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10811 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10812 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10813 if (15 == reg_src2)
10814 {
10815 /* If R15 was used as Rn, hence current PC+8. */
10816 u_regval[0] = u_regval[0] + 8;
10817 }
10818 /* Calculate target store address, Rn +/- Rm, register offset. */
10819 if (12 == arm_insn_r->opcode)
10820 {
10821 tgt_mem_addr = u_regval[0] + u_regval[1];
10822 }
10823 else
10824 {
10825 tgt_mem_addr = u_regval[1] - u_regval[0];
10826 }
10827 if (ARM_RECORD_STRH == str_type)
10828 {
10829 record_buf_mem[0] = 2;
10830 record_buf_mem[1] = tgt_mem_addr;
10831 arm_insn_r->mem_rec_count = 1;
10832 }
10833 else if (ARM_RECORD_STRD == str_type)
10834 {
10835 record_buf_mem[0] = 4;
10836 record_buf_mem[1] = tgt_mem_addr;
10837 record_buf_mem[2] = 4;
10838 record_buf_mem[3] = tgt_mem_addr + 4;
10839 arm_insn_r->mem_rec_count = 2;
10840 }
10841 }
10842 else if (11 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10843 || 2 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10844 {
10845 /* 3) Store, immediate pre-indexed. */
10846 /* 5) Store, immediate post-indexed. */
10847 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10848 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10849 offset_8 = (immed_high << 4) | immed_low;
10850 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10851 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10852 /* Calculate target store address, Rn +/- Rm, register offset. */
10853 if (15 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10854 {
10855 tgt_mem_addr = u_regval[0] + offset_8;
10856 }
10857 else
10858 {
10859 tgt_mem_addr = u_regval[0] - offset_8;
10860 }
10861 if (ARM_RECORD_STRH == str_type)
10862 {
10863 record_buf_mem[0] = 2;
10864 record_buf_mem[1] = tgt_mem_addr;
10865 arm_insn_r->mem_rec_count = 1;
10866 }
10867 else if (ARM_RECORD_STRD == str_type)
10868 {
10869 record_buf_mem[0] = 4;
10870 record_buf_mem[1] = tgt_mem_addr;
10871 record_buf_mem[2] = 4;
10872 record_buf_mem[3] = tgt_mem_addr + 4;
10873 arm_insn_r->mem_rec_count = 2;
10874 }
10875 /* Record Rn also as it changes. */
10876 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10877 arm_insn_r->reg_rec_count = 1;
10878 }
10879 else if (9 == arm_insn_r->opcode || 13 == arm_insn_r->opcode
10880 || 0 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10881 {
10882 /* 4) Store, register pre-indexed. */
10883 /* 6) Store, register post -indexed. */
10884 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10885 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10886 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10887 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10888 /* Calculate target store address, Rn +/- Rm, register offset. */
10889 if (13 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10890 {
10891 tgt_mem_addr = u_regval[0] + u_regval[1];
10892 }
10893 else
10894 {
10895 tgt_mem_addr = u_regval[1] - u_regval[0];
10896 }
10897 if (ARM_RECORD_STRH == str_type)
10898 {
10899 record_buf_mem[0] = 2;
10900 record_buf_mem[1] = tgt_mem_addr;
10901 arm_insn_r->mem_rec_count = 1;
10902 }
10903 else if (ARM_RECORD_STRD == str_type)
10904 {
10905 record_buf_mem[0] = 4;
10906 record_buf_mem[1] = tgt_mem_addr;
10907 record_buf_mem[2] = 4;
10908 record_buf_mem[3] = tgt_mem_addr + 4;
10909 arm_insn_r->mem_rec_count = 2;
10910 }
10911 /* Record Rn also as it changes. */
10912 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10913 arm_insn_r->reg_rec_count = 1;
10914 }
10915 return 0;
10916 }
10917
10918 /* Handling ARM extension space insns. */
10919
10920 static int
10921 arm_record_extension_space (insn_decode_record *arm_insn_r)
10922 {
10923 uint32_t ret = 0; /* Return value: -1:record failure ; 0:success */
10924 uint32_t opcode1 = 0, opcode2 = 0, insn_op1 = 0;
10925 uint32_t record_buf[8], record_buf_mem[8];
10926 uint32_t reg_src1 = 0;
10927 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
10928 struct regcache *reg_cache = arm_insn_r->regcache;
10929 ULONGEST u_regval = 0;
10930
10931 gdb_assert (!INSN_RECORDED(arm_insn_r));
10932 /* Handle unconditional insn extension space. */
10933
10934 opcode1 = bits (arm_insn_r->arm_insn, 20, 27);
10935 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
10936 if (arm_insn_r->cond)
10937 {
10938 /* PLD has no affect on architectural state, it just affects
10939 the caches. */
10940 if (5 == ((opcode1 & 0xE0) >> 5))
10941 {
10942 /* BLX(1) */
10943 record_buf[0] = ARM_PS_REGNUM;
10944 record_buf[1] = ARM_LR_REGNUM;
10945 arm_insn_r->reg_rec_count = 2;
10946 }
10947 /* STC2, LDC2, MCR2, MRC2, CDP2: <TBD>, co-processor insn. */
10948 }
10949
10950
10951 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
10952 if (3 == opcode1 && bit (arm_insn_r->arm_insn, 4))
10953 {
10954 ret = -1;
10955 /* Undefined instruction on ARM V5; need to handle if later
10956 versions define it. */
10957 }
10958
10959 opcode1 = bits (arm_insn_r->arm_insn, 24, 27);
10960 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
10961 insn_op1 = bits (arm_insn_r->arm_insn, 20, 23);
10962
10963 /* Handle arithmetic insn extension space. */
10964 if (!opcode1 && 9 == opcode2 && 1 != arm_insn_r->cond
10965 && !INSN_RECORDED(arm_insn_r))
10966 {
10967 /* Handle MLA(S) and MUL(S). */
10968 if (0 <= insn_op1 && 3 >= insn_op1)
10969 {
10970 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10971 record_buf[1] = ARM_PS_REGNUM;
10972 arm_insn_r->reg_rec_count = 2;
10973 }
10974 else if (4 <= insn_op1 && 15 >= insn_op1)
10975 {
10976 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
10977 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
10978 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
10979 record_buf[2] = ARM_PS_REGNUM;
10980 arm_insn_r->reg_rec_count = 3;
10981 }
10982 }
10983
10984 opcode1 = bits (arm_insn_r->arm_insn, 26, 27);
10985 opcode2 = bits (arm_insn_r->arm_insn, 23, 24);
10986 insn_op1 = bits (arm_insn_r->arm_insn, 21, 22);
10987
10988 /* Handle control insn extension space. */
10989
10990 if (!opcode1 && 2 == opcode2 && !bit (arm_insn_r->arm_insn, 20)
10991 && 1 != arm_insn_r->cond && !INSN_RECORDED(arm_insn_r))
10992 {
10993 if (!bit (arm_insn_r->arm_insn,25))
10994 {
10995 if (!bits (arm_insn_r->arm_insn, 4, 7))
10996 {
10997 if ((0 == insn_op1) || (2 == insn_op1))
10998 {
10999 /* MRS. */
11000 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11001 arm_insn_r->reg_rec_count = 1;
11002 }
11003 else if (1 == insn_op1)
11004 {
11005 /* CSPR is going to be changed. */
11006 record_buf[0] = ARM_PS_REGNUM;
11007 arm_insn_r->reg_rec_count = 1;
11008 }
11009 else if (3 == insn_op1)
11010 {
11011 /* SPSR is going to be changed. */
11012 /* We need to get SPSR value, which is yet to be done. */
11013 printf_unfiltered (_("Process record does not support "
11014 "instruction 0x%0x at address %s.\n"),
11015 arm_insn_r->arm_insn,
11016 paddress (arm_insn_r->gdbarch,
11017 arm_insn_r->this_addr));
11018 return -1;
11019 }
11020 }
11021 else if (1 == bits (arm_insn_r->arm_insn, 4, 7))
11022 {
11023 if (1 == insn_op1)
11024 {
11025 /* BX. */
11026 record_buf[0] = ARM_PS_REGNUM;
11027 arm_insn_r->reg_rec_count = 1;
11028 }
11029 else if (3 == insn_op1)
11030 {
11031 /* CLZ. */
11032 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11033 arm_insn_r->reg_rec_count = 1;
11034 }
11035 }
11036 else if (3 == bits (arm_insn_r->arm_insn, 4, 7))
11037 {
11038 /* BLX. */
11039 record_buf[0] = ARM_PS_REGNUM;
11040 record_buf[1] = ARM_LR_REGNUM;
11041 arm_insn_r->reg_rec_count = 2;
11042 }
11043 else if (5 == bits (arm_insn_r->arm_insn, 4, 7))
11044 {
11045 /* QADD, QSUB, QDADD, QDSUB */
11046 record_buf[0] = ARM_PS_REGNUM;
11047 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
11048 arm_insn_r->reg_rec_count = 2;
11049 }
11050 else if (7 == bits (arm_insn_r->arm_insn, 4, 7))
11051 {
11052 /* BKPT. */
11053 record_buf[0] = ARM_PS_REGNUM;
11054 record_buf[1] = ARM_LR_REGNUM;
11055 arm_insn_r->reg_rec_count = 2;
11056
11057 /* Save SPSR also;how? */
11058 printf_unfiltered (_("Process record does not support "
11059 "instruction 0x%0x at address %s.\n"),
11060 arm_insn_r->arm_insn,
11061 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11062 return -1;
11063 }
11064 else if(8 == bits (arm_insn_r->arm_insn, 4, 7)
11065 || 10 == bits (arm_insn_r->arm_insn, 4, 7)
11066 || 12 == bits (arm_insn_r->arm_insn, 4, 7)
11067 || 14 == bits (arm_insn_r->arm_insn, 4, 7)
11068 )
11069 {
11070 if (0 == insn_op1 || 1 == insn_op1)
11071 {
11072 /* SMLA<x><y>, SMLAW<y>, SMULW<y>. */
11073 /* We dont do optimization for SMULW<y> where we
11074 need only Rd. */
11075 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11076 record_buf[1] = ARM_PS_REGNUM;
11077 arm_insn_r->reg_rec_count = 2;
11078 }
11079 else if (2 == insn_op1)
11080 {
11081 /* SMLAL<x><y>. */
11082 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11083 record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
11084 arm_insn_r->reg_rec_count = 2;
11085 }
11086 else if (3 == insn_op1)
11087 {
11088 /* SMUL<x><y>. */
11089 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11090 arm_insn_r->reg_rec_count = 1;
11091 }
11092 }
11093 }
11094 else
11095 {
11096 /* MSR : immediate form. */
11097 if (1 == insn_op1)
11098 {
11099 /* CSPR is going to be changed. */
11100 record_buf[0] = ARM_PS_REGNUM;
11101 arm_insn_r->reg_rec_count = 1;
11102 }
11103 else if (3 == insn_op1)
11104 {
11105 /* SPSR is going to be changed. */
11106 /* we need to get SPSR value, which is yet to be done */
11107 printf_unfiltered (_("Process record does not support "
11108 "instruction 0x%0x at address %s.\n"),
11109 arm_insn_r->arm_insn,
11110 paddress (arm_insn_r->gdbarch,
11111 arm_insn_r->this_addr));
11112 return -1;
11113 }
11114 }
11115 }
11116
11117 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
11118 opcode2 = bits (arm_insn_r->arm_insn, 20, 24);
11119 insn_op1 = bits (arm_insn_r->arm_insn, 5, 6);
11120
11121 /* Handle load/store insn extension space. */
11122
11123 if (!opcode1 && bit (arm_insn_r->arm_insn, 7)
11124 && bit (arm_insn_r->arm_insn, 4) && 1 != arm_insn_r->cond
11125 && !INSN_RECORDED(arm_insn_r))
11126 {
11127 /* SWP/SWPB. */
11128 if (0 == insn_op1)
11129 {
11130 /* These insn, changes register and memory as well. */
11131 /* SWP or SWPB insn. */
11132 /* Get memory address given by Rn. */
11133 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11134 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11135 /* SWP insn ?, swaps word. */
11136 if (8 == arm_insn_r->opcode)
11137 {
11138 record_buf_mem[0] = 4;
11139 }
11140 else
11141 {
11142 /* SWPB insn, swaps only byte. */
11143 record_buf_mem[0] = 1;
11144 }
11145 record_buf_mem[1] = u_regval;
11146 arm_insn_r->mem_rec_count = 1;
11147 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11148 arm_insn_r->reg_rec_count = 1;
11149 }
11150 else if (1 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11151 {
11152 /* STRH. */
11153 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11154 ARM_RECORD_STRH);
11155 }
11156 else if (2 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11157 {
11158 /* LDRD. */
11159 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11160 record_buf[1] = record_buf[0] + 1;
11161 arm_insn_r->reg_rec_count = 2;
11162 }
11163 else if (3 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11164 {
11165 /* STRD. */
11166 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11167 ARM_RECORD_STRD);
11168 }
11169 else if (bit (arm_insn_r->arm_insn, 20) && insn_op1 <= 3)
11170 {
11171 /* LDRH, LDRSB, LDRSH. */
11172 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11173 arm_insn_r->reg_rec_count = 1;
11174 }
11175
11176 }
11177
11178 opcode1 = bits (arm_insn_r->arm_insn, 23, 27);
11179 if (24 == opcode1 && bit (arm_insn_r->arm_insn, 21)
11180 && !INSN_RECORDED(arm_insn_r))
11181 {
11182 ret = -1;
11183 /* Handle coprocessor insn extension space. */
11184 }
11185
11186 /* To be done for ARMv5 and later; as of now we return -1. */
11187 if (-1 == ret)
11188 printf_unfiltered (_("Process record does not support instruction x%0x "
11189 "at address %s.\n"),arm_insn_r->arm_insn,
11190 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11191
11192
11193 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11194 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11195
11196 return ret;
11197 }
11198
11199 /* Handling opcode 000 insns. */
11200
11201 static int
11202 arm_record_data_proc_misc_ld_str (insn_decode_record *arm_insn_r)
11203 {
11204 struct regcache *reg_cache = arm_insn_r->regcache;
11205 uint32_t record_buf[8], record_buf_mem[8];
11206 ULONGEST u_regval[2] = {0};
11207
11208 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
11209 uint32_t immed_high = 0, immed_low = 0, offset_8 = 0, tgt_mem_addr = 0;
11210 uint32_t opcode1 = 0;
11211
11212 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11213 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11214 opcode1 = bits (arm_insn_r->arm_insn, 20, 24);
11215
11216 /* Data processing insn /multiply insn. */
11217 if (9 == arm_insn_r->decode
11218 && ((4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
11219 || (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)))
11220 {
11221 /* Handle multiply instructions. */
11222 /* MLA, MUL, SMLAL, SMULL, UMLAL, UMULL. */
11223 if (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)
11224 {
11225 /* Handle MLA and MUL. */
11226 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
11227 record_buf[1] = ARM_PS_REGNUM;
11228 arm_insn_r->reg_rec_count = 2;
11229 }
11230 else if (4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
11231 {
11232 /* Handle SMLAL, SMULL, UMLAL, UMULL. */
11233 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
11234 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
11235 record_buf[2] = ARM_PS_REGNUM;
11236 arm_insn_r->reg_rec_count = 3;
11237 }
11238 }
11239 else if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
11240 && (11 == arm_insn_r->decode || 13 == arm_insn_r->decode))
11241 {
11242 /* Handle misc load insns, as 20th bit (L = 1). */
11243 /* LDR insn has a capability to do branching, if
11244 MOV LR, PC is precceded by LDR insn having Rn as R15
11245 in that case, it emulates branch and link insn, and hence we
11246 need to save CSPR and PC as well. I am not sure this is right
11247 place; as opcode = 010 LDR insn make this happen, if R15 was
11248 used. */
11249 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11250 if (15 != reg_dest)
11251 {
11252 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11253 arm_insn_r->reg_rec_count = 1;
11254 }
11255 else
11256 {
11257 record_buf[0] = reg_dest;
11258 record_buf[1] = ARM_PS_REGNUM;
11259 arm_insn_r->reg_rec_count = 2;
11260 }
11261 }
11262 else if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
11263 && sbo_sbz (arm_insn_r->arm_insn, 5, 12, 0)
11264 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
11265 && 2 == bits (arm_insn_r->arm_insn, 20, 21))
11266 {
11267 /* Handle MSR insn. */
11268 if (9 == arm_insn_r->opcode)
11269 {
11270 /* CSPR is going to be changed. */
11271 record_buf[0] = ARM_PS_REGNUM;
11272 arm_insn_r->reg_rec_count = 1;
11273 }
11274 else
11275 {
11276 /* SPSR is going to be changed. */
11277 /* How to read SPSR value? */
11278 printf_unfiltered (_("Process record does not support instruction "
11279 "0x%0x at address %s.\n"),
11280 arm_insn_r->arm_insn,
11281 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11282 return -1;
11283 }
11284 }
11285 else if (9 == arm_insn_r->decode
11286 && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
11287 && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11288 {
11289 /* Handling SWP, SWPB. */
11290 /* These insn, changes register and memory as well. */
11291 /* SWP or SWPB insn. */
11292
11293 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11294 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11295 /* SWP insn ?, swaps word. */
11296 if (8 == arm_insn_r->opcode)
11297 {
11298 record_buf_mem[0] = 4;
11299 }
11300 else
11301 {
11302 /* SWPB insn, swaps only byte. */
11303 record_buf_mem[0] = 1;
11304 }
11305 record_buf_mem[1] = u_regval[0];
11306 arm_insn_r->mem_rec_count = 1;
11307 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11308 arm_insn_r->reg_rec_count = 1;
11309 }
11310 else if (3 == arm_insn_r->decode && 0x12 == opcode1
11311 && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
11312 {
11313 /* Handle BLX, branch and link/exchange. */
11314 if (9 == arm_insn_r->opcode)
11315 {
11316 /* Branch is chosen by setting T bit of CSPR, bitp[0] of Rm,
11317 and R14 stores the return address. */
11318 record_buf[0] = ARM_PS_REGNUM;
11319 record_buf[1] = ARM_LR_REGNUM;
11320 arm_insn_r->reg_rec_count = 2;
11321 }
11322 }
11323 else if (7 == arm_insn_r->decode && 0x12 == opcode1)
11324 {
11325 /* Handle enhanced software breakpoint insn, BKPT. */
11326 /* CPSR is changed to be executed in ARM state, disabling normal
11327 interrupts, entering abort mode. */
11328 /* According to high vector configuration PC is set. */
11329 /* user hit breakpoint and type reverse, in
11330 that case, we need to go back with previous CPSR and
11331 Program Counter. */
11332 record_buf[0] = ARM_PS_REGNUM;
11333 record_buf[1] = ARM_LR_REGNUM;
11334 arm_insn_r->reg_rec_count = 2;
11335
11336 /* Save SPSR also; how? */
11337 printf_unfiltered (_("Process record does not support instruction "
11338 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11339 paddress (arm_insn_r->gdbarch,
11340 arm_insn_r->this_addr));
11341 return -1;
11342 }
11343 else if (11 == arm_insn_r->decode
11344 && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11345 {
11346 /* Handle enhanced store insns and DSP insns (e.g. LDRD). */
11347
11348 /* Handle str(x) insn */
11349 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11350 ARM_RECORD_STRH);
11351 }
11352 else if (1 == arm_insn_r->decode && 0x12 == opcode1
11353 && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
11354 {
11355 /* Handle BX, branch and link/exchange. */
11356 /* Branch is chosen by setting T bit of CSPR, bitp[0] of Rm. */
11357 record_buf[0] = ARM_PS_REGNUM;
11358 arm_insn_r->reg_rec_count = 1;
11359 }
11360 else if (1 == arm_insn_r->decode && 0x16 == opcode1
11361 && sbo_sbz (arm_insn_r->arm_insn, 9, 4, 1)
11362 && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1))
11363 {
11364 /* Count leading zeros: CLZ. */
11365 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11366 arm_insn_r->reg_rec_count = 1;
11367 }
11368 else if (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
11369 && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
11370 && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1)
11371 && sbo_sbz (arm_insn_r->arm_insn, 1, 12, 0)
11372 )
11373 {
11374 /* Handle MRS insn. */
11375 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11376 arm_insn_r->reg_rec_count = 1;
11377 }
11378 else if (arm_insn_r->opcode <= 15)
11379 {
11380 /* Normal data processing insns. */
11381 /* Out of 11 shifter operands mode, all the insn modifies destination
11382 register, which is specified by 13-16 decode. */
11383 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11384 record_buf[1] = ARM_PS_REGNUM;
11385 arm_insn_r->reg_rec_count = 2;
11386 }
11387 else
11388 {
11389 return -1;
11390 }
11391
11392 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11393 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11394 return 0;
11395 }
11396
11397 /* Handling opcode 001 insns. */
11398
11399 static int
11400 arm_record_data_proc_imm (insn_decode_record *arm_insn_r)
11401 {
11402 uint32_t record_buf[8], record_buf_mem[8];
11403
11404 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11405 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11406
11407 if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
11408 && 2 == bits (arm_insn_r->arm_insn, 20, 21)
11409 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
11410 )
11411 {
11412 /* Handle MSR insn. */
11413 if (9 == arm_insn_r->opcode)
11414 {
11415 /* CSPR is going to be changed. */
11416 record_buf[0] = ARM_PS_REGNUM;
11417 arm_insn_r->reg_rec_count = 1;
11418 }
11419 else
11420 {
11421 /* SPSR is going to be changed. */
11422 }
11423 }
11424 else if (arm_insn_r->opcode <= 15)
11425 {
11426 /* Normal data processing insns. */
11427 /* Out of 11 shifter operands mode, all the insn modifies destination
11428 register, which is specified by 13-16 decode. */
11429 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11430 record_buf[1] = ARM_PS_REGNUM;
11431 arm_insn_r->reg_rec_count = 2;
11432 }
11433 else
11434 {
11435 return -1;
11436 }
11437
11438 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11439 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11440 return 0;
11441 }
11442
11443 /* Handle ARM mode instructions with opcode 010. */
11444
11445 static int
11446 arm_record_ld_st_imm_offset (insn_decode_record *arm_insn_r)
11447 {
11448 struct regcache *reg_cache = arm_insn_r->regcache;
11449
11450 uint32_t reg_base , reg_dest;
11451 uint32_t offset_12, tgt_mem_addr;
11452 uint32_t record_buf[8], record_buf_mem[8];
11453 unsigned char wback;
11454 ULONGEST u_regval;
11455
11456 /* Calculate wback. */
11457 wback = (bit (arm_insn_r->arm_insn, 24) == 0)
11458 || (bit (arm_insn_r->arm_insn, 21) == 1);
11459
11460 arm_insn_r->reg_rec_count = 0;
11461 reg_base = bits (arm_insn_r->arm_insn, 16, 19);
11462
11463 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11464 {
11465 /* LDR (immediate), LDR (literal), LDRB (immediate), LDRB (literal), LDRBT
11466 and LDRT. */
11467
11468 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11469 record_buf[arm_insn_r->reg_rec_count++] = reg_dest;
11470
11471 /* The LDR instruction is capable of doing branching. If MOV LR, PC
11472 preceeds a LDR instruction having R15 as reg_base, it
11473 emulates a branch and link instruction, and hence we need to save
11474 CPSR and PC as well. */
11475 if (ARM_PC_REGNUM == reg_dest)
11476 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
11477
11478 /* If wback is true, also save the base register, which is going to be
11479 written to. */
11480 if (wback)
11481 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
11482 }
11483 else
11484 {
11485 /* STR (immediate), STRB (immediate), STRBT and STRT. */
11486
11487 offset_12 = bits (arm_insn_r->arm_insn, 0, 11);
11488 regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval);
11489
11490 /* Handle bit U. */
11491 if (bit (arm_insn_r->arm_insn, 23))
11492 {
11493 /* U == 1: Add the offset. */
11494 tgt_mem_addr = (uint32_t) u_regval + offset_12;
11495 }
11496 else
11497 {
11498 /* U == 0: subtract the offset. */
11499 tgt_mem_addr = (uint32_t) u_regval - offset_12;
11500 }
11501
11502 /* Bit 22 tells us whether the store instruction writes 1 byte or 4
11503 bytes. */
11504 if (bit (arm_insn_r->arm_insn, 22))
11505 {
11506 /* STRB and STRBT: 1 byte. */
11507 record_buf_mem[0] = 1;
11508 }
11509 else
11510 {
11511 /* STR and STRT: 4 bytes. */
11512 record_buf_mem[0] = 4;
11513 }
11514
11515 /* Handle bit P. */
11516 if (bit (arm_insn_r->arm_insn, 24))
11517 record_buf_mem[1] = tgt_mem_addr;
11518 else
11519 record_buf_mem[1] = (uint32_t) u_regval;
11520
11521 arm_insn_r->mem_rec_count = 1;
11522
11523 /* If wback is true, also save the base register, which is going to be
11524 written to. */
11525 if (wback)
11526 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
11527 }
11528
11529 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11530 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11531 return 0;
11532 }
11533
11534 /* Handling opcode 011 insns. */
11535
11536 static int
11537 arm_record_ld_st_reg_offset (insn_decode_record *arm_insn_r)
11538 {
11539 struct regcache *reg_cache = arm_insn_r->regcache;
11540
11541 uint32_t shift_imm = 0;
11542 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
11543 uint32_t offset_12 = 0, tgt_mem_addr = 0;
11544 uint32_t record_buf[8], record_buf_mem[8];
11545
11546 LONGEST s_word;
11547 ULONGEST u_regval[2];
11548
11549 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11550 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11551
11552 /* Handle enhanced store insns and LDRD DSP insn,
11553 order begins according to addressing modes for store insns
11554 STRH insn. */
11555
11556 /* LDR or STR? */
11557 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11558 {
11559 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11560 /* LDR insn has a capability to do branching, if
11561 MOV LR, PC is precedded by LDR insn having Rn as R15
11562 in that case, it emulates branch and link insn, and hence we
11563 need to save CSPR and PC as well. */
11564 if (15 != reg_dest)
11565 {
11566 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11567 arm_insn_r->reg_rec_count = 1;
11568 }
11569 else
11570 {
11571 record_buf[0] = reg_dest;
11572 record_buf[1] = ARM_PS_REGNUM;
11573 arm_insn_r->reg_rec_count = 2;
11574 }
11575 }
11576 else
11577 {
11578 if (! bits (arm_insn_r->arm_insn, 4, 11))
11579 {
11580 /* Store insn, register offset and register pre-indexed,
11581 register post-indexed. */
11582 /* Get Rm. */
11583 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11584 /* Get Rn. */
11585 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11586 regcache_raw_read_unsigned (reg_cache, reg_src1
11587 , &u_regval[0]);
11588 regcache_raw_read_unsigned (reg_cache, reg_src2
11589 , &u_regval[1]);
11590 if (15 == reg_src2)
11591 {
11592 /* If R15 was used as Rn, hence current PC+8. */
11593 /* Pre-indexed mode doesnt reach here ; illegal insn. */
11594 u_regval[0] = u_regval[0] + 8;
11595 }
11596 /* Calculate target store address, Rn +/- Rm, register offset. */
11597 /* U == 1. */
11598 if (bit (arm_insn_r->arm_insn, 23))
11599 {
11600 tgt_mem_addr = u_regval[0] + u_regval[1];
11601 }
11602 else
11603 {
11604 tgt_mem_addr = u_regval[1] - u_regval[0];
11605 }
11606
11607 switch (arm_insn_r->opcode)
11608 {
11609 /* STR. */
11610 case 8:
11611 case 12:
11612 /* STR. */
11613 case 9:
11614 case 13:
11615 /* STRT. */
11616 case 1:
11617 case 5:
11618 /* STR. */
11619 case 0:
11620 case 4:
11621 record_buf_mem[0] = 4;
11622 break;
11623
11624 /* STRB. */
11625 case 10:
11626 case 14:
11627 /* STRB. */
11628 case 11:
11629 case 15:
11630 /* STRBT. */
11631 case 3:
11632 case 7:
11633 /* STRB. */
11634 case 2:
11635 case 6:
11636 record_buf_mem[0] = 1;
11637 break;
11638
11639 default:
11640 gdb_assert_not_reached ("no decoding pattern found");
11641 break;
11642 }
11643 record_buf_mem[1] = tgt_mem_addr;
11644 arm_insn_r->mem_rec_count = 1;
11645
11646 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11647 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11648 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11649 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11650 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11651 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11652 )
11653 {
11654 /* Rn is going to be changed in pre-indexed mode and
11655 post-indexed mode as well. */
11656 record_buf[0] = reg_src2;
11657 arm_insn_r->reg_rec_count = 1;
11658 }
11659 }
11660 else
11661 {
11662 /* Store insn, scaled register offset; scaled pre-indexed. */
11663 offset_12 = bits (arm_insn_r->arm_insn, 5, 6);
11664 /* Get Rm. */
11665 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11666 /* Get Rn. */
11667 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11668 /* Get shift_imm. */
11669 shift_imm = bits (arm_insn_r->arm_insn, 7, 11);
11670 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11671 regcache_raw_read_signed (reg_cache, reg_src1, &s_word);
11672 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11673 /* Offset_12 used as shift. */
11674 switch (offset_12)
11675 {
11676 case 0:
11677 /* Offset_12 used as index. */
11678 offset_12 = u_regval[0] << shift_imm;
11679 break;
11680
11681 case 1:
11682 offset_12 = (!shift_imm)?0:u_regval[0] >> shift_imm;
11683 break;
11684
11685 case 2:
11686 if (!shift_imm)
11687 {
11688 if (bit (u_regval[0], 31))
11689 {
11690 offset_12 = 0xFFFFFFFF;
11691 }
11692 else
11693 {
11694 offset_12 = 0;
11695 }
11696 }
11697 else
11698 {
11699 /* This is arithmetic shift. */
11700 offset_12 = s_word >> shift_imm;
11701 }
11702 break;
11703
11704 case 3:
11705 if (!shift_imm)
11706 {
11707 regcache_raw_read_unsigned (reg_cache, ARM_PS_REGNUM,
11708 &u_regval[1]);
11709 /* Get C flag value and shift it by 31. */
11710 offset_12 = (((bit (u_regval[1], 29)) << 31) \
11711 | (u_regval[0]) >> 1);
11712 }
11713 else
11714 {
11715 offset_12 = (u_regval[0] >> shift_imm) \
11716 | (u_regval[0] <<
11717 (sizeof(uint32_t) - shift_imm));
11718 }
11719 break;
11720
11721 default:
11722 gdb_assert_not_reached ("no decoding pattern found");
11723 break;
11724 }
11725
11726 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11727 /* bit U set. */
11728 if (bit (arm_insn_r->arm_insn, 23))
11729 {
11730 tgt_mem_addr = u_regval[1] + offset_12;
11731 }
11732 else
11733 {
11734 tgt_mem_addr = u_regval[1] - offset_12;
11735 }
11736
11737 switch (arm_insn_r->opcode)
11738 {
11739 /* STR. */
11740 case 8:
11741 case 12:
11742 /* STR. */
11743 case 9:
11744 case 13:
11745 /* STRT. */
11746 case 1:
11747 case 5:
11748 /* STR. */
11749 case 0:
11750 case 4:
11751 record_buf_mem[0] = 4;
11752 break;
11753
11754 /* STRB. */
11755 case 10:
11756 case 14:
11757 /* STRB. */
11758 case 11:
11759 case 15:
11760 /* STRBT. */
11761 case 3:
11762 case 7:
11763 /* STRB. */
11764 case 2:
11765 case 6:
11766 record_buf_mem[0] = 1;
11767 break;
11768
11769 default:
11770 gdb_assert_not_reached ("no decoding pattern found");
11771 break;
11772 }
11773 record_buf_mem[1] = tgt_mem_addr;
11774 arm_insn_r->mem_rec_count = 1;
11775
11776 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11777 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11778 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11779 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11780 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11781 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11782 )
11783 {
11784 /* Rn is going to be changed in register scaled pre-indexed
11785 mode,and scaled post indexed mode. */
11786 record_buf[0] = reg_src2;
11787 arm_insn_r->reg_rec_count = 1;
11788 }
11789 }
11790 }
11791
11792 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11793 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11794 return 0;
11795 }
11796
11797 /* Handle ARM mode instructions with opcode 100. */
11798
11799 static int
11800 arm_record_ld_st_multiple (insn_decode_record *arm_insn_r)
11801 {
11802 struct regcache *reg_cache = arm_insn_r->regcache;
11803 uint32_t register_count = 0, register_bits;
11804 uint32_t reg_base, addr_mode;
11805 uint32_t record_buf[24], record_buf_mem[48];
11806 uint32_t wback;
11807 ULONGEST u_regval;
11808
11809 /* Fetch the list of registers. */
11810 register_bits = bits (arm_insn_r->arm_insn, 0, 15);
11811 arm_insn_r->reg_rec_count = 0;
11812
11813 /* Fetch the base register that contains the address we are loading data
11814 to. */
11815 reg_base = bits (arm_insn_r->arm_insn, 16, 19);
11816
11817 /* Calculate wback. */
11818 wback = (bit (arm_insn_r->arm_insn, 21) == 1);
11819
11820 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11821 {
11822 /* LDM/LDMIA/LDMFD, LDMDA/LDMFA, LDMDB and LDMIB. */
11823
11824 /* Find out which registers are going to be loaded from memory. */
11825 while (register_bits)
11826 {
11827 if (register_bits & 0x00000001)
11828 record_buf[arm_insn_r->reg_rec_count++] = register_count;
11829 register_bits = register_bits >> 1;
11830 register_count++;
11831 }
11832
11833
11834 /* If wback is true, also save the base register, which is going to be
11835 written to. */
11836 if (wback)
11837 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
11838
11839 /* Save the CPSR register. */
11840 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
11841 }
11842 else
11843 {
11844 /* STM (STMIA, STMEA), STMDA (STMED), STMDB (STMFD) and STMIB (STMFA). */
11845
11846 addr_mode = bits (arm_insn_r->arm_insn, 23, 24);
11847
11848 regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval);
11849
11850 /* Find out how many registers are going to be stored to memory. */
11851 while (register_bits)
11852 {
11853 if (register_bits & 0x00000001)
11854 register_count++;
11855 register_bits = register_bits >> 1;
11856 }
11857
11858 switch (addr_mode)
11859 {
11860 /* STMDA (STMED): Decrement after. */
11861 case 0:
11862 record_buf_mem[1] = (uint32_t) u_regval
11863 - register_count * INT_REGISTER_SIZE + 4;
11864 break;
11865 /* STM (STMIA, STMEA): Increment after. */
11866 case 1:
11867 record_buf_mem[1] = (uint32_t) u_regval;
11868 break;
11869 /* STMDB (STMFD): Decrement before. */
11870 case 2:
11871 record_buf_mem[1] = (uint32_t) u_regval
11872 - register_count * INT_REGISTER_SIZE;
11873 break;
11874 /* STMIB (STMFA): Increment before. */
11875 case 3:
11876 record_buf_mem[1] = (uint32_t) u_regval + INT_REGISTER_SIZE;
11877 break;
11878 default:
11879 gdb_assert_not_reached ("no decoding pattern found");
11880 break;
11881 }
11882
11883 record_buf_mem[0] = register_count * INT_REGISTER_SIZE;
11884 arm_insn_r->mem_rec_count = 1;
11885
11886 /* If wback is true, also save the base register, which is going to be
11887 written to. */
11888 if (wback)
11889 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
11890 }
11891
11892 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11893 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11894 return 0;
11895 }
11896
11897 /* Handling opcode 101 insns. */
11898
11899 static int
11900 arm_record_b_bl (insn_decode_record *arm_insn_r)
11901 {
11902 uint32_t record_buf[8];
11903
11904 /* Handle B, BL, BLX(1) insns. */
11905 /* B simply branches so we do nothing here. */
11906 /* Note: BLX(1) doesnt fall here but instead it falls into
11907 extension space. */
11908 if (bit (arm_insn_r->arm_insn, 24))
11909 {
11910 record_buf[0] = ARM_LR_REGNUM;
11911 arm_insn_r->reg_rec_count = 1;
11912 }
11913
11914 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11915
11916 return 0;
11917 }
11918
11919 /* Handling opcode 110 insns. */
11920
11921 static int
11922 arm_record_unsupported_insn (insn_decode_record *arm_insn_r)
11923 {
11924 printf_unfiltered (_("Process record does not support instruction "
11925 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11926 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11927
11928 return -1;
11929 }
11930
/* Record handler for vector data transfer instructions.  */

/* Record the registers clobbered by VFP/Neon core<->extension register
   transfer insns (VMOV, VMRS, VMSR, VDUP).  Always returns 0.  */

static int
arm_record_vdata_transfer_insn (insn_decode_record *arm_insn_r)
{
  uint32_t bits_a, bit_c, bit_l, reg_t, reg_v;
  uint32_t record_buf[4];

  const int num_regs = gdbarch_num_regs (arm_insn_r->gdbarch);
  /* Rt: the core register field (bits 12-15).  */
  reg_t = bits (arm_insn_r->arm_insn, 12, 15);
  /* NOTE(review): reg_v and bits_a both read bits 21-23; for the VMOV
     scalar forms the vector register field is usually elsewhere in the
     encoding -- confirm against the ARM ARM before relying on this.  */
  reg_v = bits (arm_insn_r->arm_insn, 21, 23);
  bits_a = bits (arm_insn_r->arm_insn, 21, 23);
  /* L bit (bit 20): transfer direction.  */
  bit_l = bit (arm_insn_r->arm_insn, 20);
  /* C bit (bit 8).  */
  bit_c = bit (arm_insn_r->arm_insn, 8);

  /* Handle VMOV instruction.  */
  if (bit_l && bit_c)
    {
      /* Transfer towards the core register: only Rt changes.  */
      record_buf[0] = reg_t;
      arm_insn_r->reg_rec_count = 1;
    }
  else if (bit_l && !bit_c)
    {
      /* Handle VMOV instruction.  */
      if (bits_a == 0x00)
        {
          /* NOTE(review): bit_l is bit 20, so inside this branch
             bit (insn, 20) is always set and the else arm below is
             unreachable.  */
          if (bit (arm_insn_r->arm_insn, 20))
            record_buf[0] = reg_t;
          else
            record_buf[0] = num_regs + (bit (arm_insn_r->arm_insn, 7) |
                            (reg_v << 1));

          arm_insn_r->reg_rec_count = 1;
        }
      /* Handle VMRS instruction.  */
      else if (bits_a == 0x07)
        {
          /* VMRS with Rt == 15 transfers the flags into APSR.  */
          if (reg_t == 15)
            reg_t = ARM_PS_REGNUM;

          record_buf[0] = reg_t;
          arm_insn_r->reg_rec_count = 1;
        }
    }
  else if (!bit_l && !bit_c)
    {
      /* Handle VMOV instruction.  */
      if (bits_a == 0x00)
        {
          /* NOTE(review): bit_l (bit 20) is clear in this branch, so
             this test is always false and only the else arm (the
             extension register as destination) can execute.  */
          if (bit (arm_insn_r->arm_insn, 20))
            record_buf[0] = reg_t;
          else
            record_buf[0] = num_regs + (bit (arm_insn_r->arm_insn, 7) |
                            (reg_v << 1));

          arm_insn_r->reg_rec_count = 1;
        }
      /* Handle VMSR instruction.  */
      else if (bits_a == 0x07)
        {
          /* VMSR writes the floating-point status register.  */
          record_buf[0] = ARM_FPSCR_REGNUM;
          arm_insn_r->reg_rec_count = 1;
        }
    }
  else if (!bit_l && bit_c)
    {
      /* Handle VMOV instruction.  */
      if (!(bits_a & 0x04))
        {
          /* Core register to scalar: one D register changes.  */
          record_buf[0] = (reg_v | (bit (arm_insn_r->arm_insn, 7) << 4))
                          + ARM_D0_REGNUM;
          arm_insn_r->reg_rec_count = 1;
        }
      /* Handle VDUP instruction.  */
      else
        {
          /* Bit 21 (Q) selects a quad register, recorded as a D pair.  */
          if (bit (arm_insn_r->arm_insn, 21))
            {
              reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4);
              record_buf[0] = reg_v + ARM_D0_REGNUM;
              record_buf[1] = reg_v + ARM_D0_REGNUM + 1;
              arm_insn_r->reg_rec_count = 2;
            }
          else
            {
              reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4);
              record_buf[0] = reg_v + ARM_D0_REGNUM;
              arm_insn_r->reg_rec_count = 1;
            }
        }
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  return 0;
}
12026
12027 /* Record handler for extension register load/store instructions. */
12028
12029 static int
12030 arm_record_exreg_ld_st_insn (insn_decode_record *arm_insn_r)
12031 {
12032 uint32_t opcode, single_reg;
12033 uint8_t op_vldm_vstm;
12034 uint32_t record_buf[8], record_buf_mem[128];
12035 ULONGEST u_regval = 0;
12036
12037 struct regcache *reg_cache = arm_insn_r->regcache;
12038 const int num_regs = gdbarch_num_regs (arm_insn_r->gdbarch);
12039
12040 opcode = bits (arm_insn_r->arm_insn, 20, 24);
12041 single_reg = bit (arm_insn_r->arm_insn, 8);
12042 op_vldm_vstm = opcode & 0x1b;
12043
12044 /* Handle VMOV instructions. */
12045 if ((opcode & 0x1e) == 0x04)
12046 {
12047 if (bit (arm_insn_r->arm_insn, 4))
12048 {
12049 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
12050 record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
12051 arm_insn_r->reg_rec_count = 2;
12052 }
12053 else
12054 {
12055 uint8_t reg_m = (bits (arm_insn_r->arm_insn, 0, 3) << 1)
12056 | bit (arm_insn_r->arm_insn, 5);
12057
12058 if (!single_reg)
12059 {
12060 record_buf[0] = num_regs + reg_m;
12061 record_buf[1] = num_regs + reg_m + 1;
12062 arm_insn_r->reg_rec_count = 2;
12063 }
12064 else
12065 {
12066 record_buf[0] = reg_m + ARM_D0_REGNUM;
12067 arm_insn_r->reg_rec_count = 1;
12068 }
12069 }
12070 }
12071 /* Handle VSTM and VPUSH instructions. */
12072 else if (op_vldm_vstm == 0x08 || op_vldm_vstm == 0x0a
12073 || op_vldm_vstm == 0x12)
12074 {
12075 uint32_t start_address, reg_rn, imm_off32, imm_off8, memory_count;
12076 uint32_t memory_index = 0;
12077
12078 reg_rn = bits (arm_insn_r->arm_insn, 16, 19);
12079 regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
12080 imm_off8 = bits (arm_insn_r->arm_insn, 0, 7);
12081 imm_off32 = imm_off8 << 24;
12082 memory_count = imm_off8;
12083
12084 if (bit (arm_insn_r->arm_insn, 23))
12085 start_address = u_regval;
12086 else
12087 start_address = u_regval - imm_off32;
12088
12089 if (bit (arm_insn_r->arm_insn, 21))
12090 {
12091 record_buf[0] = reg_rn;
12092 arm_insn_r->reg_rec_count = 1;
12093 }
12094
12095 while (memory_count > 0)
12096 {
12097 if (!single_reg)
12098 {
12099 record_buf_mem[memory_index] = start_address;
12100 record_buf_mem[memory_index + 1] = 4;
12101 start_address = start_address + 4;
12102 memory_index = memory_index + 2;
12103 }
12104 else
12105 {
12106 record_buf_mem[memory_index] = start_address;
12107 record_buf_mem[memory_index + 1] = 4;
12108 record_buf_mem[memory_index + 2] = start_address + 4;
12109 record_buf_mem[memory_index + 3] = 4;
12110 start_address = start_address + 8;
12111 memory_index = memory_index + 4;
12112 }
12113 memory_count--;
12114 }
12115 arm_insn_r->mem_rec_count = (memory_index >> 1);
12116 }
12117 /* Handle VLDM instructions. */
12118 else if (op_vldm_vstm == 0x09 || op_vldm_vstm == 0x0b
12119 || op_vldm_vstm == 0x13)
12120 {
12121 uint32_t reg_count, reg_vd;
12122 uint32_t reg_index = 0;
12123
12124 reg_vd = bits (arm_insn_r->arm_insn, 12, 15);
12125 reg_count = bits (arm_insn_r->arm_insn, 0, 7);
12126
12127 if (single_reg)
12128 reg_vd = reg_vd | (bit (arm_insn_r->arm_insn, 22) << 4);
12129 else
12130 reg_vd = (reg_vd << 1) | bit (arm_insn_r->arm_insn, 22);
12131
12132 if (bit (arm_insn_r->arm_insn, 21))
12133 record_buf[reg_index++] = bits (arm_insn_r->arm_insn, 16, 19);
12134
12135 while (reg_count > 0)
12136 {
12137 if (single_reg)
12138 record_buf[reg_index++] = num_regs + reg_vd + reg_count - 1;
12139 else
12140 record_buf[reg_index++] = ARM_D0_REGNUM + reg_vd + reg_count - 1;
12141
12142 reg_count--;
12143 }
12144 arm_insn_r->reg_rec_count = reg_index;
12145 }
12146 /* VSTR Vector store register. */
12147 else if ((opcode & 0x13) == 0x10)
12148 {
12149 uint32_t start_address, reg_rn, imm_off32, imm_off8, memory_count;
12150 uint32_t memory_index = 0;
12151
12152 reg_rn = bits (arm_insn_r->arm_insn, 16, 19);
12153 regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
12154 imm_off8 = bits (arm_insn_r->arm_insn, 0, 7);
12155 imm_off32 = imm_off8 << 24;
12156 memory_count = imm_off8;
12157
12158 if (bit (arm_insn_r->arm_insn, 23))
12159 start_address = u_regval + imm_off32;
12160 else
12161 start_address = u_regval - imm_off32;
12162
12163 if (single_reg)
12164 {
12165 record_buf_mem[memory_index] = start_address;
12166 record_buf_mem[memory_index + 1] = 4;
12167 arm_insn_r->mem_rec_count = 1;
12168 }
12169 else
12170 {
12171 record_buf_mem[memory_index] = start_address;
12172 record_buf_mem[memory_index + 1] = 4;
12173 record_buf_mem[memory_index + 2] = start_address + 4;
12174 record_buf_mem[memory_index + 3] = 4;
12175 arm_insn_r->mem_rec_count = 2;
12176 }
12177 }
12178 /* VLDR Vector load register. */
12179 else if ((opcode & 0x13) == 0x11)
12180 {
12181 uint32_t reg_vd = bits (arm_insn_r->arm_insn, 12, 15);
12182
12183 if (!single_reg)
12184 {
12185 reg_vd = reg_vd | (bit (arm_insn_r->arm_insn, 22) << 4);
12186 record_buf[0] = ARM_D0_REGNUM + reg_vd;
12187 }
12188 else
12189 {
12190 reg_vd = (reg_vd << 1) | bit (arm_insn_r->arm_insn, 22);
12191 record_buf[0] = num_regs + reg_vd;
12192 }
12193 arm_insn_r->reg_rec_count = 1;
12194 }
12195
12196 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
12197 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
12198 return 0;
12199 }
12200
12201 /* Record handler for arm/thumb mode VFP data processing instructions. */
12202
12203 static int
12204 arm_record_vfp_data_proc_insn (insn_decode_record *arm_insn_r)
12205 {
12206 uint32_t opc1, opc2, opc3, dp_op_sz, bit_d, reg_vd;
12207 uint32_t record_buf[4];
12208 enum insn_types {INSN_T0, INSN_T1, INSN_T2, INSN_T3, INSN_INV};
12209 enum insn_types curr_insn_type = INSN_INV;
12210
12211 reg_vd = bits (arm_insn_r->arm_insn, 12, 15);
12212 opc1 = bits (arm_insn_r->arm_insn, 20, 23);
12213 opc2 = bits (arm_insn_r->arm_insn, 16, 19);
12214 opc3 = bits (arm_insn_r->arm_insn, 6, 7);
12215 dp_op_sz = bit (arm_insn_r->arm_insn, 8);
12216 bit_d = bit (arm_insn_r->arm_insn, 22);
12217 opc1 = opc1 & 0x04;
12218
12219 /* Handle VMLA, VMLS. */
12220 if (opc1 == 0x00)
12221 {
12222 if (bit (arm_insn_r->arm_insn, 10))
12223 {
12224 if (bit (arm_insn_r->arm_insn, 6))
12225 curr_insn_type = INSN_T0;
12226 else
12227 curr_insn_type = INSN_T1;
12228 }
12229 else
12230 {
12231 if (dp_op_sz)
12232 curr_insn_type = INSN_T1;
12233 else
12234 curr_insn_type = INSN_T2;
12235 }
12236 }
12237 /* Handle VNMLA, VNMLS, VNMUL. */
12238 else if (opc1 == 0x01)
12239 {
12240 if (dp_op_sz)
12241 curr_insn_type = INSN_T1;
12242 else
12243 curr_insn_type = INSN_T2;
12244 }
12245 /* Handle VMUL. */
12246 else if (opc1 == 0x02 && !(opc3 & 0x01))
12247 {
12248 if (bit (arm_insn_r->arm_insn, 10))
12249 {
12250 if (bit (arm_insn_r->arm_insn, 6))
12251 curr_insn_type = INSN_T0;
12252 else
12253 curr_insn_type = INSN_T1;
12254 }
12255 else
12256 {
12257 if (dp_op_sz)
12258 curr_insn_type = INSN_T1;
12259 else
12260 curr_insn_type = INSN_T2;
12261 }
12262 }
12263 /* Handle VADD, VSUB. */
12264 else if (opc1 == 0x03)
12265 {
12266 if (!bit (arm_insn_r->arm_insn, 9))
12267 {
12268 if (bit (arm_insn_r->arm_insn, 6))
12269 curr_insn_type = INSN_T0;
12270 else
12271 curr_insn_type = INSN_T1;
12272 }
12273 else
12274 {
12275 if (dp_op_sz)
12276 curr_insn_type = INSN_T1;
12277 else
12278 curr_insn_type = INSN_T2;
12279 }
12280 }
12281 /* Handle VDIV. */
12282 else if (opc1 == 0x0b)
12283 {
12284 if (dp_op_sz)
12285 curr_insn_type = INSN_T1;
12286 else
12287 curr_insn_type = INSN_T2;
12288 }
12289 /* Handle all other vfp data processing instructions. */
12290 else if (opc1 == 0x0b)
12291 {
12292 /* Handle VMOV. */
12293 if (!(opc3 & 0x01) || (opc2 == 0x00 && opc3 == 0x01))
12294 {
12295 if (bit (arm_insn_r->arm_insn, 4))
12296 {
12297 if (bit (arm_insn_r->arm_insn, 6))
12298 curr_insn_type = INSN_T0;
12299 else
12300 curr_insn_type = INSN_T1;
12301 }
12302 else
12303 {
12304 if (dp_op_sz)
12305 curr_insn_type = INSN_T1;
12306 else
12307 curr_insn_type = INSN_T2;
12308 }
12309 }
12310 /* Handle VNEG and VABS. */
12311 else if ((opc2 == 0x01 && opc3 == 0x01)
12312 || (opc2 == 0x00 && opc3 == 0x03))
12313 {
12314 if (!bit (arm_insn_r->arm_insn, 11))
12315 {
12316 if (bit (arm_insn_r->arm_insn, 6))
12317 curr_insn_type = INSN_T0;
12318 else
12319 curr_insn_type = INSN_T1;
12320 }
12321 else
12322 {
12323 if (dp_op_sz)
12324 curr_insn_type = INSN_T1;
12325 else
12326 curr_insn_type = INSN_T2;
12327 }
12328 }
12329 /* Handle VSQRT. */
12330 else if (opc2 == 0x01 && opc3 == 0x03)
12331 {
12332 if (dp_op_sz)
12333 curr_insn_type = INSN_T1;
12334 else
12335 curr_insn_type = INSN_T2;
12336 }
12337 /* Handle VCVT. */
12338 else if (opc2 == 0x07 && opc3 == 0x03)
12339 {
12340 if (!dp_op_sz)
12341 curr_insn_type = INSN_T1;
12342 else
12343 curr_insn_type = INSN_T2;
12344 }
12345 else if (opc3 & 0x01)
12346 {
12347 /* Handle VCVT. */
12348 if ((opc2 == 0x08) || (opc2 & 0x0e) == 0x0c)
12349 {
12350 if (!bit (arm_insn_r->arm_insn, 18))
12351 curr_insn_type = INSN_T2;
12352 else
12353 {
12354 if (dp_op_sz)
12355 curr_insn_type = INSN_T1;
12356 else
12357 curr_insn_type = INSN_T2;
12358 }
12359 }
12360 /* Handle VCVT. */
12361 else if ((opc2 & 0x0e) == 0x0a || (opc2 & 0x0e) == 0x0e)
12362 {
12363 if (dp_op_sz)
12364 curr_insn_type = INSN_T1;
12365 else
12366 curr_insn_type = INSN_T2;
12367 }
12368 /* Handle VCVTB, VCVTT. */
12369 else if ((opc2 & 0x0e) == 0x02)
12370 curr_insn_type = INSN_T2;
12371 /* Handle VCMP, VCMPE. */
12372 else if ((opc2 & 0x0e) == 0x04)
12373 curr_insn_type = INSN_T3;
12374 }
12375 }
12376
12377 switch (curr_insn_type)
12378 {
12379 case INSN_T0:
12380 reg_vd = reg_vd | (bit_d << 4);
12381 record_buf[0] = reg_vd + ARM_D0_REGNUM;
12382 record_buf[1] = reg_vd + ARM_D0_REGNUM + 1;
12383 arm_insn_r->reg_rec_count = 2;
12384 break;
12385
12386 case INSN_T1:
12387 reg_vd = reg_vd | (bit_d << 4);
12388 record_buf[0] = reg_vd + ARM_D0_REGNUM;
12389 arm_insn_r->reg_rec_count = 1;
12390 break;
12391
12392 case INSN_T2:
12393 reg_vd = (reg_vd << 1) | bit_d;
12394 record_buf[0] = reg_vd + ARM_D0_REGNUM;
12395 arm_insn_r->reg_rec_count = 1;
12396 break;
12397
12398 case INSN_T3:
12399 record_buf[0] = ARM_FPSCR_REGNUM;
12400 arm_insn_r->reg_rec_count = 1;
12401 break;
12402
12403 default:
12404 gdb_assert_not_reached ("no decoding pattern found");
12405 break;
12406 }
12407
12408 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
12409 return 0;
12410 }
12411
12412 /* Handling opcode 110 insns. */
12413
12414 static int
12415 arm_record_asimd_vfp_coproc (insn_decode_record *arm_insn_r)
12416 {
12417 uint32_t op, op1, op1_sbit, op1_ebit, coproc;
12418
12419 coproc = bits (arm_insn_r->arm_insn, 8, 11);
12420 op1 = bits (arm_insn_r->arm_insn, 20, 25);
12421 op1_ebit = bit (arm_insn_r->arm_insn, 20);
12422
12423 if ((coproc & 0x0e) == 0x0a)
12424 {
12425 /* Handle extension register ld/st instructions. */
12426 if (!(op1 & 0x20))
12427 return arm_record_exreg_ld_st_insn (arm_insn_r);
12428
12429 /* 64-bit transfers between arm core and extension registers. */
12430 if ((op1 & 0x3e) == 0x04)
12431 return arm_record_exreg_ld_st_insn (arm_insn_r);
12432 }
12433 else
12434 {
12435 /* Handle coprocessor ld/st instructions. */
12436 if (!(op1 & 0x3a))
12437 {
12438 /* Store. */
12439 if (!op1_ebit)
12440 return arm_record_unsupported_insn (arm_insn_r);
12441 else
12442 /* Load. */
12443 return arm_record_unsupported_insn (arm_insn_r);
12444 }
12445
12446 /* Move to coprocessor from two arm core registers. */
12447 if (op1 == 0x4)
12448 return arm_record_unsupported_insn (arm_insn_r);
12449
12450 /* Move to two arm core registers from coprocessor. */
12451 if (op1 == 0x5)
12452 {
12453 uint32_t reg_t[2];
12454
12455 reg_t[0] = bits (arm_insn_r->arm_insn, 12, 15);
12456 reg_t[1] = bits (arm_insn_r->arm_insn, 16, 19);
12457 arm_insn_r->reg_rec_count = 2;
12458
12459 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, reg_t);
12460 return 0;
12461 }
12462 }
12463 return arm_record_unsupported_insn (arm_insn_r);
12464 }
12465
12466 /* Handling opcode 111 insns. */
12467
12468 static int
12469 arm_record_coproc_data_proc (insn_decode_record *arm_insn_r)
12470 {
12471 uint32_t op, op1_sbit, op1_ebit, coproc;
12472 struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
12473 struct regcache *reg_cache = arm_insn_r->regcache;
12474 ULONGEST u_regval = 0;
12475
12476 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27);
12477 coproc = bits (arm_insn_r->arm_insn, 8, 11);
12478 op1_sbit = bit (arm_insn_r->arm_insn, 24);
12479 op1_ebit = bit (arm_insn_r->arm_insn, 20);
12480 op = bit (arm_insn_r->arm_insn, 4);
12481
12482 /* Handle arm SWI/SVC system call instructions. */
12483 if (op1_sbit)
12484 {
12485 if (tdep->arm_syscall_record != NULL)
12486 {
12487 ULONGEST svc_operand, svc_number;
12488
12489 svc_operand = (0x00ffffff & arm_insn_r->arm_insn);
12490
12491 if (svc_operand) /* OABI. */
12492 svc_number = svc_operand - 0x900000;
12493 else /* EABI. */
12494 regcache_raw_read_unsigned (reg_cache, 7, &svc_number);
12495
12496 return tdep->arm_syscall_record (reg_cache, svc_number);
12497 }
12498 else
12499 {
12500 printf_unfiltered (_("no syscall record support\n"));
12501 return -1;
12502 }
12503 }
12504
12505 if ((coproc & 0x0e) == 0x0a)
12506 {
12507 /* VFP data-processing instructions. */
12508 if (!op1_sbit && !op)
12509 return arm_record_vfp_data_proc_insn (arm_insn_r);
12510
12511 /* Advanced SIMD, VFP instructions. */
12512 if (!op1_sbit && op)
12513 return arm_record_vdata_transfer_insn (arm_insn_r);
12514 }
12515 else
12516 {
12517 /* Coprocessor data operations. */
12518 if (!op1_sbit && !op)
12519 return arm_record_unsupported_insn (arm_insn_r);
12520
12521 /* Move to Coprocessor from ARM core register. */
12522 if (!op1_sbit && !op1_ebit && op)
12523 return arm_record_unsupported_insn (arm_insn_r);
12524
12525 /* Move to arm core register from coprocessor. */
12526 if (!op1_sbit && op1_ebit && op)
12527 {
12528 uint32_t record_buf[1];
12529
12530 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
12531 if (record_buf[0] == 15)
12532 record_buf[0] = ARM_PS_REGNUM;
12533
12534 arm_insn_r->reg_rec_count = 1;
12535 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count,
12536 record_buf);
12537 return 0;
12538 }
12539 }
12540
12541 return arm_record_unsupported_insn (arm_insn_r);
12542 }
12543
12544 /* Handling opcode 000 insns. */
12545
12546 static int
12547 thumb_record_shift_add_sub (insn_decode_record *thumb_insn_r)
12548 {
12549 uint32_t record_buf[8];
12550 uint32_t reg_src1 = 0;
12551
12552 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12553
12554 record_buf[0] = ARM_PS_REGNUM;
12555 record_buf[1] = reg_src1;
12556 thumb_insn_r->reg_rec_count = 2;
12557
12558 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12559
12560 return 0;
12561 }
12562
12563
12564 /* Handling opcode 001 insns. */
12565
12566 static int
12567 thumb_record_add_sub_cmp_mov (insn_decode_record *thumb_insn_r)
12568 {
12569 uint32_t record_buf[8];
12570 uint32_t reg_src1 = 0;
12571
12572 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12573
12574 record_buf[0] = ARM_PS_REGNUM;
12575 record_buf[1] = reg_src1;
12576 thumb_insn_r->reg_rec_count = 2;
12577
12578 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12579
12580 return 0;
12581 }
12582
12583 /* Handling opcode 010 insns. */
12584
12585 static int
12586 thumb_record_ld_st_reg_offset (insn_decode_record *thumb_insn_r)
12587 {
12588 struct regcache *reg_cache = thumb_insn_r->regcache;
12589 uint32_t record_buf[8], record_buf_mem[8];
12590
12591 uint32_t reg_src1 = 0, reg_src2 = 0;
12592 uint32_t opcode1 = 0, opcode2 = 0, opcode3 = 0;
12593
12594 ULONGEST u_regval[2] = {0};
12595
12596 opcode1 = bits (thumb_insn_r->arm_insn, 10, 12);
12597
12598 if (bit (thumb_insn_r->arm_insn, 12))
12599 {
12600 /* Handle load/store register offset. */
12601 opcode2 = bits (thumb_insn_r->arm_insn, 9, 10);
12602 if (opcode2 >= 12 && opcode2 <= 15)
12603 {
12604 /* LDR(2), LDRB(2) , LDRH(2), LDRSB, LDRSH. */
12605 reg_src1 = bits (thumb_insn_r->arm_insn,0, 2);
12606 record_buf[0] = reg_src1;
12607 thumb_insn_r->reg_rec_count = 1;
12608 }
12609 else if (opcode2 >= 8 && opcode2 <= 10)
12610 {
12611 /* STR(2), STRB(2), STRH(2) . */
12612 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12613 reg_src2 = bits (thumb_insn_r->arm_insn, 6, 8);
12614 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
12615 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
12616 if (8 == opcode2)
12617 record_buf_mem[0] = 4; /* STR (2). */
12618 else if (10 == opcode2)
12619 record_buf_mem[0] = 1; /* STRB (2). */
12620 else if (9 == opcode2)
12621 record_buf_mem[0] = 2; /* STRH (2). */
12622 record_buf_mem[1] = u_regval[0] + u_regval[1];
12623 thumb_insn_r->mem_rec_count = 1;
12624 }
12625 }
12626 else if (bit (thumb_insn_r->arm_insn, 11))
12627 {
12628 /* Handle load from literal pool. */
12629 /* LDR(3). */
12630 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12631 record_buf[0] = reg_src1;
12632 thumb_insn_r->reg_rec_count = 1;
12633 }
12634 else if (opcode1)
12635 {
12636 opcode2 = bits (thumb_insn_r->arm_insn, 8, 9);
12637 opcode3 = bits (thumb_insn_r->arm_insn, 0, 2);
12638 if ((3 == opcode2) && (!opcode3))
12639 {
12640 /* Branch with exchange. */
12641 record_buf[0] = ARM_PS_REGNUM;
12642 thumb_insn_r->reg_rec_count = 1;
12643 }
12644 else
12645 {
12646 /* Format 8; special data processing insns. */
12647 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12648 record_buf[0] = ARM_PS_REGNUM;
12649 record_buf[1] = reg_src1;
12650 thumb_insn_r->reg_rec_count = 2;
12651 }
12652 }
12653 else
12654 {
12655 /* Format 5; data processing insns. */
12656 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12657 if (bit (thumb_insn_r->arm_insn, 7))
12658 {
12659 reg_src1 = reg_src1 + 8;
12660 }
12661 record_buf[0] = ARM_PS_REGNUM;
12662 record_buf[1] = reg_src1;
12663 thumb_insn_r->reg_rec_count = 2;
12664 }
12665
12666 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12667 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12668 record_buf_mem);
12669
12670 return 0;
12671 }
12672
12673 /* Handling opcode 001 insns. */
12674
12675 static int
12676 thumb_record_ld_st_imm_offset (insn_decode_record *thumb_insn_r)
12677 {
12678 struct regcache *reg_cache = thumb_insn_r->regcache;
12679 uint32_t record_buf[8], record_buf_mem[8];
12680
12681 uint32_t reg_src1 = 0;
12682 uint32_t opcode = 0, immed_5 = 0;
12683
12684 ULONGEST u_regval = 0;
12685
12686 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12687
12688 if (opcode)
12689 {
12690 /* LDR(1). */
12691 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12692 record_buf[0] = reg_src1;
12693 thumb_insn_r->reg_rec_count = 1;
12694 }
12695 else
12696 {
12697 /* STR(1). */
12698 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12699 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12700 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12701 record_buf_mem[0] = 4;
12702 record_buf_mem[1] = u_regval + (immed_5 * 4);
12703 thumb_insn_r->mem_rec_count = 1;
12704 }
12705
12706 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12707 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12708 record_buf_mem);
12709
12710 return 0;
12711 }
12712
12713 /* Handling opcode 100 insns. */
12714
12715 static int
12716 thumb_record_ld_st_stack (insn_decode_record *thumb_insn_r)
12717 {
12718 struct regcache *reg_cache = thumb_insn_r->regcache;
12719 uint32_t record_buf[8], record_buf_mem[8];
12720
12721 uint32_t reg_src1 = 0;
12722 uint32_t opcode = 0, immed_8 = 0, immed_5 = 0;
12723
12724 ULONGEST u_regval = 0;
12725
12726 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12727
12728 if (3 == opcode)
12729 {
12730 /* LDR(4). */
12731 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12732 record_buf[0] = reg_src1;
12733 thumb_insn_r->reg_rec_count = 1;
12734 }
12735 else if (1 == opcode)
12736 {
12737 /* LDRH(1). */
12738 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12739 record_buf[0] = reg_src1;
12740 thumb_insn_r->reg_rec_count = 1;
12741 }
12742 else if (2 == opcode)
12743 {
12744 /* STR(3). */
12745 immed_8 = bits (thumb_insn_r->arm_insn, 0, 7);
12746 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
12747 record_buf_mem[0] = 4;
12748 record_buf_mem[1] = u_regval + (immed_8 * 4);
12749 thumb_insn_r->mem_rec_count = 1;
12750 }
12751 else if (0 == opcode)
12752 {
12753 /* STRH(1). */
12754 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12755 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12756 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12757 record_buf_mem[0] = 2;
12758 record_buf_mem[1] = u_regval + (immed_5 * 2);
12759 thumb_insn_r->mem_rec_count = 1;
12760 }
12761
12762 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12763 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12764 record_buf_mem);
12765
12766 return 0;
12767 }
12768
/* Handling opcode 101 insns: miscellaneous group (POP, PUSH, BKPT,
   SP-relative address generation and SP adjust).  Returns 0 on
   success, -1 when the instruction cannot be recorded.  */

static int
thumb_record_misc (insn_decode_record *thumb_insn_r)
{
  struct regcache *reg_cache = thumb_insn_r->regcache;

  uint32_t opcode = 0, opcode1 = 0, opcode2 = 0;
  uint32_t register_bits = 0, register_count = 0;
  /* NOTE(review): register_list appears to be unused in this
     function.  */
  uint32_t register_list[8] = {0}, index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];
  uint32_t reg_src1;

  ULONGEST u_regval = 0;

  /* Progressively wider sub-opcode fields of the instruction.  */
  opcode = bits (thumb_insn_r->arm_insn, 11, 12);
  opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
  opcode2 = bits (thumb_insn_r->arm_insn, 9, 12);

  if (14 == opcode2)
    {
      /* POP: every register named in the list, plus CPSR and SP
	 (writeback), may change.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    record_buf[index++] = register_count;
	  register_bits = register_bits >> 1;
	  register_count++;
	}
      record_buf[index++] = ARM_PS_REGNUM;
      record_buf[index++] = ARM_SP_REGNUM;
      thumb_insn_r->reg_rec_count = index;
    }
  else if (10 == opcode2)
    {
      /* PUSH: count the listed registers, then record the stack words
	 about to be overwritten, working up from the new (lower) SP.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}
      /* Bit 8 is the R flag: when set, LR is pushed too, lowering the
	 start of the stored block by one more word.  */
      start_address = u_regval -  \
	(4 * (bit (thumb_insn_r->arm_insn, 8) + register_count));
      /* NOTE(review): when the R bit is set, the word written for LR
	 does not seem to be included in mem_rec_count - confirm.  */
      thumb_insn_r->mem_rec_count = register_count;
      while (register_count)
	{
	  record_buf_mem[(register_count * 2) - 1] = start_address;
	  record_buf_mem[(register_count * 2) - 2] = 4;
	  start_address = start_address + 4;
	  register_count--;
	}
      /* SP is written back.  */
      record_buf[0] = ARM_SP_REGNUM;
      thumb_insn_r->reg_rec_count = 1;
    }
  else if (0x1E == opcode1)
    {
      /* BKPT insn.  */
      /* Handle enhanced software breakpoint insn, BKPT.  */
      /* CPSR is changed to be executed in ARM state,  disabling normal
	 interrupts, entering abort mode.  */
      /* According to high vector configuration PC is set.  */
      /* User hits breakpoint and type reverse, in that case, we need to go back with
	 previous CPSR and Program Counter.  */
      record_buf[0] = ARM_PS_REGNUM;
      record_buf[1] = ARM_LR_REGNUM;
      thumb_insn_r->reg_rec_count = 2;
      /* We need to save SPSR value, which is not yet done.  */
      printf_unfiltered (_("Process record does not support instruction "
			   "0x%0x at address %s.\n"),
			 thumb_insn_r->arm_insn,
			 paddress (thumb_insn_r->gdbarch,
				   thumb_insn_r->this_addr));
      return -1;
    }
  else if ((0 == opcode) || (1 == opcode))
    {
      /* ADD(5), ADD(6): only the destination register Rd changes.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      record_buf[0] = reg_src1;
      thumb_insn_r->reg_rec_count = 1;
    }
  else if (2 == opcode)
    {
      /* ADD(7), SUB(4): SP is adjusted.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      record_buf[0] = ARM_SP_REGNUM;
      thumb_insn_r->reg_rec_count = 1;
    }

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
	     record_buf_mem);

  return 0;
}
12868
/* Handling opcode 110 insns: LDMIA/STMIA multiple load/store, the
   conditional-branch group, and SWI.  Returns 0 on success or the
   syscall handler's status; -1 when syscall recording is
   unavailable.  */

static int
thumb_record_ldm_stm_swi (insn_decode_record *thumb_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
  struct regcache *reg_cache = thumb_insn_r->regcache;

  uint32_t ret = 0;        /* function return value: -1:record failure ;  0:success  */
  uint32_t reg_src1 = 0;
  uint32_t opcode1 = 0, opcode2 = 0, register_bits = 0, register_count = 0;
  /* NOTE(review): register_list appears to be unused in this
     function.  */
  uint32_t register_list[8] = {0}, index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
  opcode2 = bits (thumb_insn_r->arm_insn, 11, 12);

  if (1 == opcode2)
    {

      /* LDMIA: every register named in the list plus the base
	 register Rn (writeback) may change.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    record_buf[index++] = register_count;
	  register_bits = register_bits >> 1;
	  register_count++;
	}
      record_buf[index++] = reg_src1;
      thumb_insn_r->reg_rec_count = index;
    }
  else if (0 == opcode2)
    {
      /* STMIA: record the memory range about to be overwritten,
	 starting at the base register value.  */
      /* NOTE(review): the writeback of Rn is not recorded here -
	 confirm whether that is intentional.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}
      start_address = u_regval;
      thumb_insn_r->mem_rec_count = register_count;
      while (register_count)
	{
	  record_buf_mem[(register_count * 2) - 1] = start_address;
	  record_buf_mem[(register_count * 2) - 2] = 4;
	  start_address = start_address + 4;
	  register_count--;
	}
    }
  else if (0x1F == opcode1)
    {
      /* Handle arm syscall insn: delegate to the OS-specific syscall
	 recorder with the syscall number read from r7 (EABI).  */
      if (tdep->arm_syscall_record != NULL)
	{
	  regcache_raw_read_unsigned (reg_cache, 7, &u_regval);
	  ret = tdep->arm_syscall_record (reg_cache, u_regval);
	}
      else
	{
	  printf_unfiltered (_("no syscall record support\n"));
	  return -1;
	}
    }

  /* B (1), conditional branch is automatically taken care in process_record,
    as PC is saved there.  */

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
	     record_buf_mem);

  return ret;
}
12952
12953 /* Handling opcode 111 insns. */
12954
12955 static int
12956 thumb_record_branch (insn_decode_record *thumb_insn_r)
12957 {
12958 uint32_t record_buf[8];
12959 uint32_t bits_h = 0;
12960
12961 bits_h = bits (thumb_insn_r->arm_insn, 11, 12);
12962
12963 if (2 == bits_h || 3 == bits_h)
12964 {
12965 /* BL */
12966 record_buf[0] = ARM_LR_REGNUM;
12967 thumb_insn_r->reg_rec_count = 1;
12968 }
12969 else if (1 == bits_h)
12970 {
12971 /* BLX(1). */
12972 record_buf[0] = ARM_PS_REGNUM;
12973 record_buf[1] = ARM_LR_REGNUM;
12974 thumb_insn_r->reg_rec_count = 2;
12975 }
12976
12977 /* B(2) is automatically taken care in process_record, as PC is
12978 saved there. */
12979
12980 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12981
12982 return 0;
12983 }
12984
/* Handler for thumb2 load/store multiple instructions (LDM/STM,
   RFE/SRS).  Records the registers a load overwrites, or the memory
   range a store overwrites.  */

static int
thumb2_record_ld_st_multiple (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, op;
  uint32_t register_bits = 0, register_count = 0;
  uint32_t index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  op = bits (thumb2_insn_r->arm_insn, 23, 24);

  if (0 == op || 3 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle RFE instruction.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else
	{
	  /* Handle SRS instruction after reading banked SP.  */
	  return arm_record_unsupported_insn (thumb2_insn_r);
	}
    }
  else if (1 == op || 2 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle LDM/LDMIA/LDMFD and LDMDB/LDMEA instructions: every
	     listed register, the base register Rn (writeback) and CPSR
	     may change.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		record_buf[index++] = register_count;

	      register_count++;
	      register_bits = register_bits >> 1;
	    }
	  record_buf[index++] = reg_rn;
	  record_buf[index++] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = index;
	}
      else
	{
	  /* Handle STM/STMIA/STMEA and STMDB/STMFD: record the memory
	     range about to be overwritten.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		register_count++;

	      register_bits = register_bits >> 1;
	    }

	  if (1 == op)
	    {
	      /* Start address for STM/STMIA/STMEA: increment-after
		 stores begin at the base register value.  */
	      start_address = u_regval;
	    }
	  else if (2 == op)
	    {
	      /* Start address for STMDB/STMFD: decrement-before stores
		 begin register_count words below the base.  */
	      start_address = u_regval - register_count * 4;
	    }

	  thumb2_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[register_count * 2 - 1] = start_address;
	      record_buf_mem[register_count * 2 - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  /* Base register writeback and flags.  */
	  record_buf[0] = reg_rn;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}
    }

  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  return ARM_RECORD_SUCCESS;
}
13078
/* Handler for thumb2 load/store (dual/exclusive) and table branch
   instructions.  Records destination registers for loads, and the
   memory locations overwritten by stores.  */

static int
thumb2_record_ld_st_dual_ex_tbb (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rd, reg_rn, offset_imm;
  uint32_t reg_dest1, reg_dest2;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2, op3;
  /* NOTE(review): s_word appears to be unused in this function.  */
  LONGEST s_word;

  ULONGEST u_regval[2];

  op1 = bits (thumb2_insn_r->arm_insn, 23, 24);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 21);
  op3 = bits (thumb2_insn_r->arm_insn, 4, 7);

  if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Load forms.  The excluded op1/op2/op3 combination is
	 presumably TBB/TBH, which writes no destination register -
	 confirm against the Thumb-2 encoding tables.  */
      if (!(1 == op1 && 1 == op2 && (0 == op3 || 1 == op3)))
	{
	  reg_dest1 = bits (thumb2_insn_r->arm_insn, 12, 15);
	  record_buf[0] = reg_dest1;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}

      /* Dual loads (and LDREXD) also overwrite a second destination
	 register.  */
      if (3 == op2 || (op1 & 2) || (1 == op1 && 1 == op2 && 7 == op3))
	{
	  reg_dest2 = bits (thumb2_insn_r->arm_insn, 8, 11);
	  record_buf[2] = reg_dest2;
	  thumb2_insn_r->reg_rec_count = 3;
	}
    }
  else
    {
      /* Store forms: the base register Rn supplies the address.  */
      reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

      if (0 == op1 && 0 == op2)
	{
	  /* Handle STREX: one word at Rn + imm8 * 4 is overwritten,
	     and the status result is written to Rd.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  address = u_regval[0] + (offset_imm * 4);
	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  thumb2_insn_r->mem_rec_count = 1;
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else if (1 == op1 && 0 == op2)
	{
	  /* STREXB/STREXH/STREXD: status result goes to Rd; the size
	     (and count) of the overwritten memory depends on op3.  */
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	  address = u_regval[0];
	  record_buf_mem[1] = address;

	  if (4 == op3)
	    {
	      /* Handle STREXB: one byte.  */
	      record_buf_mem[0] = 1;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (5 == op3)
	    {
	      /* Handle STREXH: one halfword.  */
	      record_buf_mem[0] = 2;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (7 == op3)
	    {
	      /* Handle STREXD: two consecutive words.  */
	      address = u_regval[0];
	      record_buf_mem[0] = 4;
	      record_buf_mem[2] = 4;
	      record_buf_mem[3] = address + 4;
	      thumb2_insn_r->mem_rec_count = 2;
	    }
	}
      else
	{
	  /* STRD: two words at Rn (pre-indexed by imm8 * 4 when bit 24
	     is set); Rn is also recorded for possible writeback.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);

	  if (bit (thumb2_insn_r->arm_insn, 24))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 23))
		offset_addr = u_regval[0] + (offset_imm * 4);
	      else
		offset_addr = u_regval[0] - (offset_imm * 4);

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];

	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = 4;
	  record_buf_mem[3] = address + 4;
	  thumb2_insn_r->mem_rec_count = 2;
	  record_buf[0] = reg_rn;
	  thumb2_insn_r->reg_rec_count = 1;
	}
    }

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
13196
13197 /* Handler for thumb2 data processing (shift register and modified immediate)
13198 instructions. */
13199
13200 static int
13201 thumb2_record_data_proc_sreg_mimm (insn_decode_record *thumb2_insn_r)
13202 {
13203 uint32_t reg_rd, op;
13204 uint32_t record_buf[8];
13205
13206 op = bits (thumb2_insn_r->arm_insn, 21, 24);
13207 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
13208
13209 if ((0 == op || 4 == op || 8 == op || 13 == op) && 15 == reg_rd)
13210 {
13211 record_buf[0] = ARM_PS_REGNUM;
13212 thumb2_insn_r->reg_rec_count = 1;
13213 }
13214 else
13215 {
13216 record_buf[0] = reg_rd;
13217 record_buf[1] = ARM_PS_REGNUM;
13218 thumb2_insn_r->reg_rec_count = 2;
13219 }
13220
13221 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
13222 record_buf);
13223 return ARM_RECORD_SUCCESS;
13224 }
13225
13226 /* Generic handler for thumb2 instructions which effect destination and PS
13227 registers. */
13228
13229 static int
13230 thumb2_record_ps_dest_generic (insn_decode_record *thumb2_insn_r)
13231 {
13232 uint32_t reg_rd;
13233 uint32_t record_buf[8];
13234
13235 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
13236
13237 record_buf[0] = reg_rd;
13238 record_buf[1] = ARM_PS_REGNUM;
13239 thumb2_insn_r->reg_rec_count = 2;
13240
13241 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
13242 record_buf);
13243 return ARM_RECORD_SUCCESS;
13244 }
13245
13246 /* Handler for thumb2 branch and miscellaneous control instructions. */
13247
13248 static int
13249 thumb2_record_branch_misc_cntrl (insn_decode_record *thumb2_insn_r)
13250 {
13251 uint32_t op, op1, op2;
13252 uint32_t record_buf[8];
13253
13254 op = bits (thumb2_insn_r->arm_insn, 20, 26);
13255 op1 = bits (thumb2_insn_r->arm_insn, 12, 14);
13256 op2 = bits (thumb2_insn_r->arm_insn, 8, 11);
13257
13258 /* Handle MSR insn. */
13259 if (!(op1 & 0x2) && 0x38 == op)
13260 {
13261 if (!(op2 & 0x3))
13262 {
13263 /* CPSR is going to be changed. */
13264 record_buf[0] = ARM_PS_REGNUM;
13265 thumb2_insn_r->reg_rec_count = 1;
13266 }
13267 else
13268 {
13269 arm_record_unsupported_insn(thumb2_insn_r);
13270 return -1;
13271 }
13272 }
13273 else if (4 == (op1 & 0x5) || 5 == (op1 & 0x5))
13274 {
13275 /* BLX. */
13276 record_buf[0] = ARM_PS_REGNUM;
13277 record_buf[1] = ARM_LR_REGNUM;
13278 thumb2_insn_r->reg_rec_count = 2;
13279 }
13280
13281 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
13282 record_buf);
13283 return ARM_RECORD_SUCCESS;
13284 }
13285
/* Handler for thumb2 store single data item instructions (STRB/STRH/STR).

   Computes the effective store address from the base register (and,
   for the register form, the shifted index register), then records the
   memory range written and the base register.  Returns
   ARM_RECORD_SUCCESS.  */

static int
thumb2_record_str_single_data (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, reg_rm, offset_imm, shift_imm;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2;

  ULONGEST u_regval[2];

  /* op1 (bits 21-23) selects the access size; op2 (bits 6-11)
     distinguishes the register form from the immediate forms.  */
  op1 = bits (thumb2_insn_r->arm_insn, 21, 23);
  op2 = bits (thumb2_insn_r->arm_insn, 6, 11);
  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

  if (bit (thumb2_insn_r->arm_insn, 23))
    {
      /* T2 encoding: 12-bit unsigned immediate offset, no writeback.  */
      offset_imm = bits (thumb2_insn_r->arm_insn, 0, 11);
      offset_addr = u_regval[0] + offset_imm;
      address = offset_addr;
    }
  else
    {
      /* T3 encoding.  */
      if ((0 == op1 || 1 == op1 || 2 == op1) && !(op2 & 0x20))
	{
	  /* Handle STRB (register): address = Rn + (Rm << shift).  */
	  reg_rm = bits (thumb2_insn_r->arm_insn, 0, 3);
	  regcache_raw_read_unsigned (reg_cache, reg_rm, &u_regval[1]);
	  shift_imm = bits (thumb2_insn_r->arm_insn, 4, 5);
	  offset_addr = u_regval[1] << shift_imm;
	  address = u_regval[0] + offset_addr;
	}
      else
	{
	  /* 8-bit immediate forms.  Bit 10 distinguishes the indexed
	     variants (bit 9 gives the offset sign) from the plain
	     post-indexed one, which stores at the unmodified base.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  if (bit (thumb2_insn_r->arm_insn, 10))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 9))
		offset_addr = u_regval[0] + offset_imm;
	      else
		offset_addr = u_regval[0] - offset_imm;

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];
	}
    }

  /* Access width in bytes, selected by op1.  */
  switch (op1)
    {
      /* Store byte instructions.  */
      case 4:
      case 0:
        record_buf_mem[0] = 1;
        break;
      /* Store half word instructions.  */
      case 1:
      case 5:
        record_buf_mem[0] = 2;
        break;
      /* Store word instructions.  */
      case 2:
      case 6:
        record_buf_mem[0] = 4;
        break;

      default:
        /* op1 values 3 and 7 belong to other encodings and should not
	   reach this handler.  */
        gdb_assert_not_reached ("no decoding pattern found");
        break;
    }

  record_buf_mem[1] = address;
  thumb2_insn_r->mem_rec_count = 1;
  /* Rn is recorded unconditionally: this covers the writeback variants,
     and recording an unmodified register is harmless.  */
  record_buf[0] = reg_rn;
  thumb2_insn_r->reg_rec_count = 1;

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
             record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
             record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
13375
13376 /* Handler for thumb2 load memory hints instructions. */
13377
13378 static int
13379 thumb2_record_ld_mem_hints (insn_decode_record *thumb2_insn_r)
13380 {
13381 uint32_t record_buf[8];
13382 uint32_t reg_rt, reg_rn;
13383
13384 reg_rt = bits (thumb2_insn_r->arm_insn, 12, 15);
13385 reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
13386
13387 if (ARM_PC_REGNUM != reg_rt)
13388 {
13389 record_buf[0] = reg_rt;
13390 record_buf[1] = reg_rn;
13391 record_buf[2] = ARM_PS_REGNUM;
13392 thumb2_insn_r->reg_rec_count = 3;
13393
13394 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
13395 record_buf);
13396 return ARM_RECORD_SUCCESS;
13397 }
13398
13399 return ARM_RECORD_FAILURE;
13400 }
13401
13402 /* Handler for thumb2 load word instructions. */
13403
13404 static int
13405 thumb2_record_ld_word (insn_decode_record *thumb2_insn_r)
13406 {
13407 uint32_t opcode1 = 0, opcode2 = 0;
13408 uint32_t record_buf[8];
13409
13410 record_buf[0] = bits (thumb2_insn_r->arm_insn, 12, 15);
13411 record_buf[1] = ARM_PS_REGNUM;
13412 thumb2_insn_r->reg_rec_count = 2;
13413
13414 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
13415 record_buf);
13416 return ARM_RECORD_SUCCESS;
13417 }
13418
13419 /* Handler for thumb2 long multiply, long multiply accumulate, and
13420 divide instructions. */
13421
13422 static int
13423 thumb2_record_lmul_lmla_div (insn_decode_record *thumb2_insn_r)
13424 {
13425 uint32_t opcode1 = 0, opcode2 = 0;
13426 uint32_t record_buf[8];
13427 uint32_t reg_src1 = 0;
13428
13429 opcode1 = bits (thumb2_insn_r->arm_insn, 20, 22);
13430 opcode2 = bits (thumb2_insn_r->arm_insn, 4, 7);
13431
13432 if (0 == opcode1 || 2 == opcode1 || (opcode1 >= 4 && opcode1 <= 6))
13433 {
13434 /* Handle SMULL, UMULL, SMULAL. */
13435 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
13436 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
13437 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
13438 record_buf[2] = ARM_PS_REGNUM;
13439 thumb2_insn_r->reg_rec_count = 3;
13440 }
13441 else if (1 == opcode1 || 3 == opcode2)
13442 {
13443 /* Handle SDIV and UDIV. */
13444 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
13445 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
13446 record_buf[2] = ARM_PS_REGNUM;
13447 thumb2_insn_r->reg_rec_count = 3;
13448 }
13449 else
13450 return ARM_RECORD_FAILURE;
13451
13452 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
13453 record_buf);
13454 return ARM_RECORD_SUCCESS;
13455 }
13456
13457 /* Record handler for thumb32 coprocessor instructions. */
13458
13459 static int
13460 thumb2_record_coproc_insn (insn_decode_record *thumb2_insn_r)
13461 {
13462 if (bit (thumb2_insn_r->arm_insn, 25))
13463 return arm_record_coproc_data_proc (thumb2_insn_r);
13464 else
13465 return arm_record_asimd_vfp_coproc (thumb2_insn_r);
13466 }
13467
13468 /* Record handler for advance SIMD structure load/store instructions. */
13469
13470 static int
13471 thumb2_record_asimd_struct_ld_st (insn_decode_record *thumb2_insn_r)
13472 {
13473 struct regcache *reg_cache = thumb2_insn_r->regcache;
13474 uint32_t l_bit, a_bit, b_bits;
13475 uint32_t record_buf[128], record_buf_mem[128];
13476 uint32_t reg_rn, reg_vd, address, f_esize, f_elem;
13477 uint32_t index_r = 0, index_e = 0, bf_regs = 0, index_m = 0, loop_t = 0;
13478 uint8_t f_ebytes;
13479
13480 l_bit = bit (thumb2_insn_r->arm_insn, 21);
13481 a_bit = bit (thumb2_insn_r->arm_insn, 23);
13482 b_bits = bits (thumb2_insn_r->arm_insn, 8, 11);
13483 reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
13484 reg_vd = bits (thumb2_insn_r->arm_insn, 12, 15);
13485 reg_vd = (bit (thumb2_insn_r->arm_insn, 22) << 4) | reg_vd;
13486 f_ebytes = (1 << bits (thumb2_insn_r->arm_insn, 6, 7));
13487 f_esize = 8 * f_ebytes;
13488 f_elem = 8 / f_ebytes;
13489
13490 if (!l_bit)
13491 {
13492 ULONGEST u_regval = 0;
13493 regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
13494 address = u_regval;
13495
13496 if (!a_bit)
13497 {
13498 /* Handle VST1. */
13499 if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06)
13500 {
13501 if (b_bits == 0x07)
13502 bf_regs = 1;
13503 else if (b_bits == 0x0a)
13504 bf_regs = 2;
13505 else if (b_bits == 0x06)
13506 bf_regs = 3;
13507 else if (b_bits == 0x02)
13508 bf_regs = 4;
13509 else
13510 bf_regs = 0;
13511
13512 for (index_r = 0; index_r < bf_regs; index_r++)
13513 {
13514 for (index_e = 0; index_e < f_elem; index_e++)
13515 {
13516 record_buf_mem[index_m++] = f_ebytes;
13517 record_buf_mem[index_m++] = address;
13518 address = address + f_ebytes;
13519 thumb2_insn_r->mem_rec_count += 1;
13520 }
13521 }
13522 }
13523 /* Handle VST2. */
13524 else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08)
13525 {
13526 if (b_bits == 0x09 || b_bits == 0x08)
13527 bf_regs = 1;
13528 else if (b_bits == 0x03)
13529 bf_regs = 2;
13530 else
13531 bf_regs = 0;
13532
13533 for (index_r = 0; index_r < bf_regs; index_r++)
13534 for (index_e = 0; index_e < f_elem; index_e++)
13535 {
13536 for (loop_t = 0; loop_t < 2; loop_t++)
13537 {
13538 record_buf_mem[index_m++] = f_ebytes;
13539 record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
13540 thumb2_insn_r->mem_rec_count += 1;
13541 }
13542 address = address + (2 * f_ebytes);
13543 }
13544 }
13545 /* Handle VST3. */
13546 else if ((b_bits & 0x0e) == 0x04)
13547 {
13548 for (index_e = 0; index_e < f_elem; index_e++)
13549 {
13550 for (loop_t = 0; loop_t < 3; loop_t++)
13551 {
13552 record_buf_mem[index_m++] = f_ebytes;
13553 record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
13554 thumb2_insn_r->mem_rec_count += 1;
13555 }
13556 address = address + (3 * f_ebytes);
13557 }
13558 }
13559 /* Handle VST4. */
13560 else if (!(b_bits & 0x0e))
13561 {
13562 for (index_e = 0; index_e < f_elem; index_e++)
13563 {
13564 for (loop_t = 0; loop_t < 4; loop_t++)
13565 {
13566 record_buf_mem[index_m++] = f_ebytes;
13567 record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
13568 thumb2_insn_r->mem_rec_count += 1;
13569 }
13570 address = address + (4 * f_ebytes);
13571 }
13572 }
13573 }
13574 else
13575 {
13576 uint8_t bft_size = bits (thumb2_insn_r->arm_insn, 10, 11);
13577
13578 if (bft_size == 0x00)
13579 f_ebytes = 1;
13580 else if (bft_size == 0x01)
13581 f_ebytes = 2;
13582 else if (bft_size == 0x02)
13583 f_ebytes = 4;
13584 else
13585 f_ebytes = 0;
13586
13587 /* Handle VST1. */
13588 if (!(b_bits & 0x0b) || b_bits == 0x08)
13589 thumb2_insn_r->mem_rec_count = 1;
13590 /* Handle VST2. */
13591 else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09)
13592 thumb2_insn_r->mem_rec_count = 2;
13593 /* Handle VST3. */
13594 else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a)
13595 thumb2_insn_r->mem_rec_count = 3;
13596 /* Handle VST4. */
13597 else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b)
13598 thumb2_insn_r->mem_rec_count = 4;
13599
13600 for (index_m = 0; index_m < thumb2_insn_r->mem_rec_count; index_m++)
13601 {
13602 record_buf_mem[index_m] = f_ebytes;
13603 record_buf_mem[index_m] = address + (index_m * f_ebytes);
13604 }
13605 }
13606 }
13607 else
13608 {
13609 if (!a_bit)
13610 {
13611 /* Handle VLD1. */
13612 if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06)
13613 thumb2_insn_r->reg_rec_count = 1;
13614 /* Handle VLD2. */
13615 else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08)
13616 thumb2_insn_r->reg_rec_count = 2;
13617 /* Handle VLD3. */
13618 else if ((b_bits & 0x0e) == 0x04)
13619 thumb2_insn_r->reg_rec_count = 3;
13620 /* Handle VLD4. */
13621 else if (!(b_bits & 0x0e))
13622 thumb2_insn_r->reg_rec_count = 4;
13623 }
13624 else
13625 {
13626 /* Handle VLD1. */
13627 if (!(b_bits & 0x0b) || b_bits == 0x08 || b_bits == 0x0c)
13628 thumb2_insn_r->reg_rec_count = 1;
13629 /* Handle VLD2. */
13630 else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09 || b_bits == 0x0d)
13631 thumb2_insn_r->reg_rec_count = 2;
13632 /* Handle VLD3. */
13633 else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a || b_bits == 0x0e)
13634 thumb2_insn_r->reg_rec_count = 3;
13635 /* Handle VLD4. */
13636 else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b || b_bits == 0x0f)
13637 thumb2_insn_r->reg_rec_count = 4;
13638
13639 for (index_r = 0; index_r < thumb2_insn_r->reg_rec_count; index_r++)
13640 record_buf[index_r] = reg_vd + ARM_D0_REGNUM + index_r;
13641 }
13642 }
13643
13644 if (bits (thumb2_insn_r->arm_insn, 0, 3) != 15)
13645 {
13646 record_buf[index_r] = reg_rn;
13647 thumb2_insn_r->reg_rec_count += 1;
13648 }
13649
13650 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
13651 record_buf);
13652 MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
13653 record_buf_mem);
13654 return 0;
13655 }
13656
/* Decodes thumb2 instruction type and invokes its record handler.

   Expects the 32-bit instruction with its halfwords already swapped by
   the caller (see decode_insn), so op/op1/op2 are taken from the high
   halfword.  Returns the handler's result, or -1 when no pattern
   matches.  NOTE(review): the return type is unsigned int while -1 is
   returned on failure; callers only compare against ARM_RECORD_SUCCESS,
   so this works, but a signed int would be clearer.  */

static unsigned int
thumb2_record_decode_insn_handler (insn_decode_record *thumb2_insn_r)
{
  uint32_t op, op1, op2;

  /* op (bit 15), op1 (bits 27-28) and op2 (bits 20-26) partition the
     32-bit Thumb encoding space.  */
  op = bit (thumb2_insn_r->arm_insn, 15);
  op1 = bits (thumb2_insn_r->arm_insn, 27, 28);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 26);

  if (op1 == 0x01)
    {
      if (!(op2 & 0x64 ))
        {
          /* Load/store multiple instruction.  */
          return thumb2_record_ld_st_multiple (thumb2_insn_r);
        }
      else if (!((op2 & 0x64) ^ 0x04))
        {
          /* Load/store (dual/exclusive) and table branch instruction.  */
          return thumb2_record_ld_st_dual_ex_tbb (thumb2_insn_r);
        }
      else if (!((op2 & 0x20) ^ 0x20))
        {
          /* Data-processing (shifted register).  */
          return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
        }
      else if (op2 & 0x40)
        {
          /* Co-processor instructions.  */
          return thumb2_record_coproc_insn (thumb2_insn_r);
        }
    }
  else if (op1 == 0x02)
    {
      if (op)
        {
          /* Branches and miscellaneous control instructions.  */
          return thumb2_record_branch_misc_cntrl (thumb2_insn_r);
        }
      else if (op2 & 0x20)
        {
          /* Data-processing (plain binary immediate) instruction.  */
          return thumb2_record_ps_dest_generic (thumb2_insn_r);
        }
      else
        {
          /* Data-processing (modified immediate).  */
          return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
        }
    }
  else if (op1 == 0x03)
    {
      if (!(op2 & 0x71 ))
        {
          /* Store single data item.  */
          return thumb2_record_str_single_data (thumb2_insn_r);
        }
      else if (!((op2 & 0x71) ^ 0x10))
        {
          /* Advanced SIMD or structure load/store instructions.  */
          return thumb2_record_asimd_struct_ld_st (thumb2_insn_r);
        }
      else if (!((op2 & 0x67) ^ 0x01))
        {
          /* Load byte, memory hints instruction.  */
          return thumb2_record_ld_mem_hints (thumb2_insn_r);
        }
      else if (!((op2 & 0x67) ^ 0x03))
        {
          /* Load halfword, memory hints instruction.  */
          return thumb2_record_ld_mem_hints (thumb2_insn_r);
        }
      else if (!((op2 & 0x67) ^ 0x05))
        {
          /* Load word instruction.  */
          return thumb2_record_ld_word (thumb2_insn_r);
        }
      else if (!((op2 & 0x70) ^ 0x20))
        {
          /* Data-processing (register) instruction.  */
          return thumb2_record_ps_dest_generic (thumb2_insn_r);
        }
      else if (!((op2 & 0x78) ^ 0x30))
        {
          /* Multiply, multiply accumulate, abs diff instruction.  */
          return thumb2_record_ps_dest_generic (thumb2_insn_r);
        }
      else if (!((op2 & 0x78) ^ 0x38))
        {
          /* Long multiply, long multiply accumulate, and divide.  */
          return thumb2_record_lmul_lmla_div (thumb2_insn_r);
        }
      else if (op2 & 0x40)
        {
          /* Co-processor instructions.  */
          return thumb2_record_coproc_insn (thumb2_insn_r);
        }
    }

  /* No decoding pattern matched.  */
  return -1;
}
13760
/* Extracts arm/thumb/thumb2 insn depending on the size, and returns 0 on
   success and a positive value on failure.  */
13763
13764 static int
13765 extract_arm_insn (insn_decode_record *insn_record, uint32_t insn_size)
13766 {
13767 gdb_byte buf[insn_size];
13768
13769 memset (&buf[0], 0, insn_size);
13770
13771 if (target_read_memory (insn_record->this_addr, &buf[0], insn_size))
13772 return 1;
13773 insn_record->arm_insn = (uint32_t) extract_unsigned_integer (&buf[0],
13774 insn_size,
13775 gdbarch_byte_order_for_code (insn_record->gdbarch));
13776 return 0;
13777 }
13778
13779 typedef int (*sti_arm_hdl_fp_t) (insn_decode_record*);
13780
13781 /* Decode arm/thumb insn depending on condition cods and opcodes; and
13782 dispatch it. */
13783
13784 static int
13785 decode_insn (insn_decode_record *arm_record, record_type_t record_type,
13786 uint32_t insn_size)
13787 {
13788
13789 /* (Starting from numerical 0); bits 25, 26, 27 decodes type of arm instruction. */
13790 static const sti_arm_hdl_fp_t const arm_handle_insn[8] =
13791 {
13792 arm_record_data_proc_misc_ld_str, /* 000. */
13793 arm_record_data_proc_imm, /* 001. */
13794 arm_record_ld_st_imm_offset, /* 010. */
13795 arm_record_ld_st_reg_offset, /* 011. */
13796 arm_record_ld_st_multiple, /* 100. */
13797 arm_record_b_bl, /* 101. */
13798 arm_record_asimd_vfp_coproc, /* 110. */
13799 arm_record_coproc_data_proc /* 111. */
13800 };
13801
13802 /* (Starting from numerical 0); bits 13,14,15 decodes type of thumb instruction. */
13803 static const sti_arm_hdl_fp_t const thumb_handle_insn[8] =
13804 { \
13805 thumb_record_shift_add_sub, /* 000. */
13806 thumb_record_add_sub_cmp_mov, /* 001. */
13807 thumb_record_ld_st_reg_offset, /* 010. */
13808 thumb_record_ld_st_imm_offset, /* 011. */
13809 thumb_record_ld_st_stack, /* 100. */
13810 thumb_record_misc, /* 101. */
13811 thumb_record_ldm_stm_swi, /* 110. */
13812 thumb_record_branch /* 111. */
13813 };
13814
13815 uint32_t ret = 0; /* return value: negative:failure 0:success. */
13816 uint32_t insn_id = 0;
13817
13818 if (extract_arm_insn (arm_record, insn_size))
13819 {
13820 if (record_debug)
13821 {
13822 printf_unfiltered (_("Process record: error reading memory at "
13823 "addr %s len = %d.\n"),
13824 paddress (arm_record->gdbarch, arm_record->this_addr), insn_size);
13825 }
13826 return -1;
13827 }
13828 else if (ARM_RECORD == record_type)
13829 {
13830 arm_record->cond = bits (arm_record->arm_insn, 28, 31);
13831 insn_id = bits (arm_record->arm_insn, 25, 27);
13832 ret = arm_record_extension_space (arm_record);
13833 /* If this insn has fallen into extension space
13834 then we need not decode it anymore. */
13835 if (ret != -1 && !INSN_RECORDED(arm_record))
13836 {
13837 ret = arm_handle_insn[insn_id] (arm_record);
13838 }
13839 }
13840 else if (THUMB_RECORD == record_type)
13841 {
13842 /* As thumb does not have condition codes, we set negative. */
13843 arm_record->cond = -1;
13844 insn_id = bits (arm_record->arm_insn, 13, 15);
13845 ret = thumb_handle_insn[insn_id] (arm_record);
13846 }
13847 else if (THUMB2_RECORD == record_type)
13848 {
13849 /* As thumb does not have condition codes, we set negative. */
13850 arm_record->cond = -1;
13851
13852 /* Swap first half of 32bit thumb instruction with second half. */
13853 arm_record->arm_insn
13854 = (arm_record->arm_insn >> 16) | (arm_record->arm_insn << 16);
13855
13856 insn_id = thumb2_record_decode_insn_handler (arm_record);
13857
13858 if (insn_id != ARM_RECORD_SUCCESS)
13859 {
13860 arm_record_unsupported_insn (arm_record);
13861 ret = -1;
13862 }
13863 }
13864 else
13865 {
13866 /* Throw assertion. */
13867 gdb_assert_not_reached ("not a valid instruction, could not decode");
13868 }
13869
13870 return ret;
13871 }
13872
13873
13874 /* Cleans up local record registers and memory allocations. */
13875
13876 static void
13877 deallocate_reg_mem (insn_decode_record *record)
13878 {
13879 xfree (record->arm_regs);
13880 xfree (record->arm_mems);
13881 }
13882
13883
13884 /* Parse the current instruction and record the values of the registers and
13885 memory that will be changed in current instruction to record_arch_list".
13886 Return -1 if something is wrong. */
13887
13888 int
13889 arm_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
13890 CORE_ADDR insn_addr)
13891 {
13892
13893 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
13894 uint32_t no_of_rec = 0;
13895 uint32_t ret = 0; /* return value: -1:record failure ; 0:success */
13896 ULONGEST t_bit = 0, insn_id = 0;
13897
13898 ULONGEST u_regval = 0;
13899
13900 insn_decode_record arm_record;
13901
13902 memset (&arm_record, 0, sizeof (insn_decode_record));
13903 arm_record.regcache = regcache;
13904 arm_record.this_addr = insn_addr;
13905 arm_record.gdbarch = gdbarch;
13906
13907
13908 if (record_debug > 1)
13909 {
13910 fprintf_unfiltered (gdb_stdlog, "Process record: arm_process_record "
13911 "addr = %s\n",
13912 paddress (gdbarch, arm_record.this_addr));
13913 }
13914
13915 if (extract_arm_insn (&arm_record, 2))
13916 {
13917 if (record_debug)
13918 {
13919 printf_unfiltered (_("Process record: error reading memory at "
13920 "addr %s len = %d.\n"),
13921 paddress (arm_record.gdbarch,
13922 arm_record.this_addr), 2);
13923 }
13924 return -1;
13925 }
13926
13927 /* Check the insn, whether it is thumb or arm one. */
13928
13929 t_bit = arm_psr_thumb_bit (arm_record.gdbarch);
13930 regcache_raw_read_unsigned (arm_record.regcache, ARM_PS_REGNUM, &u_regval);
13931
13932
13933 if (!(u_regval & t_bit))
13934 {
13935 /* We are decoding arm insn. */
13936 ret = decode_insn (&arm_record, ARM_RECORD, ARM_INSN_SIZE_BYTES);
13937 }
13938 else
13939 {
13940 insn_id = bits (arm_record.arm_insn, 11, 15);
13941 /* is it thumb2 insn? */
13942 if ((0x1D == insn_id) || (0x1E == insn_id) || (0x1F == insn_id))
13943 {
13944 ret = decode_insn (&arm_record, THUMB2_RECORD,
13945 THUMB2_INSN_SIZE_BYTES);
13946 }
13947 else
13948 {
13949 /* We are decoding thumb insn. */
13950 ret = decode_insn (&arm_record, THUMB_RECORD, THUMB_INSN_SIZE_BYTES);
13951 }
13952 }
13953
13954 if (0 == ret)
13955 {
13956 /* Record registers. */
13957 record_full_arch_list_add_reg (arm_record.regcache, ARM_PC_REGNUM);
13958 if (arm_record.arm_regs)
13959 {
13960 for (no_of_rec = 0; no_of_rec < arm_record.reg_rec_count; no_of_rec++)
13961 {
13962 if (record_full_arch_list_add_reg
13963 (arm_record.regcache , arm_record.arm_regs[no_of_rec]))
13964 ret = -1;
13965 }
13966 }
13967 /* Record memories. */
13968 if (arm_record.arm_mems)
13969 {
13970 for (no_of_rec = 0; no_of_rec < arm_record.mem_rec_count; no_of_rec++)
13971 {
13972 if (record_full_arch_list_add_mem
13973 ((CORE_ADDR)arm_record.arm_mems[no_of_rec].addr,
13974 arm_record.arm_mems[no_of_rec].len))
13975 ret = -1;
13976 }
13977 }
13978
13979 if (record_full_arch_list_add_end ())
13980 ret = -1;
13981 }
13982
13983
13984 deallocate_reg_mem (&arm_record);
13985
13986 return ret;
13987 }
13988
This page took 0.373748 seconds and 4 git commands to generate.