0e7d9c2f872e17f1068c1c16e87ad2ddc3d22305
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "frame.h"
25 #include "inferior.h"
26 #include "infrun.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include <string.h>
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "remote.h"
45 #include "target-descriptions.h"
46 #include "user-regs.h"
47 #include "observer.h"
48
49 #include "arm-tdep.h"
50 #include "gdb/sim-arm.h"
51
52 #include "elf-bfd.h"
53 #include "coff/internal.h"
54 #include "elf/arm.h"
55
56 #include "gdb_assert.h"
57 #include "vec.h"
58
59 #include "record.h"
60 #include "record-full.h"
61
62 #include "features/arm-with-m.c"
63 #include "features/arm-with-m-fpa-layout.c"
64 #include "features/arm-with-m-vfp-d16.c"
65 #include "features/arm-with-iwmmxt.c"
66 #include "features/arm-with-vfpv2.c"
67 #include "features/arm-with-vfpv3.c"
68 #include "features/arm-with-neon.c"
69
70 static int arm_debug;
71
72 /* Macros for setting and testing a bit in a minimal symbol that marks
73 it as Thumb function. The MSB of the minimal symbol's "info" field
74 is used for this purpose.
75
76 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
77 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
78
79 #define MSYMBOL_SET_SPECIAL(msym) \
80 MSYMBOL_TARGET_FLAG_1 (msym) = 1
81
82 #define MSYMBOL_IS_SPECIAL(msym) \
83 MSYMBOL_TARGET_FLAG_1 (msym)
84
85 /* Per-objfile data used for mapping symbols. */
86 static const struct objfile_data *arm_objfile_data_key;
87
/* An ARM ELF mapping symbol, recording a point in a section where the
   contents switch between ARM code, Thumb code and data.  */
struct arm_mapping_symbol
{
  /* Offset of the mapping symbol from the start of its section.  */
  bfd_vma value;
  /* The mapping symbol kind; arm_pc_is_thumb tests for 't' (Thumb
     code).  NOTE(review): presumably the other kinds are 'a' (ARM
     code) and 'd' (data) per the ARM ELF supplement -- confirm at the
     site that fills these in.  */
  char type;
};
typedef struct arm_mapping_symbol arm_mapping_symbol_s;
DEF_VEC_O(arm_mapping_symbol_s);

/* Per-objfile mapping-symbol data, attached via arm_objfile_data_key.  */
struct arm_per_objfile
{
  /* One vector of mapping symbols per BFD section, indexed by the
     section's index (see arm_find_mapping_symbol).  */
  VEC(arm_mapping_symbol_s) **section_maps;
};
100
101 /* The list of available "set arm ..." and "show arm ..." commands. */
102 static struct cmd_list_element *setarmcmdlist = NULL;
103 static struct cmd_list_element *showarmcmdlist = NULL;
104
105 /* The type of floating-point to use. Keep this in sync with enum
106 arm_float_model, and the help string in _initialize_arm_tdep. */
107 static const char *const fp_model_strings[] =
108 {
109 "auto",
110 "softfpa",
111 "fpa",
112 "softvfp",
113 "vfp",
114 NULL
115 };
116
117 /* A variable that can be configured by the user. */
118 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
119 static const char *current_fp_model = "auto";
120
121 /* The ABI to use. Keep this in sync with arm_abi_kind. */
122 static const char *const arm_abi_strings[] =
123 {
124 "auto",
125 "APCS",
126 "AAPCS",
127 NULL
128 };
129
130 /* A variable that can be configured by the user. */
131 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
132 static const char *arm_abi_string = "auto";
133
134 /* The execution mode to assume. */
135 static const char *const arm_mode_strings[] =
136 {
137 "auto",
138 "arm",
139 "thumb",
140 NULL
141 };
142
143 static const char *arm_fallback_mode_string = "auto";
144 static const char *arm_force_mode_string = "auto";
145
146 /* Internal override of the execution mode. -1 means no override,
147 0 means override to ARM mode, 1 means override to Thumb mode.
148 The effect is the same as if arm_force_mode has been set by the
149 user (except the internal override has precedence over a user's
150 arm_force_mode override). */
151 static int arm_override_mode = -1;
152
153 /* Number of different reg name sets (options). */
154 static int num_disassembly_options;
155
156 /* The standard register names, and all the valid aliases for them. Note
157 that `fp', `sp' and `pc' are not added in this alias list, because they
158 have been added as builtin user registers in
159 std-regs.c:_initialize_frame_reg. */
160 static const struct
161 {
162 const char *name;
163 int regnum;
164 } arm_register_aliases[] = {
165 /* Basic register numbers. */
166 { "r0", 0 },
167 { "r1", 1 },
168 { "r2", 2 },
169 { "r3", 3 },
170 { "r4", 4 },
171 { "r5", 5 },
172 { "r6", 6 },
173 { "r7", 7 },
174 { "r8", 8 },
175 { "r9", 9 },
176 { "r10", 10 },
177 { "r11", 11 },
178 { "r12", 12 },
179 { "r13", 13 },
180 { "r14", 14 },
181 { "r15", 15 },
182 /* Synonyms (argument and variable registers). */
183 { "a1", 0 },
184 { "a2", 1 },
185 { "a3", 2 },
186 { "a4", 3 },
187 { "v1", 4 },
188 { "v2", 5 },
189 { "v3", 6 },
190 { "v4", 7 },
191 { "v5", 8 },
192 { "v6", 9 },
193 { "v7", 10 },
194 { "v8", 11 },
195 /* Other platform-specific names for r9. */
196 { "sb", 9 },
197 { "tr", 9 },
198 /* Special names. */
199 { "ip", 12 },
200 { "lr", 14 },
201 /* Names used by GCC (not listed in the ARM EABI). */
202 { "sl", 10 },
203 /* A special name from the older ATPCS. */
204 { "wr", 7 },
205 };
206
207 static const char *const arm_register_names[] =
208 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
209 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
210 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
211 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
212 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
213 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
214 "fps", "cpsr" }; /* 24 25 */
215
216 /* Valid register name styles. */
217 static const char **valid_disassembly_styles;
218
219 /* Disassembly style to use. Default to "std" register names. */
220 static const char *disassembly_style;
221
222 /* This is used to keep the bfd arch_info in sync with the disassembly
223 style. */
224 static void set_disassembly_style_sfunc(char *, int,
225 struct cmd_list_element *);
226 static void set_disassembly_style (void);
227
228 static void convert_from_extended (const struct floatformat *, const void *,
229 void *, int);
230 static void convert_to_extended (const struct floatformat *, void *,
231 const void *, int);
232
233 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
234 struct regcache *regcache,
235 int regnum, gdb_byte *buf);
236 static void arm_neon_quad_write (struct gdbarch *gdbarch,
237 struct regcache *regcache,
238 int regnum, const gdb_byte *buf);
239
240 static int thumb_insn_size (unsigned short inst1);
241
/* Cached result of analyzing a function prologue, used by the ARM
   frame unwinders to locate the caller's frame and saved registers.  */
struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
261
262 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
263 CORE_ADDR prologue_start,
264 CORE_ADDR prologue_end,
265 struct arm_prologue_cache *cache);
266
267 /* Architecture version for displaced stepping. This effects the behaviour of
268 certain instructions, and really should not be hard-wired. */
269
270 #define DISPLACED_STEPPING_ARCH_VERSION 5
271
272 /* Addresses for calling Thumb functions have the bit 0 set.
273 Here are some macros to test, set, or clear bit 0 of addresses. */
274 #define IS_THUMB_ADDR(addr) ((addr) & 1)
275 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
276 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
277
278 /* Set to true if the 32-bit mode is in use. */
279
280 int arm_apcs_32 = 1;
281
282 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
283
284 int
285 arm_psr_thumb_bit (struct gdbarch *gdbarch)
286 {
287 if (gdbarch_tdep (gdbarch)->is_m)
288 return XPSR_T;
289 else
290 return CPSR_T;
291 }
292
293 /* Determine if FRAME is executing in Thumb mode. */
294
295 int
296 arm_frame_is_thumb (struct frame_info *frame)
297 {
298 CORE_ADDR cpsr;
299 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
300
301 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
302 directly (from a signal frame or dummy frame) or by interpreting
303 the saved LR (from a prologue or DWARF frame). So consult it and
304 trust the unwinders. */
305 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
306
307 return (cpsr & t_bit) != 0;
308 }
309
310 /* Callback for VEC_lower_bound. */
311
312 static inline int
313 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
314 const struct arm_mapping_symbol *rhs)
315 {
316 return lhs->value < rhs->value;
317 }
318
319 /* Search for the mapping symbol covering MEMADDR. If one is found,
320 return its type. Otherwise, return 0. If START is non-NULL,
321 set *START to the location of the mapping symbol. */
322
static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_per_objfile *data;
      VEC(arm_mapping_symbol_s) *map;
      /* Mapping symbol values are section-relative, so search for the
	 offset of MEMADDR within its section, not for MEMADDR itself.  */
      struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
					    0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_objfile_data_key);
      if (data != NULL)
	{
	  /* One symbol vector per BFD section; see arm_per_objfile.  */
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_mapping_symbol_s, map))
	    {
	      struct arm_mapping_symbol *map_sym;

	      idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
				     arm_compare_mapping_symbols);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 mapping symbol covers this address.  */
	      if (idx < VEC_length (arm_mapping_symbol_s, map))
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
		  if (map_sym->value == map_key.value)
		    {
		      if (start)
			*start = map_sym->value + obj_section_addr (sec);
		      return map_sym->type;
		    }
		}

	      if (idx > 0)
		{
		  /* MEMADDR falls strictly between two mapping symbols
		     (or after the last one); the preceding symbol is
		     the one in effect.  */
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
		  if (start)
		    *start = map_sym->value + obj_section_addr (sec);
		  return map_sym->type;
		}
	    }
	}
    }

  /* No section, no per-objfile data, or MEMADDR precedes the first
     mapping symbol of its section.  */
  return 0;
}
377
378 /* Determine if the program counter specified in MEMADDR is in a Thumb
379 function. This function should be called for addresses unrelated to
380 any executing frame; otherwise, prefer arm_frame_is_thumb. */
381
int
arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
{
  /* The checks below are ordered from most to least authoritative;
     the first one that yields an answer wins.  */
  struct bound_minimal_symbol sym;
  char type;
  struct displaced_step_closure* dsc
    = get_displaced_step_closure_by_addr(memaddr);

  /* If checking the mode of displaced instruction in copy area, the mode
     should be determined by instruction on the original address.  */
  if (dsc)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: check mode of %.8lx instead of %.8lx\n",
			    (unsigned long) dsc->insn_addr,
			    (unsigned long) memaddr);
      memaddr = dsc->insn_addr;
    }

  /* If bit 0 of the address is set, assume this is a Thumb address.  */
  if (IS_THUMB_ADDR (memaddr))
    return 1;

  /* Respect internal mode override if active.  */
  if (arm_override_mode != -1)
    return arm_override_mode;

  /* If the user wants to override the symbol table, let him.  */
  if (strcmp (arm_force_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_force_mode_string, "thumb") == 0)
    return 1;

  /* ARM v6-M and v7-M are always in Thumb mode.  */
  if (gdbarch_tdep (gdbarch)->is_m)
    return 1;

  /* If there are mapping symbols, consult them.  */
  type = arm_find_mapping_symbol (memaddr, NULL);
  if (type)
    return type == 't';

  /* Thumb functions have a "special" bit set in minimal symbols.  */
  sym = lookup_minimal_symbol_by_pc (memaddr);
  if (sym.minsym)
    return (MSYMBOL_IS_SPECIAL (sym.minsym));

  /* If the user wants to override the fallback mode, let them.  */
  if (strcmp (arm_fallback_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_fallback_mode_string, "thumb") == 0)
    return 1;

  /* If we couldn't find any symbol, but we're talking to a running
     target, then trust the current value of $cpsr.  This lets
     "display/i $pc" always show the correct mode (though if there is
     a symbol table we will not reach here, so it still may not be
     displayed in the mode it will be executed).  */
  if (target_has_registers)
    return arm_frame_is_thumb (get_current_frame ());

  /* Otherwise we're out of luck; we assume ARM.  */
  return 0;
}
447
448 /* Remove useless bits from addresses in a running program. */
449 static CORE_ADDR
450 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
451 {
452 /* On M-profile devices, do not strip the low bit from EXC_RETURN
453 (the magic exception return address). */
454 if (gdbarch_tdep (gdbarch)->is_m
455 && (val & 0xfffffff0) == 0xfffffff0)
456 return val;
457
458 if (arm_apcs_32)
459 return UNMAKE_THUMB_ADDR (val);
460 else
461 return (val & 0x03fffffc);
462 }
463
464 /* Return 1 if PC is the start of a compiler helper function which
465 can be safely ignored during prologue skipping. IS_THUMB is true
466 if the function is known to be a Thumb function due to the way it
467 is being called. */
static int
skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct bound_minimal_symbol msym;

  /* Only consider symbols whose entry point is exactly PC.  */
  msym = lookup_minimal_symbol_by_pc (pc);
  if (msym.minsym != NULL
      && BMSYMBOL_VALUE_ADDRESS (msym) == pc
      && MSYMBOL_LINKAGE_NAME (msym.minsym) != NULL)
    {
      const char *name = MSYMBOL_LINKAGE_NAME (msym.minsym);

      /* The GNU linker's Thumb call stub to foo is named
	 __foo_from_thumb.  Skip the stub's leading "__" so the
	 prefix comparisons below still see the original name (e.g.
	 the stub for __aeabi_read_tp is named ____aeabi_read_tp_from_thumb,
	 which after the adjustment prefix-matches "__aeabi_read_tp").
	 This is also why strncmp prefix matching, rather than an exact
	 strcmp, is used throughout this function.  */
      if (strstr (name, "_from_thumb") != NULL)
	name += 2;

      /* On soft-float targets, __truncdfsf2 is called to convert promoted
	 arguments to their argument types in non-prototyped
	 functions.  */
      if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
	return 1;

      /* Internal functions related to thread-local storage.  */
      if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
	return 1;
    }
  else
    {
      /* If we run against a stripped glibc, we may be unable to identify
	 special functions by name.  Check for one important case,
	 __aeabi_read_tp, by comparing the *code* against the default
	 implementation (this is hand-written ARM assembler in glibc).  */

      if (!is_thumb
	  && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
	     == 0xe3e00a0f /* mov r0, #0xffff0fff */
	  && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
	     == 0xe240f01f) /* sub pc, r0, #31 */
	return 1;
    }

  return 0;
}
517
518 /* Support routines for instruction parsing. */
519 #define submask(x) ((1L << ((x) + 1)) - 1)
520 #define bit(obj,st) (((obj) >> (st)) & 1)
521 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
522 #define sbits(obj,st,fn) \
523 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
524 #define BranchDest(addr,instr) \
525 ((CORE_ADDR) (((unsigned long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
526
527 /* Extract the immediate from instruction movw/movt of encoding T. INSN1 is
528 the first 16-bit of instruction, and INSN2 is the second 16-bit of
529 instruction. */
530 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
531 ((bits ((insn1), 0, 3) << 12) \
532 | (bits ((insn1), 10, 10) << 11) \
533 | (bits ((insn2), 12, 14) << 8) \
534 | bits ((insn2), 0, 7))
535
536 /* Extract the immediate from instruction movw/movt of encoding A. INSN is
537 the 32-bit instruction. */
538 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
539 ((bits ((insn), 16, 19) << 12) \
540 | bits ((insn), 0, 11))
541
542 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
543
static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  /* Top five bits of the 12-bit encoded immediate select the
     expansion form; the low byte carries the value.  */
  unsigned int control = imm >> 7;
  unsigned int byte = imm & 0xff;

  if (control >= 8)
    /* Rotated constant: an 8-bit value with the top bit forced on,
       rotated right by CONTROL bits.  */
    return (0x80 | (imm & 0x7f)) << (32 - control);

  switch (control >> 1)
    {
    case 0:	/* 00000000 00000000 00000000 abcdefgh  */
      return byte;
    case 1:	/* 00000000 abcdefgh 00000000 abcdefgh  */
      return byte | (byte << 16);
    case 2:	/* abcdefgh 00000000 abcdefgh 00000000  */
      return (byte << 8) | (byte << 24);
    default:	/* abcdefgh abcdefgh abcdefgh abcdefgh  */
      return byte | (byte << 8) | (byte << 16) | (byte << 24);
    }
}
565
566 /* Return 1 if the 16-bit Thumb instruction INST might change
567 control flow, 0 otherwise. */
568
static int
thumb_instruction_changes_pc (unsigned short inst)
{
  /* Match every 16-bit Thumb encoding that can write the PC.  */
  return ((inst & 0xff00) == 0xbd00	/* pop {rlist, pc} */
	  || (inst & 0xf000) == 0xd000	/* conditional branch */
	  || (inst & 0xf800) == 0xe000	/* unconditional branch */
	  || (inst & 0xff00) == 0x4700	/* bx REG, blx REG */
	  || (inst & 0xff87) == 0x4687	/* mov pc, REG */
	  || (inst & 0xf500) == 0xb100);	/* CBNZ or CBZ.  */
}
592
593 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
594 might change control flow, 0 otherwise. */
595
/* INST1 is the first halfword, INST2 the second halfword, of the
   32-bit Thumb-2 instruction being examined.  */
static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  */
	  return 1;
	}

      /* Other miscellaneous control instructions (hints, barriers,
	 MSR/MRS, ...) do not change the PC.  */
      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  Bits 7 and 8 of INST1 select the
	 addressing variant.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB */
	  return 1;
	}

      /* A load-multiple whose register list excludes the PC.  */
      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR PC.  The remaining bits distinguish the addressing forms;
	 all of them change the PC except the plain negative-offset
	 register form rejected below.  */
      if (bits (inst1, 0, 3) == 15)
	return 1;
      if (bit (inst1, 7))
	return 1;
      if (bit (inst2, 11))
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)
	return 1;

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  /* Anything else leaves the PC alone.  */
  return 0;
}
687
688 /* Return 1 if the 16-bit Thumb instruction INSN restores SP in
689 epilogue, 0 otherwise. */
690
static int
thumb_instruction_restores_sp (unsigned short insn)
{
  /* Recognize the three 16-bit encodings an epilogue uses to restore
     the stack pointer.  */
  if (insn == 0x46bd)			/* mov sp, r7 */
    return 1;
  if ((insn & 0xff80) == 0xb000)	/* add sp, imm */
    return 1;
  if ((insn & 0xfe00) == 0xbc00)	/* pop <registers> */
    return 1;
  return 0;
}
698
699 /* Analyze a Thumb prologue, looking for a recognizable stack frame
700 and frame pointer. Scan until we encounter a store that could
701 clobber the stack frame unexpectedly, or an unknown instruction.
702 Return the last address which is definitely safe to skip for an
703 initial breakpoint. */
704
705 static CORE_ADDR
706 thumb_analyze_prologue (struct gdbarch *gdbarch,
707 CORE_ADDR start, CORE_ADDR limit,
708 struct arm_prologue_cache *cache)
709 {
710 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
711 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
712 int i;
713 pv_t regs[16];
714 struct pv_area *stack;
715 struct cleanup *back_to;
716 CORE_ADDR offset;
717 CORE_ADDR unrecognized_pc = 0;
718
719 for (i = 0; i < 16; i++)
720 regs[i] = pv_register (i, 0);
721 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
722 back_to = make_cleanup_free_pv_area (stack);
723
724 while (start < limit)
725 {
726 unsigned short insn;
727
728 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
729
730 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
731 {
732 int regno;
733 int mask;
734
735 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
736 break;
737
738 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
739 whether to save LR (R14). */
740 mask = (insn & 0xff) | ((insn & 0x100) << 6);
741
742 /* Calculate offsets of saved R0-R7 and LR. */
743 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
744 if (mask & (1 << regno))
745 {
746 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
747 -4);
748 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
749 }
750 }
751 else if ((insn & 0xff80) == 0xb080) /* sub sp, #imm */
752 {
753 offset = (insn & 0x7f) << 2; /* get scaled offset */
754 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
755 -offset);
756 }
757 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
758 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
759 (insn & 0xff) << 2);
760 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
761 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
762 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
763 bits (insn, 6, 8));
764 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
765 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
766 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
767 bits (insn, 0, 7));
768 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
769 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
770 && pv_is_constant (regs[bits (insn, 3, 5)]))
771 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
772 regs[bits (insn, 6, 8)]);
773 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
774 && pv_is_constant (regs[bits (insn, 3, 6)]))
775 {
776 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
777 int rm = bits (insn, 3, 6);
778 regs[rd] = pv_add (regs[rd], regs[rm]);
779 }
780 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
781 {
782 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
783 int src_reg = (insn & 0x78) >> 3;
784 regs[dst_reg] = regs[src_reg];
785 }
786 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
787 {
788 /* Handle stores to the stack. Normally pushes are used,
789 but with GCC -mtpcs-frame, there may be other stores
790 in the prologue to create the frame. */
791 int regno = (insn >> 8) & 0x7;
792 pv_t addr;
793
794 offset = (insn & 0xff) << 2;
795 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
796
797 if (pv_area_store_would_trash (stack, addr))
798 break;
799
800 pv_area_store (stack, addr, 4, regs[regno]);
801 }
802 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
803 {
804 int rd = bits (insn, 0, 2);
805 int rn = bits (insn, 3, 5);
806 pv_t addr;
807
808 offset = bits (insn, 6, 10) << 2;
809 addr = pv_add_constant (regs[rn], offset);
810
811 if (pv_area_store_would_trash (stack, addr))
812 break;
813
814 pv_area_store (stack, addr, 4, regs[rd]);
815 }
816 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
817 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
818 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
819 /* Ignore stores of argument registers to the stack. */
820 ;
821 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
822 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
823 /* Ignore block loads from the stack, potentially copying
824 parameters from memory. */
825 ;
826 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
827 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
828 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
829 /* Similarly ignore single loads from the stack. */
830 ;
831 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
832 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
833 /* Skip register copies, i.e. saves to another register
834 instead of the stack. */
835 ;
836 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
837 /* Recognize constant loads; even with small stacks these are necessary
838 on Thumb. */
839 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
840 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
841 {
842 /* Constant pool loads, for the same reason. */
843 unsigned int constant;
844 CORE_ADDR loc;
845
846 loc = start + 4 + bits (insn, 0, 7) * 4;
847 constant = read_memory_unsigned_integer (loc, 4, byte_order);
848 regs[bits (insn, 8, 10)] = pv_constant (constant);
849 }
850 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
851 {
852 unsigned short inst2;
853
854 inst2 = read_memory_unsigned_integer (start + 2, 2,
855 byte_order_for_code);
856
857 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
858 {
859 /* BL, BLX. Allow some special function calls when
860 skipping the prologue; GCC generates these before
861 storing arguments to the stack. */
862 CORE_ADDR nextpc;
863 int j1, j2, imm1, imm2;
864
865 imm1 = sbits (insn, 0, 10);
866 imm2 = bits (inst2, 0, 10);
867 j1 = bit (inst2, 13);
868 j2 = bit (inst2, 11);
869
870 offset = ((imm1 << 12) + (imm2 << 1));
871 offset ^= ((!j2) << 22) | ((!j1) << 23);
872
873 nextpc = start + 4 + offset;
874 /* For BLX make sure to clear the low bits. */
875 if (bit (inst2, 12) == 0)
876 nextpc = nextpc & 0xfffffffc;
877
878 if (!skip_prologue_function (gdbarch, nextpc,
879 bit (inst2, 12) != 0))
880 break;
881 }
882
883 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
884 { registers } */
885 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
886 {
887 pv_t addr = regs[bits (insn, 0, 3)];
888 int regno;
889
890 if (pv_area_store_would_trash (stack, addr))
891 break;
892
893 /* Calculate offsets of saved registers. */
894 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
895 if (inst2 & (1 << regno))
896 {
897 addr = pv_add_constant (addr, -4);
898 pv_area_store (stack, addr, 4, regs[regno]);
899 }
900
901 if (insn & 0x0020)
902 regs[bits (insn, 0, 3)] = addr;
903 }
904
905 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
906 [Rn, #+/-imm]{!} */
907 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
908 {
909 int regno1 = bits (inst2, 12, 15);
910 int regno2 = bits (inst2, 8, 11);
911 pv_t addr = regs[bits (insn, 0, 3)];
912
913 offset = inst2 & 0xff;
914 if (insn & 0x0080)
915 addr = pv_add_constant (addr, offset);
916 else
917 addr = pv_add_constant (addr, -offset);
918
919 if (pv_area_store_would_trash (stack, addr))
920 break;
921
922 pv_area_store (stack, addr, 4, regs[regno1]);
923 pv_area_store (stack, pv_add_constant (addr, 4),
924 4, regs[regno2]);
925
926 if (insn & 0x0020)
927 regs[bits (insn, 0, 3)] = addr;
928 }
929
930 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
931 && (inst2 & 0x0c00) == 0x0c00
932 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
933 {
934 int regno = bits (inst2, 12, 15);
935 pv_t addr = regs[bits (insn, 0, 3)];
936
937 offset = inst2 & 0xff;
938 if (inst2 & 0x0200)
939 addr = pv_add_constant (addr, offset);
940 else
941 addr = pv_add_constant (addr, -offset);
942
943 if (pv_area_store_would_trash (stack, addr))
944 break;
945
946 pv_area_store (stack, addr, 4, regs[regno]);
947
948 if (inst2 & 0x0100)
949 regs[bits (insn, 0, 3)] = addr;
950 }
951
952 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
953 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
954 {
955 int regno = bits (inst2, 12, 15);
956 pv_t addr;
957
958 offset = inst2 & 0xfff;
959 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
960
961 if (pv_area_store_would_trash (stack, addr))
962 break;
963
964 pv_area_store (stack, addr, 4, regs[regno]);
965 }
966
967 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
968 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
969 /* Ignore stores of argument registers to the stack. */
970 ;
971
972 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
973 && (inst2 & 0x0d00) == 0x0c00
974 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
975 /* Ignore stores of argument registers to the stack. */
976 ;
977
978 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
979 { registers } */
980 && (inst2 & 0x8000) == 0x0000
981 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
982 /* Ignore block loads from the stack, potentially copying
983 parameters from memory. */
984 ;
985
986 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
987 [Rn, #+/-imm] */
988 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
989 /* Similarly ignore dual loads from the stack. */
990 ;
991
992 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
993 && (inst2 & 0x0d00) == 0x0c00
994 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
995 /* Similarly ignore single loads from the stack. */
996 ;
997
998 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
999 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
1000 /* Similarly ignore single loads from the stack. */
1001 ;
1002
1003 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
1004 && (inst2 & 0x8000) == 0x0000)
1005 {
1006 unsigned int imm = ((bits (insn, 10, 10) << 11)
1007 | (bits (inst2, 12, 14) << 8)
1008 | bits (inst2, 0, 7));
1009
1010 regs[bits (inst2, 8, 11)]
1011 = pv_add_constant (regs[bits (insn, 0, 3)],
1012 thumb_expand_immediate (imm));
1013 }
1014
1015 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1016 && (inst2 & 0x8000) == 0x0000)
1017 {
1018 unsigned int imm = ((bits (insn, 10, 10) << 11)
1019 | (bits (inst2, 12, 14) << 8)
1020 | bits (inst2, 0, 7));
1021
1022 regs[bits (inst2, 8, 11)]
1023 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1024 }
1025
1026 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1027 && (inst2 & 0x8000) == 0x0000)
1028 {
1029 unsigned int imm = ((bits (insn, 10, 10) << 11)
1030 | (bits (inst2, 12, 14) << 8)
1031 | bits (inst2, 0, 7));
1032
1033 regs[bits (inst2, 8, 11)]
1034 = pv_add_constant (regs[bits (insn, 0, 3)],
1035 - (CORE_ADDR) thumb_expand_immediate (imm));
1036 }
1037
1038 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1039 && (inst2 & 0x8000) == 0x0000)
1040 {
1041 unsigned int imm = ((bits (insn, 10, 10) << 11)
1042 | (bits (inst2, 12, 14) << 8)
1043 | bits (inst2, 0, 7));
1044
1045 regs[bits (inst2, 8, 11)]
1046 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1047 }
1048
1049 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1050 {
1051 unsigned int imm = ((bits (insn, 10, 10) << 11)
1052 | (bits (inst2, 12, 14) << 8)
1053 | bits (inst2, 0, 7));
1054
1055 regs[bits (inst2, 8, 11)]
1056 = pv_constant (thumb_expand_immediate (imm));
1057 }
1058
1059 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1060 {
1061 unsigned int imm
1062 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1063
1064 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1065 }
1066
1067 else if (insn == 0xea5f /* mov.w Rd,Rm */
1068 && (inst2 & 0xf0f0) == 0)
1069 {
1070 int dst_reg = (inst2 & 0x0f00) >> 8;
1071 int src_reg = inst2 & 0xf;
1072 regs[dst_reg] = regs[src_reg];
1073 }
1074
1075 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1076 {
1077 /* Constant pool loads. */
1078 unsigned int constant;
1079 CORE_ADDR loc;
1080
1081 offset = bits (inst2, 0, 11);
1082 if (insn & 0x0080)
1083 loc = start + 4 + offset;
1084 else
1085 loc = start + 4 - offset;
1086
1087 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1088 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1089 }
1090
1091 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1092 {
1093 /* Constant pool loads. */
1094 unsigned int constant;
1095 CORE_ADDR loc;
1096
1097 offset = bits (inst2, 0, 7) << 2;
1098 if (insn & 0x0080)
1099 loc = start + 4 + offset;
1100 else
1101 loc = start + 4 - offset;
1102
1103 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1104 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1105
1106 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1107 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1108 }
1109
1110 else if (thumb2_instruction_changes_pc (insn, inst2))
1111 {
1112 /* Don't scan past anything that might change control flow. */
1113 break;
1114 }
1115 else
1116 {
1117 /* The optimizer might shove anything into the prologue,
1118 so we just skip what we don't recognize. */
1119 unrecognized_pc = start;
1120 }
1121
1122 start += 2;
1123 }
1124 else if (thumb_instruction_changes_pc (insn))
1125 {
1126 /* Don't scan past anything that might change control flow. */
1127 break;
1128 }
1129 else
1130 {
1131 /* The optimizer might shove anything into the prologue,
1132 so we just skip what we don't recognize. */
1133 unrecognized_pc = start;
1134 }
1135
1136 start += 2;
1137 }
1138
1139 if (arm_debug)
1140 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1141 paddress (gdbarch, start));
1142
1143 if (unrecognized_pc == 0)
1144 unrecognized_pc = start;
1145
1146 if (cache == NULL)
1147 {
1148 do_cleanups (back_to);
1149 return unrecognized_pc;
1150 }
1151
1152 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1153 {
1154 /* Frame pointer is fp. Frame size is constant. */
1155 cache->framereg = ARM_FP_REGNUM;
1156 cache->framesize = -regs[ARM_FP_REGNUM].k;
1157 }
1158 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1159 {
1160 /* Frame pointer is r7. Frame size is constant. */
1161 cache->framereg = THUMB_FP_REGNUM;
1162 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1163 }
1164 else
1165 {
1166 /* Try the stack pointer... this is a bit desperate. */
1167 cache->framereg = ARM_SP_REGNUM;
1168 cache->framesize = -regs[ARM_SP_REGNUM].k;
1169 }
1170
1171 for (i = 0; i < 16; i++)
1172 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1173 cache->saved_regs[i].addr = offset;
1174
1175 do_cleanups (back_to);
1176 return unrecognized_pc;
1177 }
1178
1179
/* Try to analyze the instructions starting from PC, which load symbol
   __stack_chk_guard.  Return the address of symbol __stack_chk_guard, set
   the destination register number to *DESTREG, and set the size in bytes
   of the loading instruction sequence to *OFFSET.  Return 0 if the
   instructions are not recognized.  */
1185
1186 static CORE_ADDR
1187 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1188 unsigned int *destreg, int *offset)
1189 {
1190 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1191 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1192 unsigned int low, high, address;
1193
1194 address = 0;
1195 if (is_thumb)
1196 {
1197 unsigned short insn1
1198 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1199
1200 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1201 {
1202 *destreg = bits (insn1, 8, 10);
1203 *offset = 2;
1204 address = bits (insn1, 0, 7);
1205 }
1206 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1207 {
1208 unsigned short insn2
1209 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1210
1211 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1212
1213 insn1
1214 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1215 insn2
1216 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1217
1218 /* movt Rd, #const */
1219 if ((insn1 & 0xfbc0) == 0xf2c0)
1220 {
1221 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1222 *destreg = bits (insn2, 8, 11);
1223 *offset = 8;
1224 address = (high << 16 | low);
1225 }
1226 }
1227 }
1228 else
1229 {
1230 unsigned int insn
1231 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1232
1233 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1234 {
1235 address = bits (insn, 0, 11);
1236 *destreg = bits (insn, 12, 15);
1237 *offset = 4;
1238 }
1239 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1240 {
1241 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1242
1243 insn
1244 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1245
1246 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1247 {
1248 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1249 *destreg = bits (insn, 12, 15);
1250 *offset = 8;
1251 address = (high << 16 | low);
1252 }
1253 }
1254 }
1255
1256 return address;
1257 }
1258
1259 /* Try to skip a sequence of instructions used for stack protector. If PC
1260 points to the first instruction of this sequence, return the address of
1261 first instruction after this sequence, otherwise, return original PC.
1262
1263 On arm, this sequence of instructions is composed of mainly three steps,
1264 Step 1: load symbol __stack_chk_guard,
1265 Step 2: load from address of __stack_chk_guard,
1266 Step 3: store it to somewhere else.
1267
1268 Usually, instructions on step 2 and step 3 are the same on various ARM
1269 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1270 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1271 instructions in step 1 vary from different ARM architectures. On ARMv7,
1272 they are,
1273
1274 movw Rn, #:lower16:__stack_chk_guard
1275 movt Rn, #:upper16:__stack_chk_guard
1276
1277 On ARMv5t, it is,
1278
1279 ldr Rn, .Label
1280 ....
   .Label:
1282 .word __stack_chk_guard
1283
   Since ldr/str is a very popular instruction, we can't use them as
   'fingerprint' or 'signature' of stack protector sequence.  Here we choose
   sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */
1288
1289 static CORE_ADDR
1290 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1291 {
1292 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1293 unsigned int basereg;
1294 struct bound_minimal_symbol stack_chk_guard;
1295 int offset;
1296 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1297 CORE_ADDR addr;
1298
1299 /* Try to parse the instructions in Step 1. */
1300 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1301 &basereg, &offset);
1302 if (!addr)
1303 return pc;
1304
1305 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1306 /* If name of symbol doesn't start with '__stack_chk_guard', this
1307 instruction sequence is not for stack protector. If symbol is
1308 removed, we conservatively think this sequence is for stack protector. */
1309 if (stack_chk_guard.minsym
1310 && strncmp (MSYMBOL_LINKAGE_NAME (stack_chk_guard.minsym),
1311 "__stack_chk_guard",
1312 strlen ("__stack_chk_guard")) != 0)
1313 return pc;
1314
1315 if (is_thumb)
1316 {
1317 unsigned int destreg;
1318 unsigned short insn
1319 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1320
1321 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1322 if ((insn & 0xf800) != 0x6800)
1323 return pc;
1324 if (bits (insn, 3, 5) != basereg)
1325 return pc;
1326 destreg = bits (insn, 0, 2);
1327
1328 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1329 byte_order_for_code);
1330 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1331 if ((insn & 0xf800) != 0x6000)
1332 return pc;
1333 if (destreg != bits (insn, 0, 2))
1334 return pc;
1335 }
1336 else
1337 {
1338 unsigned int destreg;
1339 unsigned int insn
1340 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1341
1342 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1343 if ((insn & 0x0e500000) != 0x04100000)
1344 return pc;
1345 if (bits (insn, 16, 19) != basereg)
1346 return pc;
1347 destreg = bits (insn, 12, 15);
1348 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1349 insn = read_memory_unsigned_integer (pc + offset + 4,
1350 4, byte_order_for_code);
1351 if ((insn & 0x0e500000) != 0x04000000)
1352 return pc;
1353 if (bits (insn, 12, 15) != destreg)
1354 return pc;
1355 }
1356 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1357 on arm. */
1358 if (is_thumb)
1359 return pc + offset + 4;
1360 else
1361 return pc + offset + 8;
1362 }
1363
1364 /* Advance the PC across any function entry prologue instructions to
1365 reach some "real" code.
1366
1367 The APCS (ARM Procedure Call Standard) defines the following
1368 prologue:
1369
1370 mov ip, sp
1371 [stmfd sp!, {a1,a2,a3,a4}]
1372 stmfd sp!, {...,fp,ip,lr,pc}
1373 [stfe f7, [sp, #-12]!]
1374 [stfe f6, [sp, #-12]!]
1375 [stfe f5, [sp, #-12]!]
1376 [stfe f4, [sp, #-12]!]
1377 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1378
static CORE_ADDR
arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol table.
     If so, then return either PC, or the PC after the prologue, whichever
     is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);
      struct symtab *s = find_pc_symtab (func_addr);

      /* If GCC emitted a stack-protector sequence after the prologue,
	 advance past it as well.  */
      if (post_prologue_pc)
	post_prologue_pc
	  = arm_skip_stack_protector (post_prologue_pc, gdbarch);


      /* GCC always emits a line note before the prologue and another
	 one after, even if the two are at the same address or on the
	 same line.  Take advantage of this so that we do not need to
	 know every instruction that might appear in the prologue.  We
	 will have producer information for most binaries; if it is
	 missing (e.g. for -gstabs), assume the GNU tools.  */
      if (post_prologue_pc
	  && (s == NULL
	      || s->producer == NULL
	      || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0 
	      || strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0))
	return post_prologue_pc;

      if (post_prologue_pc != 0)
	{
	  CORE_ADDR analyzed_limit;

	  /* For non-GCC compilers, make sure the entire line is an
	     acceptable prologue; GDB will round this function's
	     return value up to the end of the following line so we
	     can not skip just part of a line (and we do not want to).

	     RealView does not treat the prologue specially, but does
	     associate prologue code with the opening brace; so this
	     lets us skip the first line if we think it is the opening
	     brace.  */
	  if (arm_pc_is_thumb (gdbarch, func_addr))
	    analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
						     post_prologue_pc, NULL);
	  else
	    analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
						   post_prologue_pc, NULL);

	  /* If the analyzer stopped early, the line-table endpoint is
	     not trustworthy; be conservative and skip nothing.  */
	  if (analyzed_limit != post_prologue_pc)
	    return func_addr;

	  return post_prologue_pc;
	}
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to provide
     that bound, then use an arbitrary large number as the upper bound.  */
  /* Like arm_scan_prologue, stop no later than pc + 64.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 64;          /* Magic.  */


  /* Check if this is Thumb code.  */
  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);

  /* ARM mode: skip the instructions a GCC/APCS prologue may contain,
     one word at a time, stopping at the first one we don't recognize.  */
  for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
    {
      inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);

      /* "mov ip, sp" is no longer a required part of the prologue.  */
      if (inst == 0xe1a0c00d)			/* mov ip, sp */
	continue;

      if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
	continue;

      if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
	continue;

      /* Some prologues begin with "str lr, [sp, #-4]!".  */
      if (inst == 0xe52de004)			/* str lr, [sp, #-4]! */
	continue;

      if ((inst & 0xfffffff0) == 0xe92d0000)	/* stmfd sp!,{a1,a2,a3,a4} */
	continue;

      if ((inst & 0xfffff800) == 0xe92dd800)	/* stmfd sp!,{fp,ip,lr,pc} */
	continue;

      /* Any insns after this point may float into the code, if it makes
	 for better instruction scheduling, so we skip them only if we
	 find them, but still consider the function to be frame-ful.  */

      /* We may have either one sfmfd instruction here, or several stfe
	 insns, depending on the version of floating point code we
	 support.  */
      if ((inst & 0xffbf0fff) == 0xec2d0200)	/* sfmfd fn, <cnt>, [sp]! */
	continue;

      if ((inst & 0xffff8fff) == 0xed6d0103)	/* stfe fn, [sp, #-12]! */
	continue;

      if ((inst & 0xfffff000) == 0xe24cb000)	/* sub fp, ip, #nn */
	continue;

      if ((inst & 0xfffff000) == 0xe24dd000)	/* sub sp, sp, #nn */
	continue;

      if ((inst & 0xffffc000) == 0xe54b0000	/* strb r(0123),[r11,#-nn] */
	  || (inst & 0xffffc0f0) == 0xe14b00b0	/* strh r(0123),[r11,#-nn] */
	  || (inst & 0xffffc000) == 0xe50b0000)	/* str r(0123),[r11,#-nn] */
	continue;

      if ((inst & 0xffffc000) == 0xe5cd0000	/* strb r(0123),[sp,#nn] */
	  || (inst & 0xffffc0f0) == 0xe1cd00b0	/* strh r(0123),[sp,#nn] */
	  || (inst & 0xffffc000) == 0xe58d0000)	/* str r(0123),[sp,#nn] */
	continue;

      /* Un-recognized instruction; stop scanning.  */
      break;
    }

  return skip_pc;		/* End of prologue.  */
}
1516
1517 /* *INDENT-OFF* */
1518 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1519 This function decodes a Thumb function prologue to determine:
1520 1) the size of the stack frame
1521 2) which registers are saved on it
1522 3) the offsets of saved regs
1523 4) the offset from the stack pointer to the frame pointer
1524
1525 A typical Thumb function prologue would create this stack frame
1526 (offsets relative to FP)
1527 old SP -> 24 stack parameters
1528 20 LR
1529 16 R7
1530 R7 -> 0 local variables (16 bytes)
1531 SP -> -12 additional stack space (12 bytes)
1532 The frame size would thus be 36 bytes, and the frame offset would be
1533 12 bytes. The frame register is R7.
1534
1535 The comments for thumb_skip_prolog() describe the algorithm we use
1536 to detect the end of the prolog. */
1537 /* *INDENT-ON* */
1538
1539 static void
1540 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1541 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1542 {
1543 CORE_ADDR prologue_start;
1544 CORE_ADDR prologue_end;
1545
1546 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1547 &prologue_end))
1548 {
1549 /* See comment in arm_scan_prologue for an explanation of
1550 this heuristics. */
1551 if (prologue_end > prologue_start + 64)
1552 {
1553 prologue_end = prologue_start + 64;
1554 }
1555 }
1556 else
1557 /* We're in the boondocks: we have no idea where the start of the
1558 function is. */
1559 return;
1560
1561 prologue_end = min (prologue_end, prev_pc);
1562
1563 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1564 }
1565
1566 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1567
1568 static int
1569 arm_instruction_changes_pc (uint32_t this_instr)
1570 {
1571 if (bits (this_instr, 28, 31) == INST_NV)
1572 /* Unconditional instructions. */
1573 switch (bits (this_instr, 24, 27))
1574 {
1575 case 0xa:
1576 case 0xb:
1577 /* Branch with Link and change to Thumb. */
1578 return 1;
1579 case 0xc:
1580 case 0xd:
1581 case 0xe:
1582 /* Coprocessor register transfer. */
1583 if (bits (this_instr, 12, 15) == 15)
1584 error (_("Invalid update to pc in instruction"));
1585 return 0;
1586 default:
1587 return 0;
1588 }
1589 else
1590 switch (bits (this_instr, 25, 27))
1591 {
1592 case 0x0:
1593 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1594 {
1595 /* Multiplies and extra load/stores. */
1596 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1597 /* Neither multiplies nor extension load/stores are allowed
1598 to modify PC. */
1599 return 0;
1600
1601 /* Otherwise, miscellaneous instructions. */
1602
1603 /* BX <reg>, BXJ <reg>, BLX <reg> */
1604 if (bits (this_instr, 4, 27) == 0x12fff1
1605 || bits (this_instr, 4, 27) == 0x12fff2
1606 || bits (this_instr, 4, 27) == 0x12fff3)
1607 return 1;
1608
1609 /* Other miscellaneous instructions are unpredictable if they
1610 modify PC. */
1611 return 0;
1612 }
1613 /* Data processing instruction. Fall through. */
1614
1615 case 0x1:
1616 if (bits (this_instr, 12, 15) == 15)
1617 return 1;
1618 else
1619 return 0;
1620
1621 case 0x2:
1622 case 0x3:
1623 /* Media instructions and architecturally undefined instructions. */
1624 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1625 return 0;
1626
1627 /* Stores. */
1628 if (bit (this_instr, 20) == 0)
1629 return 0;
1630
1631 /* Loads. */
1632 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1633 return 1;
1634 else
1635 return 0;
1636
1637 case 0x4:
1638 /* Load/store multiple. */
1639 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1640 return 1;
1641 else
1642 return 0;
1643
1644 case 0x5:
1645 /* Branch and branch with link. */
1646 return 1;
1647
1648 case 0x6:
1649 case 0x7:
1650 /* Coprocessor transfers or SWIs can not affect PC. */
1651 return 0;
1652
1653 default:
1654 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1655 }
1656 }
1657
1658 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1659 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1660 fill it in. Return the first address not recognized as a prologue
1661 instruction.
1662
1663 We recognize all the instructions typically found in ARM prologues,
1664 plus harmless instructions which can be skipped (either for analysis
1665 purposes, or a more restrictive set that can be skipped when finding
1666 the end of the prologue). */
1667
1668 static CORE_ADDR
1669 arm_analyze_prologue (struct gdbarch *gdbarch,
1670 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1671 struct arm_prologue_cache *cache)
1672 {
1673 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1674 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1675 int regno;
1676 CORE_ADDR offset, current_pc;
1677 pv_t regs[ARM_FPS_REGNUM];
1678 struct pv_area *stack;
1679 struct cleanup *back_to;
1680 int framereg, framesize;
1681 CORE_ADDR unrecognized_pc = 0;
1682
1683 /* Search the prologue looking for instructions that set up the
1684 frame pointer, adjust the stack pointer, and save registers.
1685
1686 Be careful, however, and if it doesn't look like a prologue,
1687 don't try to scan it. If, for instance, a frameless function
1688 begins with stmfd sp!, then we will tell ourselves there is
1689 a frame, which will confuse stack traceback, as well as "finish"
1690 and other operations that rely on a knowledge of the stack
1691 traceback. */
1692
1693 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1694 regs[regno] = pv_register (regno, 0);
1695 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1696 back_to = make_cleanup_free_pv_area (stack);
1697
1698 for (current_pc = prologue_start;
1699 current_pc < prologue_end;
1700 current_pc += 4)
1701 {
1702 unsigned int insn
1703 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1704
1705 if (insn == 0xe1a0c00d) /* mov ip, sp */
1706 {
1707 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1708 continue;
1709 }
1710 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1711 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1712 {
1713 unsigned imm = insn & 0xff; /* immediate value */
1714 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1715 int rd = bits (insn, 12, 15);
1716 imm = (imm >> rot) | (imm << (32 - rot));
1717 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1718 continue;
1719 }
1720 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1721 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1722 {
1723 unsigned imm = insn & 0xff; /* immediate value */
1724 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1725 int rd = bits (insn, 12, 15);
1726 imm = (imm >> rot) | (imm << (32 - rot));
1727 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1728 continue;
1729 }
1730 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1731 [sp, #-4]! */
1732 {
1733 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1734 break;
1735 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1736 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1737 regs[bits (insn, 12, 15)]);
1738 continue;
1739 }
1740 else if ((insn & 0xffff0000) == 0xe92d0000)
1741 /* stmfd sp!, {..., fp, ip, lr, pc}
1742 or
1743 stmfd sp!, {a1, a2, a3, a4} */
1744 {
1745 int mask = insn & 0xffff;
1746
1747 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1748 break;
1749
1750 /* Calculate offsets of saved registers. */
1751 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1752 if (mask & (1 << regno))
1753 {
1754 regs[ARM_SP_REGNUM]
1755 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1756 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1757 }
1758 }
1759 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1760 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1761 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1762 {
1763 /* No need to add this to saved_regs -- it's just an arg reg. */
1764 continue;
1765 }
1766 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1767 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1768 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1769 {
1770 /* No need to add this to saved_regs -- it's just an arg reg. */
1771 continue;
1772 }
1773 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1774 { registers } */
1775 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1776 {
1777 /* No need to add this to saved_regs -- it's just arg regs. */
1778 continue;
1779 }
1780 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1781 {
1782 unsigned imm = insn & 0xff; /* immediate value */
1783 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1784 imm = (imm >> rot) | (imm << (32 - rot));
1785 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1786 }
1787 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1788 {
1789 unsigned imm = insn & 0xff; /* immediate value */
1790 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1791 imm = (imm >> rot) | (imm << (32 - rot));
1792 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1793 }
1794 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1795 [sp, -#c]! */
1796 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1797 {
1798 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1799 break;
1800
1801 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1802 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1803 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1804 }
1805 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1806 [sp!] */
1807 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1808 {
1809 int n_saved_fp_regs;
1810 unsigned int fp_start_reg, fp_bound_reg;
1811
1812 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1813 break;
1814
1815 if ((insn & 0x800) == 0x800) /* N0 is set */
1816 {
1817 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1818 n_saved_fp_regs = 3;
1819 else
1820 n_saved_fp_regs = 1;
1821 }
1822 else
1823 {
1824 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1825 n_saved_fp_regs = 2;
1826 else
1827 n_saved_fp_regs = 4;
1828 }
1829
1830 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1831 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1832 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1833 {
1834 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1835 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1836 regs[fp_start_reg++]);
1837 }
1838 }
1839 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1840 {
1841 /* Allow some special function calls when skipping the
1842 prologue; GCC generates these before storing arguments to
1843 the stack. */
1844 CORE_ADDR dest = BranchDest (current_pc, insn);
1845
1846 if (skip_prologue_function (gdbarch, dest, 0))
1847 continue;
1848 else
1849 break;
1850 }
1851 else if ((insn & 0xf0000000) != 0xe0000000)
1852 break; /* Condition not true, exit early. */
1853 else if (arm_instruction_changes_pc (insn))
1854 /* Don't scan past anything that might change control flow. */
1855 break;
1856 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1857 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1858 /* Ignore block loads from the stack, potentially copying
1859 parameters from memory. */
1860 continue;
1861 else if ((insn & 0xfc500000) == 0xe4100000
1862 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1863 /* Similarly ignore single loads from the stack. */
1864 continue;
1865 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1866 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1867 register instead of the stack. */
1868 continue;
1869 else
1870 {
1871 /* The optimizer might shove anything into the prologue,
1872 so we just skip what we don't recognize. */
1873 unrecognized_pc = current_pc;
1874 continue;
1875 }
1876 }
1877
1878 if (unrecognized_pc == 0)
1879 unrecognized_pc = current_pc;
1880
1881 /* The frame size is just the distance from the frame register
1882 to the original stack pointer. */
1883 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1884 {
1885 /* Frame pointer is fp. */
1886 framereg = ARM_FP_REGNUM;
1887 framesize = -regs[ARM_FP_REGNUM].k;
1888 }
1889 else
1890 {
1891 /* Try the stack pointer... this is a bit desperate. */
1892 framereg = ARM_SP_REGNUM;
1893 framesize = -regs[ARM_SP_REGNUM].k;
1894 }
1895
1896 if (cache)
1897 {
1898 cache->framereg = framereg;
1899 cache->framesize = framesize;
1900
1901 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1902 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1903 cache->saved_regs[regno].addr = offset;
1904 }
1905
1906 if (arm_debug)
1907 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1908 paddress (gdbarch, unrecognized_pc));
1909
1910 do_cleanups (back_to);
1911 return unrecognized_pc;
1912 }
1913
1914 static void
1915 arm_scan_prologue (struct frame_info *this_frame,
1916 struct arm_prologue_cache *cache)
1917 {
1918 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1919 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1920 int regno;
1921 CORE_ADDR prologue_start, prologue_end, current_pc;
1922 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1923 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1924 pv_t regs[ARM_FPS_REGNUM];
1925 struct pv_area *stack;
1926 struct cleanup *back_to;
1927 CORE_ADDR offset;
1928
1929 /* Assume there is no frame until proven otherwise. */
1930 cache->framereg = ARM_SP_REGNUM;
1931 cache->framesize = 0;
1932
1933 /* Check for Thumb prologue. */
1934 if (arm_frame_is_thumb (this_frame))
1935 {
1936 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1937 return;
1938 }
1939
1940 /* Find the function prologue. If we can't find the function in
1941 the symbol table, peek in the stack frame to find the PC. */
1942 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1943 &prologue_end))
1944 {
1945 /* One way to find the end of the prologue (which works well
1946 for unoptimized code) is to do the following:
1947
1948 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1949
1950 if (sal.line == 0)
1951 prologue_end = prev_pc;
1952 else if (sal.end < prologue_end)
1953 prologue_end = sal.end;
1954
1955 This mechanism is very accurate so long as the optimizer
1956 doesn't move any instructions from the function body into the
1957 prologue. If this happens, sal.end will be the last
1958 instruction in the first hunk of prologue code just before
1959 the first instruction that the scheduler has moved from
1960 the body to the prologue.
1961
1962 In order to make sure that we scan all of the prologue
1963 instructions, we use a slightly less accurate mechanism which
1964 may scan more than necessary. To help compensate for this
1965 lack of accuracy, the prologue scanning loop below contains
1966 several clauses which'll cause the loop to terminate early if
1967 an implausible prologue instruction is encountered.
1968
1969 The expression
1970
1971 prologue_start + 64
1972
1973 is a suitable endpoint since it accounts for the largest
1974 possible prologue plus up to five instructions inserted by
1975 the scheduler. */
1976
1977 if (prologue_end > prologue_start + 64)
1978 {
1979 prologue_end = prologue_start + 64; /* See above. */
1980 }
1981 }
1982 else
1983 {
1984 /* We have no symbol information. Our only option is to assume this
1985 function has a standard stack frame and the normal frame register.
1986 Then, we can find the value of our frame pointer on entrance to
1987 the callee (or at the present moment if this is the innermost frame).
1988 The value stored there should be the address of the stmfd + 8. */
1989 CORE_ADDR frame_loc;
1990 LONGEST return_value;
1991
1992 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1993 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1994 return;
1995 else
1996 {
1997 prologue_start = gdbarch_addr_bits_remove
1998 (gdbarch, return_value) - 8;
1999 prologue_end = prologue_start + 64; /* See above. */
2000 }
2001 }
2002
2003 if (prev_pc < prologue_end)
2004 prologue_end = prev_pc;
2005
2006 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2007 }
2008
2009 static struct arm_prologue_cache *
2010 arm_make_prologue_cache (struct frame_info *this_frame)
2011 {
2012 int reg;
2013 struct arm_prologue_cache *cache;
2014 CORE_ADDR unwound_fp;
2015
2016 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2017 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2018
2019 arm_scan_prologue (this_frame, cache);
2020
2021 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2022 if (unwound_fp == 0)
2023 return cache;
2024
2025 cache->prev_sp = unwound_fp + cache->framesize;
2026
2027 /* Calculate actual addresses of saved registers using offsets
2028 determined by arm_scan_prologue. */
2029 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2030 if (trad_frame_addr_p (cache->saved_regs, reg))
2031 cache->saved_regs[reg].addr += cache->prev_sp;
2032
2033 return cache;
2034 }
2035
2036 /* Our frame ID for a normal frame is the current function's starting PC
2037 and the caller's SP when we were called. */
2038
2039 static void
2040 arm_prologue_this_id (struct frame_info *this_frame,
2041 void **this_cache,
2042 struct frame_id *this_id)
2043 {
2044 struct arm_prologue_cache *cache;
2045 struct frame_id id;
2046 CORE_ADDR pc, func;
2047
2048 if (*this_cache == NULL)
2049 *this_cache = arm_make_prologue_cache (this_frame);
2050 cache = *this_cache;
2051
2052 /* This is meant to halt the backtrace at "_start". */
2053 pc = get_frame_pc (this_frame);
2054 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2055 return;
2056
2057 /* If we've hit a wall, stop. */
2058 if (cache->prev_sp == 0)
2059 return;
2060
2061 /* Use function start address as part of the frame ID. If we cannot
2062 identify the start address (due to missing symbol information),
2063 fall back to just using the current PC. */
2064 func = get_frame_func (this_frame);
2065 if (!func)
2066 func = pc;
2067
2068 id = frame_id_build (cache->prev_sp, func);
2069 *this_id = id;
2070 }
2071
2072 static struct value *
2073 arm_prologue_prev_register (struct frame_info *this_frame,
2074 void **this_cache,
2075 int prev_regnum)
2076 {
2077 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2078 struct arm_prologue_cache *cache;
2079
2080 if (*this_cache == NULL)
2081 *this_cache = arm_make_prologue_cache (this_frame);
2082 cache = *this_cache;
2083
2084 /* If we are asked to unwind the PC, then we need to return the LR
2085 instead. The prologue may save PC, but it will point into this
2086 frame's prologue, not the next frame's resume location. Also
2087 strip the saved T bit. A valid LR may have the low bit set, but
2088 a valid PC never does. */
2089 if (prev_regnum == ARM_PC_REGNUM)
2090 {
2091 CORE_ADDR lr;
2092
2093 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2094 return frame_unwind_got_constant (this_frame, prev_regnum,
2095 arm_addr_bits_remove (gdbarch, lr));
2096 }
2097
2098 /* SP is generally not saved to the stack, but this frame is
2099 identified by the next frame's stack pointer at the time of the call.
2100 The value was already reconstructed into PREV_SP. */
2101 if (prev_regnum == ARM_SP_REGNUM)
2102 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2103
2104 /* The CPSR may have been changed by the call instruction and by the
2105 called function. The only bit we can reconstruct is the T bit,
2106 by checking the low bit of LR as of the call. This is a reliable
2107 indicator of Thumb-ness except for some ARM v4T pre-interworking
2108 Thumb code, which could get away with a clear low bit as long as
2109 the called function did not use bx. Guess that all other
2110 bits are unchanged; the condition flags are presumably lost,
2111 but the processor status is likely valid. */
2112 if (prev_regnum == ARM_PS_REGNUM)
2113 {
2114 CORE_ADDR lr, cpsr;
2115 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2116
2117 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2118 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2119 if (IS_THUMB_ADDR (lr))
2120 cpsr |= t_bit;
2121 else
2122 cpsr &= ~t_bit;
2123 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2124 }
2125
2126 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2127 prev_regnum);
2128 }
2129
/* Unwinder for normal frames based on prologue analysis.  Used as the
   fallback when neither DWARF CFI nor exception table data applies.  */

struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2138
2139 /* Maintain a list of ARM exception table entries per objfile, similar to the
2140 list of mapping symbols. We only cache entries for standard ARM-defined
2141 personality routines; the cache will contain only the frame unwinding
2142 instructions associated with the entry (not the descriptors). */
2143
/* Registry key for the per-objfile exception table cache below.  */
static const struct objfile_data *arm_exidx_data_key;

struct arm_exidx_entry
{
  bfd_vma addr;		/* Function start address, relative to its section.  */
  gdb_byte *entry;	/* Normalized unwind instructions, or NULL if the
			   index entry carries none (e.g. CANTUNWIND).  */
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

struct arm_exidx_data
{
  /* One vector of entries per BFD section, indexed by section index.  */
  VEC(arm_exidx_entry_s) **section_maps;
};
2158
2159 static void
2160 arm_exidx_data_free (struct objfile *objfile, void *arg)
2161 {
2162 struct arm_exidx_data *data = arg;
2163 unsigned int i;
2164
2165 for (i = 0; i < objfile->obfd->section_count; i++)
2166 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2167 }
2168
2169 static inline int
2170 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2171 const struct arm_exidx_entry *rhs)
2172 {
2173 return lhs->addr < rhs->addr;
2174 }
2175
2176 static struct obj_section *
2177 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2178 {
2179 struct obj_section *osect;
2180
2181 ALL_OBJFILE_OSECTIONS (objfile, osect)
2182 if (bfd_get_section_flags (objfile->obfd,
2183 osect->the_bfd_section) & SEC_ALLOC)
2184 {
2185 bfd_vma start, size;
2186 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2187 size = bfd_get_section_size (osect->the_bfd_section);
2188
2189 if (start <= vma && vma < start + size)
2190 return osect;
2191 }
2192
2193 return NULL;
2194 }
2195
2196 /* Parse contents of exception table and exception index sections
2197 of OBJFILE, and fill in the exception table entry cache.
2198
2199 For each entry that refers to a standard ARM-defined personality
2200 routine, extract the frame unwinding instructions (from either
2201 the index or the table section). The unwinding instructions
2202 are normalized by:
2203 - extracting them from the rest of the table data
2204 - converting to host endianness
2205 - appending the implicit 0xb0 ("Finish") code
2206
2207 The extracted and normalized instructions are stored for later
2208 retrieval by the arm_find_exidx_entry routine. */
2209
static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct cleanup *cleanups;
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  bfd_size_type exidx_size = 0, extab_size = 0;
  gdb_byte *exidx_data = NULL, *extab_data = NULL;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;
  cleanups = make_cleanup (null_cleanup, NULL);

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_size = bfd_get_section_size (exidx);
      exidx_data = xmalloc (exidx_size);
      make_cleanup (xfree, exidx_data);

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data, 0, exidx_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_size = bfd_get_section_size (extab);
      extab_data = xmalloc (extab_size);
      make_cleanup (xfree, extab_data);

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data, 0, extab_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  /* Allocate exception table data structure.  Both the structure and
     the per-section vectors live on the objfile obstack, so only the
     vectors themselves need freeing (see arm_exidx_data_free).  */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index entry is a pair of 32-bit
     words: a self-relative function address, and either an inline
     unwind entry or a reference into .ARM.extab.  */
  for (i = 0; i < exidx_size / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;	/* Stays NULL when there is nothing to cache.  */

      /* Extract address of start of function.  The mask/XOR/subtract
	 sequence sign-extends the 31-bit self-relative offset to the
	 full width of bfd_vma.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  Sign-extend the
	     31-bit offset as above; it is relative to this word of
	     the index entry.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_size)
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   extab_data + addr - extab_vma);
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address.  */
      if (n_words)
	if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  /* +1 below leaves room for the implicit "Finish" opcode.  */
	  gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
					       n_bytes + n_words * 4 + 1);

	  /* Copy the valid bytes of WORD, most significant first.  */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  /* Copy the remaining words, converting to host byte order.  */
	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }

  do_cleanups (cleanups);
}
2418
2419 /* Search for the exception table entry covering MEMADDR. If one is found,
2420 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2421 set *START to the start of the region covered by this entry. */
2422
static gdb_byte *
arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_exidx_data *data;
      VEC(arm_exidx_entry_s) *map;
      /* Cached entry addresses are section-relative, so convert
	 MEMADDR before searching.  */
      struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_exidx_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_exidx_entry_s, map))
	    {
	      struct arm_exidx_entry *map_sym;

	      idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
				     arm_compare_exidx_entries);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 exception table entry covers this address.  */
	      if (idx < VEC_length (arm_exidx_entry_s, map))
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx);
		  if (map_sym->addr == map_key.addr)
		    {
		      if (start)
			*start = map_sym->addr + obj_section_addr (sec);
		      return map_sym->entry;
		    }
		}

	      /* Fall back to the entry immediately preceding the
		 insertion point, if any.  */
	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
		  if (start)
		    *start = map_sym->addr + obj_section_addr (sec);
		  return map_sym->entry;
		}
	    }
	}
    }

  return NULL;
}
2475
2476 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2477 instruction list from the ARM exception table entry ENTRY, allocate and
2478 return a prologue cache structure describing how to unwind this frame.
2479
2480 Return NULL if the unwinding instruction list contains a "spare",
2481 "reserved" or "refuse to unwind" instruction as defined in section
2482 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2483 for the ARM Architecture" document. */
2484
2485 static struct arm_prologue_cache *
2486 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2487 {
2488 CORE_ADDR vsp = 0;
2489 int vsp_valid = 0;
2490
2491 struct arm_prologue_cache *cache;
2492 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2493 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2494
2495 for (;;)
2496 {
2497 gdb_byte insn;
2498
2499 /* Whenever we reload SP, we actually have to retrieve its
2500 actual value in the current frame. */
2501 if (!vsp_valid)
2502 {
2503 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2504 {
2505 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2506 vsp = get_frame_register_unsigned (this_frame, reg);
2507 }
2508 else
2509 {
2510 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2511 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2512 }
2513
2514 vsp_valid = 1;
2515 }
2516
2517 /* Decode next unwind instruction. */
2518 insn = *entry++;
2519
2520 if ((insn & 0xc0) == 0)
2521 {
2522 int offset = insn & 0x3f;
2523 vsp += (offset << 2) + 4;
2524 }
2525 else if ((insn & 0xc0) == 0x40)
2526 {
2527 int offset = insn & 0x3f;
2528 vsp -= (offset << 2) + 4;
2529 }
2530 else if ((insn & 0xf0) == 0x80)
2531 {
2532 int mask = ((insn & 0xf) << 8) | *entry++;
2533 int i;
2534
2535 /* The special case of an all-zero mask identifies
2536 "Refuse to unwind". We return NULL to fall back
2537 to the prologue analyzer. */
2538 if (mask == 0)
2539 return NULL;
2540
2541 /* Pop registers r4..r15 under mask. */
2542 for (i = 0; i < 12; i++)
2543 if (mask & (1 << i))
2544 {
2545 cache->saved_regs[4 + i].addr = vsp;
2546 vsp += 4;
2547 }
2548
2549 /* Special-case popping SP -- we need to reload vsp. */
2550 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2551 vsp_valid = 0;
2552 }
2553 else if ((insn & 0xf0) == 0x90)
2554 {
2555 int reg = insn & 0xf;
2556
2557 /* Reserved cases. */
2558 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2559 return NULL;
2560
2561 /* Set SP from another register and mark VSP for reload. */
2562 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2563 vsp_valid = 0;
2564 }
2565 else if ((insn & 0xf0) == 0xa0)
2566 {
2567 int count = insn & 0x7;
2568 int pop_lr = (insn & 0x8) != 0;
2569 int i;
2570
2571 /* Pop r4..r[4+count]. */
2572 for (i = 0; i <= count; i++)
2573 {
2574 cache->saved_regs[4 + i].addr = vsp;
2575 vsp += 4;
2576 }
2577
2578 /* If indicated by flag, pop LR as well. */
2579 if (pop_lr)
2580 {
2581 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2582 vsp += 4;
2583 }
2584 }
2585 else if (insn == 0xb0)
2586 {
2587 /* We could only have updated PC by popping into it; if so, it
2588 will show up as address. Otherwise, copy LR into PC. */
2589 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2590 cache->saved_regs[ARM_PC_REGNUM]
2591 = cache->saved_regs[ARM_LR_REGNUM];
2592
2593 /* We're done. */
2594 break;
2595 }
2596 else if (insn == 0xb1)
2597 {
2598 int mask = *entry++;
2599 int i;
2600
2601 /* All-zero mask and mask >= 16 is "spare". */
2602 if (mask == 0 || mask >= 16)
2603 return NULL;
2604
2605 /* Pop r0..r3 under mask. */
2606 for (i = 0; i < 4; i++)
2607 if (mask & (1 << i))
2608 {
2609 cache->saved_regs[i].addr = vsp;
2610 vsp += 4;
2611 }
2612 }
2613 else if (insn == 0xb2)
2614 {
2615 ULONGEST offset = 0;
2616 unsigned shift = 0;
2617
2618 do
2619 {
2620 offset |= (*entry & 0x7f) << shift;
2621 shift += 7;
2622 }
2623 while (*entry++ & 0x80);
2624
2625 vsp += 0x204 + (offset << 2);
2626 }
2627 else if (insn == 0xb3)
2628 {
2629 int start = *entry >> 4;
2630 int count = (*entry++) & 0xf;
2631 int i;
2632
2633 /* Only registers D0..D15 are valid here. */
2634 if (start + count >= 16)
2635 return NULL;
2636
2637 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2638 for (i = 0; i <= count; i++)
2639 {
2640 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2641 vsp += 8;
2642 }
2643
2644 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2645 vsp += 4;
2646 }
2647 else if ((insn & 0xf8) == 0xb8)
2648 {
2649 int count = insn & 0x7;
2650 int i;
2651
2652 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2653 for (i = 0; i <= count; i++)
2654 {
2655 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2656 vsp += 8;
2657 }
2658
2659 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2660 vsp += 4;
2661 }
2662 else if (insn == 0xc6)
2663 {
2664 int start = *entry >> 4;
2665 int count = (*entry++) & 0xf;
2666 int i;
2667
2668 /* Only registers WR0..WR15 are valid. */
2669 if (start + count >= 16)
2670 return NULL;
2671
2672 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2673 for (i = 0; i <= count; i++)
2674 {
2675 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2676 vsp += 8;
2677 }
2678 }
2679 else if (insn == 0xc7)
2680 {
2681 int mask = *entry++;
2682 int i;
2683
2684 /* All-zero mask and mask >= 16 is "spare". */
2685 if (mask == 0 || mask >= 16)
2686 return NULL;
2687
2688 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2689 for (i = 0; i < 4; i++)
2690 if (mask & (1 << i))
2691 {
2692 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2693 vsp += 4;
2694 }
2695 }
2696 else if ((insn & 0xf8) == 0xc0)
2697 {
2698 int count = insn & 0x7;
2699 int i;
2700
2701 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2702 for (i = 0; i <= count; i++)
2703 {
2704 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2705 vsp += 8;
2706 }
2707 }
2708 else if (insn == 0xc8)
2709 {
2710 int start = *entry >> 4;
2711 int count = (*entry++) & 0xf;
2712 int i;
2713
2714 /* Only registers D0..D31 are valid. */
2715 if (start + count >= 16)
2716 return NULL;
2717
2718 /* Pop VFP double-precision registers
2719 D[16+start]..D[16+start+count]. */
2720 for (i = 0; i <= count; i++)
2721 {
2722 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2723 vsp += 8;
2724 }
2725 }
2726 else if (insn == 0xc9)
2727 {
2728 int start = *entry >> 4;
2729 int count = (*entry++) & 0xf;
2730 int i;
2731
2732 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2733 for (i = 0; i <= count; i++)
2734 {
2735 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2736 vsp += 8;
2737 }
2738 }
2739 else if ((insn & 0xf8) == 0xd0)
2740 {
2741 int count = insn & 0x7;
2742 int i;
2743
2744 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2745 for (i = 0; i <= count; i++)
2746 {
2747 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2748 vsp += 8;
2749 }
2750 }
2751 else
2752 {
2753 /* Everything else is "spare". */
2754 return NULL;
2755 }
2756 }
2757
2758 /* If we restore SP from a register, assume this was the frame register.
2759 Otherwise just fall back to SP as frame register. */
2760 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2761 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2762 else
2763 cache->framereg = ARM_SP_REGNUM;
2764
2765 /* Determine offset to previous frame. */
2766 cache->framesize
2767 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2768
2769 /* We already got the previous SP. */
2770 cache->prev_sp = vsp;
2771
2772 return cache;
2773 }
2774
2775 /* Unwinding via ARM exception table entries. Note that the sniffer
2776 already computes a filled-in prologue cache, which is then used
2777 with the same arm_prologue_this_id and arm_prologue_prev_register
2778 routines also used for prologue-parsing based unwinding. */
2779
static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.
	 The check reads the instruction just before PC and looks for
	 the mode-specific svc encoding.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
					byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
					byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2869
/* Unwinder based on ARM exception table (.ARM.exidx) entries.  The
   sniffer fills in the prologue cache, which is then consumed by the
   same this_id/prev_register hooks as the prologue-based unwinder.  */

struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2878
2879 /* Recognize GCC's trampoline for thumb call-indirect. If we are in a
2880 trampoline, return the target PC. Otherwise return 0.
2881
2882 void call0a (char c, short s, int i, long l) {}
2883
2884 int main (void)
2885 {
2886 (*pointer_to_call0a) (c, s, i, l);
2887 }
2888
2889 Instead of calling a stub library function _call_via_xx (xx is
2890 the register name), GCC may inline the trampoline in the object
2891 file as below (register r2 has the address of call0a).
2892
2893 .global main
2894 .type main, %function
2895 ...
2896 bl .L1
2897 ...
2898 .size main, .-main
2899
2900 .L1:
2901 bx r2
2902
2903 The trampoline 'bx r2' doesn't belong to main. */
2904
2905 static CORE_ADDR
2906 arm_skip_bx_reg (struct frame_info *frame, CORE_ADDR pc)
2907 {
2908 /* The heuristics of recognizing such trampoline is that FRAME is
2909 executing in Thumb mode and the instruction on PC is 'bx Rm'. */
2910 if (arm_frame_is_thumb (frame))
2911 {
2912 gdb_byte buf[2];
2913
2914 if (target_read_memory (pc, buf, 2) == 0)
2915 {
2916 struct gdbarch *gdbarch = get_frame_arch (frame);
2917 enum bfd_endian byte_order_for_code
2918 = gdbarch_byte_order_for_code (gdbarch);
2919 uint16_t insn
2920 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2921
2922 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2923 {
2924 CORE_ADDR dest
2925 = get_frame_register_unsigned (frame, bits (insn, 3, 6));
2926
2927 /* Clear the LSB so that gdb core sets step-resume
2928 breakpoint at the right address. */
2929 return UNMAKE_THUMB_ADDR (dest);
2930 }
2931 }
2932 }
2933
2934 return 0;
2935 }
2936
2937 static struct arm_prologue_cache *
2938 arm_make_stub_cache (struct frame_info *this_frame)
2939 {
2940 struct arm_prologue_cache *cache;
2941
2942 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2943 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2944
2945 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2946
2947 return cache;
2948 }
2949
2950 /* Our frame ID for a stub frame is the current SP and LR. */
2951
2952 static void
2953 arm_stub_this_id (struct frame_info *this_frame,
2954 void **this_cache,
2955 struct frame_id *this_id)
2956 {
2957 struct arm_prologue_cache *cache;
2958
2959 if (*this_cache == NULL)
2960 *this_cache = arm_make_stub_cache (this_frame);
2961 cache = *this_cache;
2962
2963 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2964 }
2965
2966 static int
2967 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2968 struct frame_info *this_frame,
2969 void **this_prologue_cache)
2970 {
2971 CORE_ADDR addr_in_block;
2972 gdb_byte dummy[4];
2973 CORE_ADDR pc, start_addr;
2974 const char *name;
2975
2976 addr_in_block = get_frame_address_in_block (this_frame);
2977 pc = get_frame_pc (this_frame);
2978 if (in_plt_section (addr_in_block)
2979 /* We also use the stub winder if the target memory is unreadable
2980 to avoid having the prologue unwinder trying to read it. */
2981 || target_read_memory (pc, dummy, 4) != 0)
2982 return 1;
2983
2984 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0
2985 && arm_skip_bx_reg (this_frame, pc) != 0)
2986 return 1;
2987
2988 return 0;
2989 }
2990
/* Unwinder for stub frames: PLT entries, unreadable code, and Thumb
   'bx Rm' trampolines (see arm_stub_unwind_sniffer).  */

struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2999
3000 /* Put here the code to store, into CACHE->saved_regs, the addresses
3001 of the saved registers of frame described by THIS_FRAME. CACHE is
3002 returned. */
3003
3004 static struct arm_prologue_cache *
3005 arm_m_exception_cache (struct frame_info *this_frame)
3006 {
3007 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3008 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3009 struct arm_prologue_cache *cache;
3010 CORE_ADDR unwound_sp;
3011 LONGEST xpsr;
3012
3013 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
3014 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
3015
3016 unwound_sp = get_frame_register_unsigned (this_frame,
3017 ARM_SP_REGNUM);
3018
3019 /* The hardware saves eight 32-bit words, comprising xPSR,
3020 ReturnAddress, LR (R14), R12, R3, R2, R1, R0. See details in
3021 "B1.5.6 Exception entry behavior" in
3022 "ARMv7-M Architecture Reference Manual". */
3023 cache->saved_regs[0].addr = unwound_sp;
3024 cache->saved_regs[1].addr = unwound_sp + 4;
3025 cache->saved_regs[2].addr = unwound_sp + 8;
3026 cache->saved_regs[3].addr = unwound_sp + 12;
3027 cache->saved_regs[12].addr = unwound_sp + 16;
3028 cache->saved_regs[14].addr = unwound_sp + 20;
3029 cache->saved_regs[15].addr = unwound_sp + 24;
3030 cache->saved_regs[ARM_PS_REGNUM].addr = unwound_sp + 28;
3031
3032 /* If bit 9 of the saved xPSR is set, then there is a four-byte
3033 aligner between the top of the 32-byte stack frame and the
3034 previous context's stack pointer. */
3035 cache->prev_sp = unwound_sp + 32;
3036 if (safe_read_memory_integer (unwound_sp + 28, 4, byte_order, &xpsr)
3037 && (xpsr & (1 << 9)) != 0)
3038 cache->prev_sp += 4;
3039
3040 return cache;
3041 }
3042
3043 /* Implementation of function hook 'this_id' in
3044 'struct frame_uwnind'. */
3045
3046 static void
3047 arm_m_exception_this_id (struct frame_info *this_frame,
3048 void **this_cache,
3049 struct frame_id *this_id)
3050 {
3051 struct arm_prologue_cache *cache;
3052
3053 if (*this_cache == NULL)
3054 *this_cache = arm_m_exception_cache (this_frame);
3055 cache = *this_cache;
3056
3057 /* Our frame ID for a stub frame is the current SP and LR. */
3058 *this_id = frame_id_build (cache->prev_sp,
3059 get_frame_pc (this_frame));
3060 }
3061
3062 /* Implementation of function hook 'prev_register' in
3063 'struct frame_uwnind'. */
3064
3065 static struct value *
3066 arm_m_exception_prev_register (struct frame_info *this_frame,
3067 void **this_cache,
3068 int prev_regnum)
3069 {
3070 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3071 struct arm_prologue_cache *cache;
3072
3073 if (*this_cache == NULL)
3074 *this_cache = arm_m_exception_cache (this_frame);
3075 cache = *this_cache;
3076
3077 /* The value was already reconstructed into PREV_SP. */
3078 if (prev_regnum == ARM_SP_REGNUM)
3079 return frame_unwind_got_constant (this_frame, prev_regnum,
3080 cache->prev_sp);
3081
3082 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
3083 prev_regnum);
3084 }
3085
/* Implementation of function hook 'sniffer' in
   'struct frame_unwind'.  */
3088
3089 static int
3090 arm_m_exception_unwind_sniffer (const struct frame_unwind *self,
3091 struct frame_info *this_frame,
3092 void **this_prologue_cache)
3093 {
3094 CORE_ADDR this_pc = get_frame_pc (this_frame);
3095
3096 /* No need to check is_m; this sniffer is only registered for
3097 M-profile architectures. */
3098
3099 /* Exception frames return to one of these magic PCs. Other values
3100 are not defined as of v7-M. See details in "B1.5.8 Exception
3101 return behavior" in "ARMv7-M Architecture Reference Manual". */
3102 if (this_pc == 0xfffffff1 || this_pc == 0xfffffff9
3103 || this_pc == 0xfffffffd)
3104 return 1;
3105
3106 return 0;
3107 }
3108
/* Frame unwinder for M-profile exceptions.  Recognized by the sniffer
   via the magic EXC_RETURN PC values; reconstructs the interrupted
   context from the hardware-saved stack frame.  */

struct frame_unwind arm_m_exception_unwind =
{
  SIGTRAMP_FRAME,			/* type */
  default_frame_unwind_stop_reason,	/* stop_reason */
  arm_m_exception_this_id,		/* this_id */
  arm_m_exception_prev_register,	/* prev_register */
  NULL,					/* unwind_data */
  arm_m_exception_unwind_sniffer	/* sniffer */
};
3120
3121 static CORE_ADDR
3122 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
3123 {
3124 struct arm_prologue_cache *cache;
3125
3126 if (*this_cache == NULL)
3127 *this_cache = arm_make_prologue_cache (this_frame);
3128 cache = *this_cache;
3129
3130 return cache->prev_sp - cache->framesize;
3131 }
3132
/* Frame base table for normal ARM frames.  The same address is used
   for the frame base, locals and arguments.  */

struct frame_base arm_normal_base = {
  &arm_prologue_unwind,		/* unwind */
  arm_normal_frame_base,	/* this_base */
  arm_normal_frame_base,	/* this_locals */
  arm_normal_frame_base		/* this_args */
};
3139
3140 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
3141 dummy frame. The frame ID's base needs to match the TOS value
3142 saved by save_dummy_frame_tos() and returned from
3143 arm_push_dummy_call, and the PC needs to match the dummy frame's
3144 breakpoint. */
3145
3146 static struct frame_id
3147 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3148 {
3149 return frame_id_build (get_frame_register_unsigned (this_frame,
3150 ARM_SP_REGNUM),
3151 get_frame_pc (this_frame));
3152 }
3153
3154 /* Given THIS_FRAME, find the previous frame's resume PC (which will
3155 be used to construct the previous frame's ID, after looking up the
3156 containing function). */
3157
3158 static CORE_ADDR
3159 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
3160 {
3161 CORE_ADDR pc;
3162 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
3163 return arm_addr_bits_remove (gdbarch, pc);
3164 }
3165
3166 static CORE_ADDR
3167 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
3168 {
3169 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
3170 }
3171
3172 static struct value *
3173 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
3174 int regnum)
3175 {
3176 struct gdbarch * gdbarch = get_frame_arch (this_frame);
3177 CORE_ADDR lr, cpsr;
3178 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3179
3180 switch (regnum)
3181 {
3182 case ARM_PC_REGNUM:
3183 /* The PC is normally copied from the return column, which
3184 describes saves of LR. However, that version may have an
3185 extra bit set to indicate Thumb state. The bit is not
3186 part of the PC. */
3187 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3188 return frame_unwind_got_constant (this_frame, regnum,
3189 arm_addr_bits_remove (gdbarch, lr));
3190
3191 case ARM_PS_REGNUM:
3192 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3193 cpsr = get_frame_register_unsigned (this_frame, regnum);
3194 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3195 if (IS_THUMB_ADDR (lr))
3196 cpsr |= t_bit;
3197 else
3198 cpsr &= ~t_bit;
3199 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3200
3201 default:
3202 internal_error (__FILE__, __LINE__,
3203 _("Unexpected register %d"), regnum);
3204 }
3205 }
3206
3207 static void
3208 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3209 struct dwarf2_frame_state_reg *reg,
3210 struct frame_info *this_frame)
3211 {
3212 switch (regnum)
3213 {
3214 case ARM_PC_REGNUM:
3215 case ARM_PS_REGNUM:
3216 reg->how = DWARF2_FRAME_REG_FN;
3217 reg->loc.fn = arm_dwarf2_prev_register;
3218 break;
3219 case ARM_SP_REGNUM:
3220 reg->how = DWARF2_FRAME_REG_CFA;
3221 break;
3222 }
3223 }
3224
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.  PC is the
   address to test; both 16-bit and 32-bit Thumb-2 encodings are
   recognized.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without known function bounds we cannot scan; report "not in
     epilogue".  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      /* Unreadable memory ends the scan.  */
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (thumb_instruction_restores_sp (insn))
	{
	  found_stack_adjust = 1;
	  if ((insn & 0xfe00) == 0xbd00)  /* pop <registers, PC> */
	    found_return = 1;
	}
      else if (thumb_insn_size (insn) == 4)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit encoding.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    /* Any other 32-bit instruction is not part of an epilogue.  */
	    break;
	}
      else
	/* Any other 16-bit instruction is not part of an epilogue.  */
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* Interpret the 4 bytes either as one 32-bit instruction
	 (INSN:INSN2) or as a trailing 16-bit instruction (INSN2).  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (thumb_instruction_restores_sp (insn2))
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}
3341
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.  PC is the
   address to test; ARM (32-bit) encodings are handled here and Thumb
   is delegated to thumb_in_function_epilogue_p.  */

static int
arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return, found_stack_adjust;
  CORE_ADDR func_start, func_end;

  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_in_function_epilogue_p (gdbarch, pc);

  /* Without known function bounds we cannot scan.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  /* Skip unconditional (NV-space) encodings; they are not returns.  */
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	  && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  if (pc < func_start + 4)
    return 0;

  found_stack_adjust = 0;
  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0df0f000) == 0x0080d000)
	/* ADD SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0df0f000) == 0x0040d000)
	/* SUB SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0d000)
	/* MOV SP.  */
	found_stack_adjust = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000)
	/* POP (LDMIA).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0fff0000) == 0x049d0000)
	/* POP of a single register.  */
	found_stack_adjust = 1;
    }

  if (found_stack_adjust)
    return 1;

  return 0;
}
3416
3417
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.
   Items are heap-allocated singly-linked nodes, each owning a copy of
   its data.  */

struct stack_item
{
  int len;			/* Number of bytes in DATA.  */
  struct stack_item *prev;	/* Item beneath this one, or NULL.  */
  void *data;			/* Heap-allocated copy of the contents.  */
};
3427
3428 static struct stack_item *
3429 push_stack_item (struct stack_item *prev, const void *contents, int len)
3430 {
3431 struct stack_item *si;
3432 si = xmalloc (sizeof (struct stack_item));
3433 si->data = xmalloc (len);
3434 si->len = len;
3435 si->prev = prev;
3436 memcpy (si->data, contents, len);
3437 return si;
3438 }
3439
3440 static struct stack_item *
3441 pop_stack_item (struct stack_item *si)
3442 {
3443 struct stack_item *dead = si;
3444 si = si->prev;
3445 xfree (dead->data);
3446 xfree (dead);
3447 return si;
3448 }
3449
3450
3451 /* Return the alignment (in bytes) of the given type. */
3452
3453 static int
3454 arm_type_align (struct type *t)
3455 {
3456 int n;
3457 int align;
3458 int falign;
3459
3460 t = check_typedef (t);
3461 switch (TYPE_CODE (t))
3462 {
3463 default:
3464 /* Should never happen. */
3465 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3466 return 4;
3467
3468 case TYPE_CODE_PTR:
3469 case TYPE_CODE_ENUM:
3470 case TYPE_CODE_INT:
3471 case TYPE_CODE_FLT:
3472 case TYPE_CODE_SET:
3473 case TYPE_CODE_RANGE:
3474 case TYPE_CODE_REF:
3475 case TYPE_CODE_CHAR:
3476 case TYPE_CODE_BOOL:
3477 return TYPE_LENGTH (t);
3478
3479 case TYPE_CODE_ARRAY:
3480 case TYPE_CODE_COMPLEX:
3481 /* TODO: What about vector types? */
3482 return arm_type_align (TYPE_TARGET_TYPE (t));
3483
3484 case TYPE_CODE_STRUCT:
3485 case TYPE_CODE_UNION:
3486 align = 1;
3487 for (n = 0; n < TYPE_NFIELDS (t); n++)
3488 {
3489 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3490 if (falign > align)
3491 align = falign;
3492 }
3493 return align;
3494 }
3495 }
3496
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,	/* Not yet classified, or not a candidate.  */
  VFP_CPRC_SINGLE,	/* 32-bit single-precision float.  */
  VFP_CPRC_DOUBLE,	/* 64-bit double-precision float.  */
  VFP_CPRC_VEC64,	/* 64-bit vector.  */
  VFP_CPRC_VEC128	/* 128-bit vector.  */
};
3508
3509 /* The length of one element of base type B. */
3510
3511 static unsigned
3512 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3513 {
3514 switch (b)
3515 {
3516 case VFP_CPRC_SINGLE:
3517 return 4;
3518 case VFP_CPRC_DOUBLE:
3519 return 8;
3520 case VFP_CPRC_VEC64:
3521 return 8;
3522 case VFP_CPRC_VEC128:
3523 return 16;
3524 default:
3525 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3526 (int) b);
3527 }
3528 }
3529
3530 /* The character ('s', 'd' or 'q') for the type of VFP register used
3531 for passing base type B. */
3532
3533 static int
3534 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3535 {
3536 switch (b)
3537 {
3538 case VFP_CPRC_SINGLE:
3539 return 's';
3540 case VFP_CPRC_DOUBLE:
3541 return 'd';
3542 case VFP_CPRC_VEC64:
3543 return 'd';
3544 case VFP_CPRC_VEC128:
3545 return 'q';
3546 default:
3547 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3548 (int) b);
3549 }
3550 }
3551
/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.  If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vectors and complex types are not currently supported,
   matching the generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* A scalar float is one element; its size selects the class.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	int count;
	unsigned unitlen;
	/* Classify the element type, then derive the element count
	   from the array's total size.  */
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    /* An empty array contributes zero elements.  */
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* Sum element counts over all fields; every field must agree
	   on the classification.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	/* Reject structs with padding: size must equal the sum of the
	   element sizes exactly.  */
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* For a union, the element count is the maximum over the
	   members rather than the sum.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
3672
3673 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3674 if passed to or returned from a non-variadic function with the VFP
3675 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3676 *BASE_TYPE to the base type for T and *COUNT to the number of
3677 elements of that base type before returning. */
3678
3679 static int
3680 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3681 int *count)
3682 {
3683 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3684 int c = arm_vfp_cprc_sub_candidate (t, &b);
3685 if (c <= 0 || c > 4)
3686 return 0;
3687 *base_type = b;
3688 *count = c;
3689 return 1;
3690 }
3691
3692 /* Return 1 if the VFP ABI should be used for passing arguments to and
3693 returning values from a function of type FUNC_TYPE, 0
3694 otherwise. */
3695
3696 static int
3697 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3698 {
3699 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3700 /* Variadic functions always use the base ABI. Assume that functions
3701 without debug info are not variadic. */
3702 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3703 return 0;
3704 /* The VFP ABI is only supported as a variant of AAPCS. */
3705 if (tdep->arm_abi != ARM_ABI_AAPCS)
3706 return 0;
3707 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3708 }
3709
3710 /* We currently only support passing parameters in integer registers, which
3711 conforms with GCC's default model, and VFP argument passing following
3712 the VFP variant of AAPCS. Several other variants exist and
3713 we should probably support some of them based on the selected ABI. */
3714
3715 static CORE_ADDR
3716 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3717 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3718 struct value **args, CORE_ADDR sp, int struct_return,
3719 CORE_ADDR struct_addr)
3720 {
3721 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3722 int argnum;
3723 int argreg;
3724 int nstack;
3725 struct stack_item *si = NULL;
3726 int use_vfp_abi;
3727 struct type *ftype;
3728 unsigned vfp_regs_free = (1 << 16) - 1;
3729
3730 /* Determine the type of this function and whether the VFP ABI
3731 applies. */
3732 ftype = check_typedef (value_type (function));
3733 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3734 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3735 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3736
3737 /* Set the return address. For the ARM, the return breakpoint is
3738 always at BP_ADDR. */
3739 if (arm_pc_is_thumb (gdbarch, bp_addr))
3740 bp_addr |= 1;
3741 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3742
3743 /* Walk through the list of args and determine how large a temporary
3744 stack is required. Need to take care here as structs may be
3745 passed on the stack, and we have to push them. */
3746 nstack = 0;
3747
3748 argreg = ARM_A1_REGNUM;
3749 nstack = 0;
3750
3751 /* The struct_return pointer occupies the first parameter
3752 passing register. */
3753 if (struct_return)
3754 {
3755 if (arm_debug)
3756 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3757 gdbarch_register_name (gdbarch, argreg),
3758 paddress (gdbarch, struct_addr));
3759 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3760 argreg++;
3761 }
3762
3763 for (argnum = 0; argnum < nargs; argnum++)
3764 {
3765 int len;
3766 struct type *arg_type;
3767 struct type *target_type;
3768 enum type_code typecode;
3769 const bfd_byte *val;
3770 int align;
3771 enum arm_vfp_cprc_base_type vfp_base_type;
3772 int vfp_base_count;
3773 int may_use_core_reg = 1;
3774
3775 arg_type = check_typedef (value_type (args[argnum]));
3776 len = TYPE_LENGTH (arg_type);
3777 target_type = TYPE_TARGET_TYPE (arg_type);
3778 typecode = TYPE_CODE (arg_type);
3779 val = value_contents (args[argnum]);
3780
3781 align = arm_type_align (arg_type);
3782 /* Round alignment up to a whole number of words. */
3783 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3784 /* Different ABIs have different maximum alignments. */
3785 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3786 {
3787 /* The APCS ABI only requires word alignment. */
3788 align = INT_REGISTER_SIZE;
3789 }
3790 else
3791 {
3792 /* The AAPCS requires at most doubleword alignment. */
3793 if (align > INT_REGISTER_SIZE * 2)
3794 align = INT_REGISTER_SIZE * 2;
3795 }
3796
3797 if (use_vfp_abi
3798 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3799 &vfp_base_count))
3800 {
3801 int regno;
3802 int unit_length;
3803 int shift;
3804 unsigned mask;
3805
3806 /* Because this is a CPRC it cannot go in a core register or
3807 cause a core register to be skipped for alignment.
3808 Either it goes in VFP registers and the rest of this loop
3809 iteration is skipped for this argument, or it goes on the
3810 stack (and the stack alignment code is correct for this
3811 case). */
3812 may_use_core_reg = 0;
3813
3814 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3815 shift = unit_length / 4;
3816 mask = (1 << (shift * vfp_base_count)) - 1;
3817 for (regno = 0; regno < 16; regno += shift)
3818 if (((vfp_regs_free >> regno) & mask) == mask)
3819 break;
3820
3821 if (regno < 16)
3822 {
3823 int reg_char;
3824 int reg_scaled;
3825 int i;
3826
3827 vfp_regs_free &= ~(mask << regno);
3828 reg_scaled = regno / shift;
3829 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3830 for (i = 0; i < vfp_base_count; i++)
3831 {
3832 char name_buf[4];
3833 int regnum;
3834 if (reg_char == 'q')
3835 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3836 val + i * unit_length);
3837 else
3838 {
3839 xsnprintf (name_buf, sizeof (name_buf), "%c%d",
3840 reg_char, reg_scaled + i);
3841 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3842 strlen (name_buf));
3843 regcache_cooked_write (regcache, regnum,
3844 val + i * unit_length);
3845 }
3846 }
3847 continue;
3848 }
3849 else
3850 {
3851 /* This CPRC could not go in VFP registers, so all VFP
3852 registers are now marked as used. */
3853 vfp_regs_free = 0;
3854 }
3855 }
3856
3857 /* Push stack padding for dowubleword alignment. */
3858 if (nstack & (align - 1))
3859 {
3860 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3861 nstack += INT_REGISTER_SIZE;
3862 }
3863
3864 /* Doubleword aligned quantities must go in even register pairs. */
3865 if (may_use_core_reg
3866 && argreg <= ARM_LAST_ARG_REGNUM
3867 && align > INT_REGISTER_SIZE
3868 && argreg & 1)
3869 argreg++;
3870
3871 /* If the argument is a pointer to a function, and it is a
3872 Thumb function, create a LOCAL copy of the value and set
3873 the THUMB bit in it. */
3874 if (TYPE_CODE_PTR == typecode
3875 && target_type != NULL
3876 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3877 {
3878 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3879 if (arm_pc_is_thumb (gdbarch, regval))
3880 {
3881 bfd_byte *copy = alloca (len);
3882 store_unsigned_integer (copy, len, byte_order,
3883 MAKE_THUMB_ADDR (regval));
3884 val = copy;
3885 }
3886 }
3887
3888 /* Copy the argument to general registers or the stack in
3889 register-sized pieces. Large arguments are split between
3890 registers and stack. */
3891 while (len > 0)
3892 {
3893 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3894
3895 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3896 {
3897 /* The argument is being passed in a general purpose
3898 register. */
3899 CORE_ADDR regval
3900 = extract_unsigned_integer (val, partial_len, byte_order);
3901 if (byte_order == BFD_ENDIAN_BIG)
3902 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3903 if (arm_debug)
3904 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3905 argnum,
3906 gdbarch_register_name
3907 (gdbarch, argreg),
3908 phex (regval, INT_REGISTER_SIZE));
3909 regcache_cooked_write_unsigned (regcache, argreg, regval);
3910 argreg++;
3911 }
3912 else
3913 {
3914 /* Push the arguments onto the stack. */
3915 if (arm_debug)
3916 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3917 argnum, nstack);
3918 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3919 nstack += INT_REGISTER_SIZE;
3920 }
3921
3922 len -= partial_len;
3923 val += partial_len;
3924 }
3925 }
3926 /* If we have an odd number of words to push, then decrement the stack
3927 by one word now, so first stack argument will be dword aligned. */
3928 if (nstack & 4)
3929 sp -= 4;
3930
3931 while (si)
3932 {
3933 sp -= si->len;
3934 write_memory (sp, si->data, si->len);
3935 si = pop_stack_item (si);
3936 }
3937
3938 /* Finally, update teh SP register. */
3939 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3940
3941 return sp;
3942 }
3943
3944
3945 /* Always align the frame to an 8-byte boundary. This is required on
3946 some platforms and harmless on the rest. */
3947
3948 static CORE_ADDR
3949 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3950 {
3951 /* Align the stack to eight bytes. */
3952 return sp & ~ (CORE_ADDR) 7;
3953 }
3954
/* Print the names of whichever of the five FPA exception flags are
   set in the low bits of FLAGS, followed by a newline.  */

static void
print_fpu_flags (struct ui_file *file, int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int bit;

  for (bit = 0; bit < 5; bit++)
    if (flags & (1 << bit))
      fputs_filtered (flag_names[bit], file);
  fputc_filtered ('\n', file);
}
3970
3971 /* Print interesting information about the floating point processor
3972 (if present) or emulator. */
3973 static void
3974 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3975 struct frame_info *frame, const char *args)
3976 {
3977 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3978 int type;
3979
3980 type = (status >> 24) & 127;
3981 if (status & (1 << 31))
3982 fprintf_filtered (file, _("Hardware FPU type %d\n"), type);
3983 else
3984 fprintf_filtered (file, _("Software FPU type %d\n"), type);
3985 /* i18n: [floating point unit] mask */
3986 fputs_filtered (_("mask: "), file);
3987 print_fpu_flags (file, status >> 16);
3988 /* i18n: [floating point unit] flags */
3989 fputs_filtered (_("flags: "), file);
3990 print_fpu_flags (file, status);
3991 }
3992
3993 /* Construct the ARM extended floating point type. */
3994 static struct type *
3995 arm_ext_type (struct gdbarch *gdbarch)
3996 {
3997 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3998
3999 if (!tdep->arm_ext_type)
4000 tdep->arm_ext_type
4001 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
4002 floatformats_arm_ext);
4003
4004 return tdep->arm_ext_type;
4005 }
4006
/* Return the GDB type used to display a NEON D (64-bit) register:
   a union of the integer and float vector views.  Built lazily and
   cached in TDEP.  */

static struct type *
arm_neon_double_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_double_type == NULL)
    {
      struct type *t, *elem;

      /* One union member per way of carving up 64 bits.  */
      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
			       TYPE_CODE_UNION);
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", elem);
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", elem);

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_d";
      tdep->neon_double_type = t;
    }

  return tdep->neon_double_type;
}
4038
4039 /* FIXME: The vector types are not correctly ordered on big-endian
4040 targets. Just as s0 is the low bits of d0, d0[0] is also the low
4041 bits of d0 - regardless of what unit size is being held in d0. So
4042 the offset of the first uint8 in d0 is 7, but the offset of the
4043 first float is 4. This code works as-is for little-endian
4044 targets. */
4045
/* Return the GDB type used to display a NEON Q (128-bit) register:
   a union of the integer and float vector views.  Built lazily and
   cached in TDEP.  See the FIXME above about ordering on big-endian
   targets.  */

static struct type *
arm_neon_quad_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_quad_type == NULL)
    {
      struct type *t, *elem;

      /* One union member per way of carving up 128 bits.  */
      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
			       TYPE_CODE_UNION);
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 16));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", init_vector_type (elem, 2));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_q";
      tdep->neon_quad_type = t;
    }

  return tdep->neon_quad_type;
}
4077
/* Return the GDB type object for the "standard" data type of data in
   register N.  The order of the checks below matters: pseudo
   registers are tested first, then target-description types, then the
   fixed builtin assignments.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* The 32 pseudo registers immediately after the raw set are
     single-precision floats when VFP pseudos are present.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* The next 16 pseudos are NEON quad registers when present.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      /* FPA register slots are void when the target has no FPA.  */
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
4127
4128 /* Map a DWARF register REGNUM onto the appropriate GDB register
4129 number. */
4130
4131 static int
4132 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
4133 {
4134 /* Core integer regs. */
4135 if (reg >= 0 && reg <= 15)
4136 return reg;
4137
4138 /* Legacy FPA encoding. These were once used in a way which
4139 overlapped with VFP register numbering, so their use is
4140 discouraged, but GDB doesn't support the ARM toolchain
4141 which used them for VFP. */
4142 if (reg >= 16 && reg <= 23)
4143 return ARM_F0_REGNUM + reg - 16;
4144
4145 /* New assignments for the FPA registers. */
4146 if (reg >= 96 && reg <= 103)
4147 return ARM_F0_REGNUM + reg - 96;
4148
4149 /* WMMX register assignments. */
4150 if (reg >= 104 && reg <= 111)
4151 return ARM_WCGR0_REGNUM + reg - 104;
4152
4153 if (reg >= 112 && reg <= 127)
4154 return ARM_WR0_REGNUM + reg - 112;
4155
4156 if (reg >= 192 && reg <= 199)
4157 return ARM_WC0_REGNUM + reg - 192;
4158
4159 /* VFP v2 registers. A double precision value is actually
4160 in d1 rather than s2, but the ABI only defines numbering
4161 for the single precision registers. This will "just work"
4162 in GDB for little endian targets (we'll read eight bytes,
4163 starting in s0 and then progressing to s1), but will be
4164 reversed on big endian targets with VFP. This won't
4165 be a problem for the new Neon quad registers; you're supposed
4166 to use DW_OP_piece for those. */
4167 if (reg >= 64 && reg <= 95)
4168 {
4169 char name_buf[4];
4170
4171 xsnprintf (name_buf, sizeof (name_buf), "s%d", reg - 64);
4172 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4173 strlen (name_buf));
4174 }
4175
4176 /* VFP v3 / Neon registers. This range is also used for VFP v2
4177 registers, except that it now describes d0 instead of s0. */
4178 if (reg >= 256 && reg <= 287)
4179 {
4180 char name_buf[4];
4181
4182 xsnprintf (name_buf, sizeof (name_buf), "d%d", reg - 256);
4183 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4184 strlen (name_buf));
4185 }
4186
4187 return -1;
4188 }
4189
4190 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4191 static int
4192 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4193 {
4194 int reg = regnum;
4195 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4196
4197 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4198 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4199
4200 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4201 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4202
4203 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4204 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4205
4206 if (reg < NUM_GREGS)
4207 return SIM_ARM_R0_REGNUM + reg;
4208 reg -= NUM_GREGS;
4209
4210 if (reg < NUM_FREGS)
4211 return SIM_ARM_FP0_REGNUM + reg;
4212 reg -= NUM_FREGS;
4213
4214 if (reg < NUM_SREGS)
4215 return SIM_ARM_FPS_REGNUM + reg;
4216 reg -= NUM_SREGS;
4217
4218 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4219 }
4220
4221 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4222 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
   It is thought that this is the floating-point register format on
4224 little-endian systems. */
4225
4226 static void
4227 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4228 void *dbl, int endianess)
4229 {
4230 DOUBLEST d;
4231
4232 if (endianess == BFD_ENDIAN_BIG)
4233 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4234 else
4235 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4236 ptr, &d);
4237 floatformat_from_doublest (fmt, &d, dbl);
4238 }
4239
4240 static void
4241 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4242 int endianess)
4243 {
4244 DOUBLEST d;
4245
4246 floatformat_to_doublest (fmt, ptr, &d);
4247 if (endianess == BFD_ENDIAN_BIG)
4248 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4249 else
4250 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4251 &d, dbl);
4252 }
4253
4254 static int
4255 condition_true (unsigned long cond, unsigned long status_reg)
4256 {
4257 if (cond == INST_AL || cond == INST_NV)
4258 return 1;
4259
4260 switch (cond)
4261 {
4262 case INST_EQ:
4263 return ((status_reg & FLAG_Z) != 0);
4264 case INST_NE:
4265 return ((status_reg & FLAG_Z) == 0);
4266 case INST_CS:
4267 return ((status_reg & FLAG_C) != 0);
4268 case INST_CC:
4269 return ((status_reg & FLAG_C) == 0);
4270 case INST_MI:
4271 return ((status_reg & FLAG_N) != 0);
4272 case INST_PL:
4273 return ((status_reg & FLAG_N) == 0);
4274 case INST_VS:
4275 return ((status_reg & FLAG_V) != 0);
4276 case INST_VC:
4277 return ((status_reg & FLAG_V) == 0);
4278 case INST_HI:
4279 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4280 case INST_LS:
4281 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4282 case INST_GE:
4283 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4284 case INST_LT:
4285 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4286 case INST_GT:
4287 return (((status_reg & FLAG_Z) == 0)
4288 && (((status_reg & FLAG_N) == 0)
4289 == ((status_reg & FLAG_V) == 0)));
4290 case INST_LE:
4291 return (((status_reg & FLAG_Z) != 0)
4292 || (((status_reg & FLAG_N) == 0)
4293 != ((status_reg & FLAG_V) == 0)));
4294 }
4295 return 1;
4296 }
4297
/* Compute the value of the shifted-register operand of the ARM
   data-processing instruction INST, as seen by the instruction
   executing in FRAME.  CARRY is the current carry flag (consumed by
   RRX); PC_VAL is the raw PC value used when r15 appears as the
   shift-amount or operand register.  STATUS_REG is accepted but not
   read here.  Returns the 32-bit operand value.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift: only the low byte of Rs is used.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount encoded in bits 7-11.  */
    shift = bits (inst, 7, 11);

  /* When Rm is the PC, the value read is 8 bytes ahead (12 for a
     register-specified shift).  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Shifts of 32 or more replicate the sign bit everywhere.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	/* ROR #0 encodes RRX: rotate right one bit through carry.  */
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  /* Mask to 32 bits in case unsigned long is wider.  */
  return res & 0xffffffff;
}
4347
4348 /* Return number of 1-bits in VAL. */
4349
/* Return the number of 1-bits in VAL (population count).  Uses
   Kernighan's trick: VAL &= VAL - 1 clears the lowest set bit, so the
   loop runs once per set bit.  */

static int
bitcount (unsigned long val)
{
  int count = 0;

  while (val != 0)
    {
      val &= val - 1;
      count++;
    }

  return count;
}
4358
4359 /* Return the size in bytes of the complete Thumb instruction whose
4360 first halfword is INST1. */
4361
/* Return the size in bytes of the complete Thumb instruction whose
   first halfword is INST1.  A first halfword whose top five bits are
   0b11101, 0b11110 or 0b11111 introduces a 32-bit Thumb-2 encoding;
   everything else is a 16-bit instruction.  */

static int
thumb_insn_size (unsigned short inst1)
{
  return ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0) ? 4 : 2;
}
4370
/* Advance the Thumb-2 ITSTATE value by one instruction: keep the base
   condition in bits [7:5] and shift the remaining-instructions mask in
   the low five bits left by one.  Returns the new ITSTATE, or zero
   once the IT block is exhausted (low four bits all clear).  */

static int
thumb_advance_itstate (unsigned int itstate)
{
  unsigned int next = (itstate & 0xe0) | ((itstate << 1) & 0x1f);

  return (next & 0x0f) != 0 ? next : 0;
}
4384
4385 /* Find the next PC after the current instruction executes. In some
4386 cases we can not statically determine the answer (see the IT state
4387 handling in this function); in that case, a breakpoint may be
4388 inserted in addition to the returned PC, which will be used to set
4389 another breakpoint by our caller. */
4390
static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;	/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  /* Skip over every instruction the IT block disables.  */
	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      arm_insert_single_step_breakpoint (gdbarch, aspace,
						 MAKE_THUMB_ADDR (pc));
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	/* Advance to the next instruction.  All the 32-bit
	   instructions share a common prefix.  */
	return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));

      /* Otherwise, handle the instruction normally.  */
    }

  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
         all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)  /* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* Let the OS-specific layer compute the PC after the
	     system call, if it knows how.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      /* J1/J2 are stored inverted relative to the sign bit
		 (I1/I2 in the architecture manual); since IMM1 is
		 already sign-extended, flipping bits 23/22 when J1/J2
		 are clear reconstructs the full offset.  */
	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    load_pc = 0;

	  if (load_pc)
	    {
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  Decode the various addressing modes to find the
	     effective address the PC is loaded from.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* Literal load: base is the word-aligned PC.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      if (bit (inst2, 10))
		{
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      /* Register offset with optional left shift.  */
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  /* Table entries are byte-sized branch offsets in halfwords.  */
	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  /* Table entries are halfword-sized branch offsets in
	     halfwords.  */
	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      /* With Rm == PC this clears the Thumb bit: switch to ARM state
	 at the current PC.  */
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = UNMAKE_THUMB_ADDR (pc_val);
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }
  return nextpc;
}
4756
4757 /* Get the raw next address. PC is the current program counter, in
4758 FRAME, which is assumed to be executing in ARM mode.
4759
4760 The value returned has the execution state of the next instruction
4761 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4762 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4763 address. */
4764
static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  /* The NV condition space holds unconditional instructions
     (BLX immediate, coprocessor ops); handle them separately.  */
  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  Bit 24 (the H bit)
	     supplies bit 1 of the Thumb target address.  */
	  nextpc = BranchDest (pc, this_instr);
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:			/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only instructions writing to the PC (Rd == r15) can
	       change control flow.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		return nextpc;
	      }

	    /* Multiply into PC.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    operand1 = ((rn == ARM_PC_REGNUM)
			? (pc_val + 8)
			: get_frame_register_unsigned (frame, rn));

	    if (bit (this_instr, 25))
	      {
		/* Immediate operand: 8-bit value rotated right by
		   twice the 4-bit rotate field.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    /* Emulate the data-processing opcode to obtain the value
	       that will be written to the PC.  */
	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* Comparison ops do not write a result; keep the
		   default next PC.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* byte write to PC */
		  rn = bits (this_instr, 16, 19);
		  base = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
			(bit (this_instr, 25)
			 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
			 : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
							      4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;
		  unsigned long rn_val
		    = get_frame_register_unsigned (frame,
						   bits (this_instr, 16, 19));

		  if (bit (this_instr, 23))
		    {
		      /* up: the PC is loaded last, above the other
			 registers in the list.  */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
							      (rn_val + offset),
							      4, byte_order);
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    /* Let the OS-specific layer compute the PC after the
	       system call, if it knows how.  */
	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}
5026
/* Determine next PC after current instruction executes.  Dispatches to
   either arm_get_next_pc_raw or thumb_get_next_pc_raw depending on the
   execution state of FRAME.  */
5030
5031 CORE_ADDR
5032 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
5033 {
5034 CORE_ADDR nextpc;
5035
5036 if (arm_frame_is_thumb (frame))
5037 nextpc = thumb_get_next_pc_raw (frame, pc);
5038 else
5039 nextpc = arm_get_next_pc_raw (frame, pc);
5040
5041 return nextpc;
5042 }
5043
5044 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
5045 of the appropriate mode (as encoded in the PC value), even if this
5046 differs from what would be expected according to the symbol tables. */
5047
5048 void
5049 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
5050 struct address_space *aspace,
5051 CORE_ADDR pc)
5052 {
5053 struct cleanup *old_chain
5054 = make_cleanup_restore_integer (&arm_override_mode);
5055
5056 arm_override_mode = IS_THUMB_ADDR (pc);
5057 pc = gdbarch_addr_bits_remove (gdbarch, pc);
5058
5059 insert_single_step_breakpoint (gdbarch, aspace, pc);
5060
5061 do_cleanups (old_chain);
5062 }
5063
5064 /* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
5065 instruction and ending with a STREX{,B,H,D} instruction. If such a sequence
5066 is found, attempt to step through it. A breakpoint is placed at the end of
5067 the sequence. */
5068
static int
thumb_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned short insn1, insn2;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  ULONGEST status, itstate;

  /* We currently do not support atomic sequences within an IT block.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
  if (itstate & 0x0f)
    return 0;

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
     LDREX{,D} matches the first mask; LDREXB/LDREXH are in the second
     encoding space and are distinguished by bits of the second
     halfword.  */
  insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  if (thumb_insn_size (insn1) != 4)
    return 0;

  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  if (!((insn1 & 0xfff0) == 0xe850
        || ((insn1 & 0xfff0) == 0xe8d0 && (insn2 & 0x00c0) == 0x0040)))
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
      loc += 2;

      if (thumb_insn_size (insn1) != 4)
	{
	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf000) == 0xd000 && bits (insn1, 8, 11) != 0x0f)
	    {
	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + 2 + (sbits (insn1, 0, 7) << 1);
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb_instruction_changes_pc (insn1))
	    return 0;
	}
      else
	{
	  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
	  loc += 2;

	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf800) == 0xf000
	      && (insn2 & 0xd000) == 0x8000
	      && (insn1 & 0x0380) != 0x0380)
	    {
	      int sign, j1, j2, imm1, imm2;
	      unsigned int offset;

	      /* Reassemble the Thumb-2 conditional-branch offset from
		 its scattered fields.  */
	      sign = sbits (insn1, 10, 10);
	      imm1 = bits (insn1, 0, 5);
	      imm2 = bits (insn2, 0, 10);
	      j1 = bit (insn2, 13);
	      j2 = bit (insn2, 11);

	      offset = (sign << 20) + (j2 << 19) + (j1 << 18);
	      offset += (imm1 << 12) + (imm2 << 1);

	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + offset;
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb2_instruction_changes_pc (insn1, insn2))
	    return 0;

	  /* If we find a strex{,b,h,d}, we're done.  */
	  if ((insn1 & 0xfff0) == 0xe840
	      || ((insn1 & 0xfff0) == 0xe8c0 && (insn2 & 0x00c0) == 0x0040))
	    break;
	}
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace,
				       MAKE_THUMB_ADDR (breaks[index]));

  return 1;
}
5199
/* If the instruction at the PC of FRAME begins an ARM-mode ldrex/strex
   atomic sequence, insert single-step breakpoints past the end of the
   sequence (and at the destination of at most one conditional branch
   inside it) and return 1.  Return 0 to fall back to the ordinary
   single-step code.  */

static int
arm_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned int insn;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
     Note that we do not currently support conditionally executed atomic
     instructions.  */
  insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
  loc += 4;
  if ((insn & 0xff9000f0) != 0xe1900090)
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
      loc += 4;

      /* Assume that there is at most one conditional branch in the atomic
	 sequence.  If a conditional branch is found, put a breakpoint in
	 its destination address.  */
      if (bits (insn, 24, 27) == 0xa)
	{
	  if (last_breakpoint > 0)
	    return 0; /* More than one conditional branch found, fallback
			 to the standard single-step code.  */

	  breaks[1] = BranchDest (loc - 4, insn);
	  last_breakpoint++;
	}

      /* We do not support atomic sequences that use any *other* instructions
	 but conditional branches to change the PC.  Fall back to standard
	 code to avoid losing control of execution.  */
      else if (arm_instruction_changes_pc (insn))
	return 0;

      /* If we find a strex{,b,h,d}, we're done.  */
      if ((insn & 0xff9000f0) == 0xe1800090)
	break;
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
5274
/* Dispatch atomic-sequence handling on the execution state of FRAME:
   Thumb and ARM mode encode their exclusive-access sequences
   differently.  Returns non-zero if breakpoints were placed.  */

int
arm_deal_with_atomic_sequence (struct frame_info *frame)
{
  return (arm_frame_is_thumb (frame)
	  ? thumb_deal_with_atomic_sequence_raw (frame)
	  : arm_deal_with_atomic_sequence_raw (frame));
}
5283
5284 /* single_step() is called just before we want to resume the inferior,
5285 if we want to single-step it but there is no hardware or kernel
5286 single-step support. We find the target of the coming instruction
5287 and breakpoint it. */
5288
5289 int
5290 arm_software_single_step (struct frame_info *frame)
5291 {
5292 struct gdbarch *gdbarch = get_frame_arch (frame);
5293 struct address_space *aspace = get_frame_address_space (frame);
5294 CORE_ADDR next_pc;
5295
5296 if (arm_deal_with_atomic_sequence (frame))
5297 return 1;
5298
5299 next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
5300 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
5301
5302 return 1;
5303 }
5304
5305 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
5306 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
5307 NULL if an error occurs. BUF is freed. */
5308
5309 static gdb_byte *
5310 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
5311 int old_len, int new_len)
5312 {
5313 gdb_byte *new_buf;
5314 int bytes_to_read = new_len - old_len;
5315
5316 new_buf = xmalloc (new_len);
5317 memcpy (new_buf + bytes_to_read, buf, old_len);
5318 xfree (buf);
5319 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
5320 {
5321 xfree (new_buf);
5322 return NULL;
5323 }
5324 return new_buf;
5325 }
5326
5327 /* An IT block is at most the 2-byte IT instruction followed by
5328 four 4-byte instructions. The furthest back we must search to
5329 find an IT block that affects the current instruction is thus
5330 2 + 3 * 4 == 14 bytes. */
5331 #define MAX_IT_BLOCK_PREFIX 14
5332
5333 /* Use a quick scan if there are more than this many bytes of
5334 code. */
5335 #define IT_SCAN_THRESHOLD 32
5336
5337 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
5338 A breakpoint in an IT block may not be hit, depending on the
5339 condition flags. */
5340 static CORE_ADDR
5341 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
5342 {
5343 gdb_byte *buf;
5344 char map_type;
5345 CORE_ADDR boundary, func_start;
5346 int buf_len;
5347 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
5348 int i, any, last_it, last_it_count;
5349
5350 /* If we are using BKPT breakpoints, none of this is necessary. */
5351 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
5352 return bpaddr;
5353
5354 /* ARM mode does not have this problem. */
5355 if (!arm_pc_is_thumb (gdbarch, bpaddr))
5356 return bpaddr;
5357
5358 /* We are setting a breakpoint in Thumb code that could potentially
5359 contain an IT block. The first step is to find how much Thumb
5360 code there is; we do not need to read outside of known Thumb
5361 sequences. */
5362 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
5363 if (map_type == 0)
5364 /* Thumb-2 code must have mapping symbols to have a chance. */
5365 return bpaddr;
5366
5367 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
5368
5369 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
5370 && func_start > boundary)
5371 boundary = func_start;
5372
5373 /* Search for a candidate IT instruction. We have to do some fancy
5374 footwork to distinguish a real IT instruction from the second
5375 half of a 32-bit instruction, but there is no need for that if
5376 there's no candidate. */
5377 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
5378 if (buf_len == 0)
5379 /* No room for an IT instruction. */
5380 return bpaddr;
5381
5382 buf = xmalloc (buf_len);
5383 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
5384 return bpaddr;
5385 any = 0;
5386 for (i = 0; i < buf_len; i += 2)
5387 {
5388 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5389 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5390 {
5391 any = 1;
5392 break;
5393 }
5394 }
5395 if (any == 0)
5396 {
5397 xfree (buf);
5398 return bpaddr;
5399 }
5400
5401 /* OK, the code bytes before this instruction contain at least one
5402 halfword which resembles an IT instruction. We know that it's
5403 Thumb code, but there are still two possibilities. Either the
5404 halfword really is an IT instruction, or it is the second half of
5405 a 32-bit Thumb instruction. The only way we can tell is to
5406 scan forwards from a known instruction boundary. */
5407 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5408 {
5409 int definite;
5410
5411 /* There's a lot of code before this instruction. Start with an
5412 optimistic search; it's easy to recognize halfwords that can
5413 not be the start of a 32-bit instruction, and use that to
5414 lock on to the instruction boundaries. */
5415 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5416 if (buf == NULL)
5417 return bpaddr;
5418 buf_len = IT_SCAN_THRESHOLD;
5419
5420 definite = 0;
5421 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5422 {
5423 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5424 if (thumb_insn_size (inst1) == 2)
5425 {
5426 definite = 1;
5427 break;
5428 }
5429 }
5430
5431 /* At this point, if DEFINITE, BUF[I] is the first place we
5432 are sure that we know the instruction boundaries, and it is far
5433 enough from BPADDR that we could not miss an IT instruction
5434 affecting BPADDR. If ! DEFINITE, give up - start from a
5435 known boundary. */
5436 if (! definite)
5437 {
5438 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5439 bpaddr - boundary);
5440 if (buf == NULL)
5441 return bpaddr;
5442 buf_len = bpaddr - boundary;
5443 i = 0;
5444 }
5445 }
5446 else
5447 {
5448 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5449 if (buf == NULL)
5450 return bpaddr;
5451 buf_len = bpaddr - boundary;
5452 i = 0;
5453 }
5454
5455 /* Scan forwards. Find the last IT instruction before BPADDR. */
5456 last_it = -1;
5457 last_it_count = 0;
5458 while (i < buf_len)
5459 {
5460 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5461 last_it_count--;
5462 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5463 {
5464 last_it = i;
5465 if (inst1 & 0x0001)
5466 last_it_count = 4;
5467 else if (inst1 & 0x0002)
5468 last_it_count = 3;
5469 else if (inst1 & 0x0004)
5470 last_it_count = 2;
5471 else
5472 last_it_count = 1;
5473 }
5474 i += thumb_insn_size (inst1);
5475 }
5476
5477 xfree (buf);
5478
5479 if (last_it == -1)
5480 /* There wasn't really an IT instruction after all. */
5481 return bpaddr;
5482
5483 if (last_it_count < 1)
5484 /* It was too far away. */
5485 return bpaddr;
5486
5487 /* This really is a trouble spot. Move the breakpoint to the IT
5488 instruction. */
5489 return bpaddr - buf_len + last_it;
5490 }
5491
5492 /* ARM displaced stepping support.
5493
5494 Generally ARM displaced stepping works as follows:
5495
5496 1. When an instruction is to be single-stepped, it is first decoded by
5497 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5498 Depending on the type of instruction, it is then copied to a scratch
5499 location, possibly in a modified form. The copy_* set of functions
5500 performs such modification, as necessary. A breakpoint is placed after
5501 the modified instruction in the scratch space to return control to GDB.
5502 Note in particular that instructions which modify the PC will no longer
5503 do so after modification.
5504
5505 2. The instruction is single-stepped, by setting the PC to the scratch
5506 location address, and resuming. Control returns to GDB when the
5507 breakpoint is hit.
5508
5509 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5510 function used for the current instruction. This function's job is to
5511 put the CPU/memory state back to what it would have been if the
5512 instruction had been executed unmodified in its original location. */
5513
5514 /* NOP instruction (mov r0, r0). */
5515 #define ARM_NOP 0xe1a00000
5516 #define THUMB_NOP 0x4600
5517
5518 /* Helper for register reads for displaced stepping. In particular, this
5519 returns the PC as it would be seen by the instruction at its original
5520 location. */
5521
5522 ULONGEST
5523 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5524 int regno)
5525 {
5526 ULONGEST ret;
5527 CORE_ADDR from = dsc->insn_addr;
5528
5529 if (regno == ARM_PC_REGNUM)
5530 {
5531 /* Compute pipeline offset:
5532 - When executing an ARM instruction, PC reads as the address of the
5533 current instruction plus 8.
5534 - When executing a Thumb instruction, PC reads as the address of the
5535 current instruction plus 4. */
5536
5537 if (!dsc->is_thumb)
5538 from += 8;
5539 else
5540 from += 4;
5541
5542 if (debug_displaced)
5543 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5544 (unsigned long) from);
5545 return (ULONGEST) from;
5546 }
5547 else
5548 {
5549 regcache_cooked_read_unsigned (regs, regno, &ret);
5550 if (debug_displaced)
5551 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5552 regno, (unsigned long) ret);
5553 return ret;
5554 }
5555 }
5556
5557 static int
5558 displaced_in_arm_mode (struct regcache *regs)
5559 {
5560 ULONGEST ps;
5561 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5562
5563 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5564
5565 return (ps & t_bit) == 0;
5566 }
5567
5568 /* Write to the PC as from a branch instruction. */
5569
5570 static void
5571 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5572 ULONGEST val)
5573 {
5574 if (!dsc->is_thumb)
5575 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5576 architecture versions < 6. */
5577 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5578 val & ~(ULONGEST) 0x3);
5579 else
5580 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5581 val & ~(ULONGEST) 0x1);
5582 }
5583
5584 /* Write to the PC as from a branch-exchange instruction. */
5585
5586 static void
5587 bx_write_pc (struct regcache *regs, ULONGEST val)
5588 {
5589 ULONGEST ps;
5590 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5591
5592 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5593
5594 if ((val & 1) == 1)
5595 {
5596 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5597 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5598 }
5599 else if ((val & 2) == 0)
5600 {
5601 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5602 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5603 }
5604 else
5605 {
5606 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5607 mode, align dest to 4 bytes). */
5608 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5609 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5610 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5611 }
5612 }
5613
5614 /* Write to the PC as if from a load instruction. */
5615
5616 static void
5617 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5618 ULONGEST val)
5619 {
5620 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5621 bx_write_pc (regs, val);
5622 else
5623 branch_write_pc (regs, dsc, val);
5624 }
5625
5626 /* Write to the PC as if from an ALU instruction. */
5627
5628 static void
5629 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5630 ULONGEST val)
5631 {
5632 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5633 bx_write_pc (regs, val);
5634 else
5635 branch_write_pc (regs, dsc, val);
5636 }
5637
5638 /* Helper for writing to registers for displaced stepping. Writing to the PC
5639 has a varying effects depending on the instruction which does the write:
5640 this is controlled by the WRITE_PC argument. */
5641
5642 void
5643 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5644 int regno, ULONGEST val, enum pc_write_style write_pc)
5645 {
5646 if (regno == ARM_PC_REGNUM)
5647 {
5648 if (debug_displaced)
5649 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5650 (unsigned long) val);
5651 switch (write_pc)
5652 {
5653 case BRANCH_WRITE_PC:
5654 branch_write_pc (regs, dsc, val);
5655 break;
5656
5657 case BX_WRITE_PC:
5658 bx_write_pc (regs, val);
5659 break;
5660
5661 case LOAD_WRITE_PC:
5662 load_write_pc (regs, dsc, val);
5663 break;
5664
5665 case ALU_WRITE_PC:
5666 alu_write_pc (regs, dsc, val);
5667 break;
5668
5669 case CANNOT_WRITE_PC:
5670 warning (_("Instruction wrote to PC in an unexpected way when "
5671 "single-stepping"));
5672 break;
5673
5674 default:
5675 internal_error (__FILE__, __LINE__,
5676 _("Invalid argument to displaced_write_reg"));
5677 }
5678
5679 dsc->wrote_to_pc = 1;
5680 }
5681 else
5682 {
5683 if (debug_displaced)
5684 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5685 regno, (unsigned long) val);
5686 regcache_cooked_write_unsigned (regs, regno, val);
5687 }
5688 }
5689
/* Concisely determine whether an instruction INSN references the PC.
   Register fields of interest in INSN should have the corresponding
   fields of BITMASK set to 0b1111.  Return 1 if any of those fields in
   INSN holds 0b1111 (r15, the PC), else 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t remaining = bitmask;

  while (remaining != 0)
    {
      /* Isolate the lowest set bit and expand it to a nibble-wide
	 field mask.  */
      uint32_t low = remaining & -remaining;
      uint32_t field = low * 0xf;

      if ((insn & field) == field)
	return 1;

      remaining &= ~field;
    }

  return 0;
}
5721
5722 /* The simplest copy function. Many instructions have the same effect no
5723 matter what address they are executed at: in those cases, use this. */
5724
5725 static int
5726 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5727 const char *iname, struct displaced_step_closure *dsc)
5728 {
5729 if (debug_displaced)
5730 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5731 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5732 iname);
5733
5734 dsc->modinsn[0] = insn;
5735
5736 return 0;
5737 }
5738
5739 static int
5740 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
5741 uint16_t insn2, const char *iname,
5742 struct displaced_step_closure *dsc)
5743 {
5744 if (debug_displaced)
5745 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
5746 "opcode/class '%s' unmodified\n", insn1, insn2,
5747 iname);
5748
5749 dsc->modinsn[0] = insn1;
5750 dsc->modinsn[1] = insn2;
5751 dsc->numinsns = 2;
5752
5753 return 0;
5754 }
5755
5756 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
5757 modification. */
5758 static int
5759 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
5760 const char *iname,
5761 struct displaced_step_closure *dsc)
5762 {
5763 if (debug_displaced)
5764 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
5765 "opcode/class '%s' unmodified\n", insn,
5766 iname);
5767
5768 dsc->modinsn[0] = insn;
5769
5770 return 0;
5771 }
5772
5773 /* Preload instructions with immediate offset. */
5774
5775 static void
5776 cleanup_preload (struct gdbarch *gdbarch,
5777 struct regcache *regs, struct displaced_step_closure *dsc)
5778 {
5779 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5780 if (!dsc->u.preload.immed)
5781 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5782 }
5783
/* Set up the displaced copy of {PLD,PLI} [RN, #+/-imm]: save r0 in
   dsc->tmp[0], load RN's value into r0 (the rewritten instruction uses
   r0 as its base), and register cleanup_preload to restore r0
   afterwards.  Note the ordering: r0 must be saved before it is
   overwritten.  */
static void
install_preload (struct gdbarch *gdbarch, struct regcache *regs,
		 struct displaced_step_closure *dsc, unsigned int rn)
{
  ULONGEST rn_val;
  /* Preload instructions:

     {pli/pld} [rn, #+/-imm]
     ->
     {pli/pld} [r0, #+/-imm].  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  rn_val = displaced_read_reg (regs, dsc, rn);
  displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
  /* Immediate form: cleanup_preload need only restore r0.  */
  dsc->u.preload.immed = 1;

  dsc->cleanup = &cleanup_preload;
}
5802
5803 static int
5804 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5805 struct displaced_step_closure *dsc)
5806 {
5807 unsigned int rn = bits (insn, 16, 19);
5808
5809 if (!insn_references_pc (insn, 0x000f0000ul))
5810 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5811
5812 if (debug_displaced)
5813 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5814 (unsigned long) insn);
5815
5816 dsc->modinsn[0] = insn & 0xfff0ffff;
5817
5818 install_preload (gdbarch, regs, dsc, rn);
5819
5820 return 0;
5821 }
5822
/* Copy a Thumb-2 PLD/PLI with immediate offset.  Only the PC-relative
   forms need rewriting; all others are position-independent and are
   copied verbatim.  */
static int
thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
		     struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int u_bit = bit (insn1, 7);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  if (rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);

  /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
     PLD (literal) Encoding T1.  */
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
			imm12);

  /* The U bit selects addition vs. subtraction of the offset.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction {pli/pld} PC imm12 into:
     Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12

     {pli/pld} [r0, r1]

     Cleanup: r0 <- tmp[0], r1 <- tmp[1].  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);

  /* displaced_read_reg yields the pipeline-adjusted PC value.  */
  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
  /* Register-offset form: cleanup_preload must restore r1 as well.  */
  dsc->u.preload.immed = 0;

  /* {pli/pld} [r0, r1] */
  dsc->modinsn[0] = insn1 & 0xfff0;
  dsc->modinsn[1] = 0xf001;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_preload;
  return 0;
}
5870
/* Preload instructions with register offset.  */

/* Set up the displaced copy of {PLD,PLI} [RN, RM {, shift}]: save
   r0/r1 in dsc->tmp[], move RN into r0 and RM into r1 for the
   rewritten [r0, r1 {, shift}] form, and register cleanup_preload to
   restore both scratch registers afterwards.  */
static void
install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
		    struct displaced_step_closure *dsc, unsigned int rn,
		    unsigned int rm)
{
  ULONGEST rn_val, rm_val;

  /* Preload register-offset instructions:

     {pli/pld} [rn, rm {, shift}]
     ->
     {pli/pld} [r0, r1 {, shift}].  */

  /* Save the scratch registers before overwriting them.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
  /* Register-offset form: cleanup_preload restores r1 too.  */
  dsc->u.preload.immed = 0;

  dsc->cleanup = &cleanup_preload;
}
5896
5897 static int
5898 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5899 struct regcache *regs,
5900 struct displaced_step_closure *dsc)
5901 {
5902 unsigned int rn = bits (insn, 16, 19);
5903 unsigned int rm = bits (insn, 0, 3);
5904
5905
5906 if (!insn_references_pc (insn, 0x000f000ful))
5907 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5908
5909 if (debug_displaced)
5910 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5911 (unsigned long) insn);
5912
5913 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5914
5915 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5916 return 0;
5917 }
5918
5919 /* Copy/cleanup coprocessor load and store instructions. */
5920
5921 static void
5922 cleanup_copro_load_store (struct gdbarch *gdbarch,
5923 struct regcache *regs,
5924 struct displaced_step_closure *dsc)
5925 {
5926 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5927
5928 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5929
5930 if (dsc->u.ldst.writeback)
5931 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5932 }
5933
/* Set up the displaced copy of a coprocessor load/store: save r0 in
   dsc->tmp[0], place RN's value (word-aligned, as needed when RN is
   the PC) in r0, and record RN and WRITEBACK for
   cleanup_copro_load_store.  */
static void
install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
			  struct displaced_step_closure *dsc,
			  int writeback, unsigned int rn)
{
  ULONGEST rn_val;

  /* Coprocessor load/store instructions:

     {stc/stc2} [<Rn>, #+/-imm]  (and other immediate addressing modes)
     ->
     {stc/stc2} [r0, #+/-imm].

     ldc/ldc2 are handled identically.  */

  /* Save r0 before overwriting it.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  rn_val = displaced_read_reg (regs, dsc, rn);
  /* PC should be 4-byte aligned.  */
  rn_val = rn_val & 0xfffffffc;
  displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);

  dsc->u.ldst.writeback = writeback;
  dsc->u.ldst.rn = rn;

  dsc->cleanup = &cleanup_copro_load_store;
}
5960
5961 static int
5962 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5963 struct regcache *regs,
5964 struct displaced_step_closure *dsc)
5965 {
5966 unsigned int rn = bits (insn, 16, 19);
5967
5968 if (!insn_references_pc (insn, 0x000f0000ul))
5969 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5970
5971 if (debug_displaced)
5972 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5973 "load/store insn %.8lx\n", (unsigned long) insn);
5974
5975 dsc->modinsn[0] = insn & 0xfff0ffff;
5976
5977 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5978
5979 return 0;
5980 }
5981
5982 static int
5983 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
5984 uint16_t insn2, struct regcache *regs,
5985 struct displaced_step_closure *dsc)
5986 {
5987 unsigned int rn = bits (insn1, 0, 3);
5988
5989 if (rn != ARM_PC_REGNUM)
5990 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
5991 "copro load/store", dsc);
5992
5993 if (debug_displaced)
5994 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5995 "load/store insn %.4x%.4x\n", insn1, insn2);
5996
5997 dsc->modinsn[0] = insn1 & 0xfff0;
5998 dsc->modinsn[1] = insn2;
5999 dsc->numinsns = 2;
6000
6001 /* This function is called for copying instruction LDC/LDC2/VLDR, which
6002 doesn't support writeback, so pass 0. */
6003 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
6004
6005 return 0;
6006 }
6007
/* Clean up branch instructions (actually perform the branch, by setting
   PC).  */

static void
cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
		struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int branch_taken = condition_true (dsc->u.branch.cond, status);
  /* BX_WRITE_PC lets exchange-type branches switch instruction set.  */
  enum pc_write_style write_pc = dsc->u.branch.exchange
				 ? BX_WRITE_PC : BRANCH_WRITE_PC;

  /* An untaken conditional branch has no effect beyond the NOP that
     was already stepped in the scratch space.  */
  if (!branch_taken)
    return;

  if (dsc->u.branch.link)
    {
      /* The value of LR should be the next insn of current one.  In order
	 not to confuse logic handling later insn `bx lr', if current insn
	 mode is Thumb, the bit 0 of LR value should be set to 1.  */
      ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;

      if (dsc->is_thumb)
	next_insn_addr |= 0x1;

      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
			   CANNOT_WRITE_PC);
    }

  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
}
6039
/* Copy B/BL/BLX instructions with immediate destinations.  */

/* Record the branch parameters (COND, EXCHANGE, LINK, OFFSET) in DSC
   so cleanup_branch can perform the branch after a NOP is stepped.  */
static void
install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
		  struct displaced_step_closure *dsc,
		  unsigned int cond, int exchange, int link, long offset)
{
  /* Implement "BL<cond> <label>" as:

     Preparation: cond <- instruction condition
     Insn: mov r0, r0 (nop)
     Cleanup: if (condition true) { r14 <- pc; pc <- label }.

     B<cond> similar, but don't set r14 in cleanup.  */

  dsc->u.branch.cond = cond;
  dsc->u.branch.link = link;
  dsc->u.branch.exchange = exchange;

  dsc->u.branch.dest = dsc->insn_addr;
  if (link && exchange)
    /* For BLX, offset is computed from the Align (PC, 4).  */
    dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;

  /* OFFSET is relative to the pipeline-adjusted PC: insn address + 4
     in Thumb state, + 8 in ARM state.  */
  if (dsc->is_thumb)
    dsc->u.branch.dest += 4 + offset;
  else
    dsc->u.branch.dest += 8 + offset;

  dsc->cleanup = &cleanup_branch;
}
/* Copy an ARM-mode B/BL/BLX with immediate destination: decode the
   branch offset, replace the instruction with a NOP, and let
   install_b_bl_blx/cleanup_branch perform the actual branch.  */
static int
arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
		   struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  /* A condition field of 0xf marks the unconditional BLX encoding.  */
  int exchange = (cond == 0xf);
  int link = exchange || bit (insn, 24);
  long offset;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
			(unsigned long) insn);
  if (exchange)
    /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
       then arrange the switch into Thumb mode.  */
    offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
  else
    offset = bits (insn, 0, 23) << 2;

  /* Sign-extend: after the shift the sign bit of the 24-bit immediate
     sits at bit 25 of OFFSET.  */
  if (bit (offset, 25))
    offset = offset | ~0x3ffffff;

  dsc->modinsn[0] = ARM_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
6099
/* Copy a 32-bit Thumb B/BL/BLX with immediate destination: decode the
   branch offset and condition from the two halfwords, replace the
   instruction with a NOP, and let install_b_bl_blx/cleanup_branch
   perform the branch.  */
static int
thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
		      uint16_t insn2, struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  int link = bit (insn2, 14);
  int exchange = link && !bit (insn2, 12);
  int cond = INST_AL;
  long offset = 0;
  int j1 = bit (insn2, 13);
  int j2 = bit (insn2, 11);
  /* S is the sign bit; I1/I2 are derived from J1/J2 xor'ed with S.  */
  int s = sbits (insn1, 10, 10);
  int i1 = !(j1 ^ bit (insn1, 10));
  int i2 = !(j2 ^ bit (insn1, 10));

  if (!link && !exchange) /* B */
    {
      offset = (bits (insn2, 0, 10) << 1);
      if (bit (insn2, 12)) /* Encoding T4 */
	{
	  offset |= (bits (insn1, 0, 9) << 12)
	    | (i2 << 22)
	    | (i1 << 23)
	    | (s << 24);
	  cond = INST_AL;
	}
      else /* Encoding T3 */
	{
	  offset |= (bits (insn1, 0, 5) << 12)
	    | (j1 << 18)
	    | (j2 << 19)
	    | (s << 20);
	  /* T3 carries the condition in the first halfword.  */
	  cond = bits (insn1, 6, 9);
	}
    }
  else
    {
      /* BL uses halfword-, BLX word-aligned immediates.  */
      offset = (bits (insn1, 0, 9) << 12);
      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
      offset |= exchange ?
	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
			"%.4x %.4x with offset %.8lx\n",
			link ? (exchange) ? "blx" : "bl" : "b",
			insn1, insn2, offset);

  dsc->modinsn[0] = THUMB_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
6154
/* Copy B Thumb instructions.  Decode the (conditional T1 or
   unconditional T2 encoding) branch offset, step a NOP, and let
   cleanup_branch perform the branch if the condition holds.  */
static int
thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
	      struct displaced_step_closure *dsc)
{
  unsigned int cond = 0;
  int offset = 0;
  unsigned short bit_12_15 = bits (insn, 12, 15);
  CORE_ADDR from = dsc->insn_addr;

  if (bit_12_15 == 0xd) /* Encoding T1: conditional branch.  */
    {
      /* offset = SignExtend (imm8:0, 32) */
      offset = sbits ((insn << 1), 0, 8);
      cond = bits (insn, 8, 11);
    }
  else if (bit_12_15 == 0xe) /* Encoding T2 */
    {
      /* offset = SignExtend (imm11:0, 32); always taken.  */
      offset = sbits ((insn << 1), 0, 11);
      cond = INST_AL;
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying b immediate insn %.4x "
			"with offset %d\n", insn, offset);

  /* The +4 accounts for the Thumb pipeline offset of the PC.  */
  dsc->u.branch.cond = cond;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  dsc->u.branch.dest = from + 4 + offset;

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
6193
6194 /* Copy BX/BLX with register-specified destinations. */
6195
6196 static void
6197 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
6198 struct displaced_step_closure *dsc, int link,
6199 unsigned int cond, unsigned int rm)
6200 {
6201 /* Implement {BX,BLX}<cond> <reg>" as:
6202
6203 Preparation: cond <- instruction condition
6204 Insn: mov r0, r0 (nop)
6205 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
6206
6207 Don't set r14 in cleanup for BX. */
6208
6209 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
6210
6211 dsc->u.branch.cond = cond;
6212 dsc->u.branch.link = link;
6213
6214 dsc->u.branch.exchange = 1;
6215
6216 dsc->cleanup = &cleanup_branch;
6217 }
6218
6219 static int
6220 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
6221 struct regcache *regs, struct displaced_step_closure *dsc)
6222 {
6223 unsigned int cond = bits (insn, 28, 31);
6224 /* BX: x12xxx1x
6225 BLX: x12xxx3x. */
6226 int link = bit (insn, 5);
6227 unsigned int rm = bits (insn, 0, 3);
6228
6229 if (debug_displaced)
6230 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
6231 (unsigned long) insn);
6232
6233 dsc->modinsn[0] = ARM_NOP;
6234
6235 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
6236 return 0;
6237 }
6238
6239 static int
6240 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
6241 struct regcache *regs,
6242 struct displaced_step_closure *dsc)
6243 {
6244 int link = bit (insn, 7);
6245 unsigned int rm = bits (insn, 3, 6);
6246
6247 if (debug_displaced)
6248 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
6249 (unsigned short) insn);
6250
6251 dsc->modinsn[0] = THUMB_NOP;
6252
6253 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
6254
6255 return 0;
6256 }
6257
6258
6259 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
6260
6261 static void
6262 cleanup_alu_imm (struct gdbarch *gdbarch,
6263 struct regcache *regs, struct displaced_step_closure *dsc)
6264 {
6265 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6266 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6267 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6268 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6269 }
6270
6271 static int
6272 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6273 struct displaced_step_closure *dsc)
6274 {
6275 unsigned int rn = bits (insn, 16, 19);
6276 unsigned int rd = bits (insn, 12, 15);
6277 unsigned int op = bits (insn, 21, 24);
6278 int is_mov = (op == 0xd);
6279 ULONGEST rd_val, rn_val;
6280
6281 if (!insn_references_pc (insn, 0x000ff000ul))
6282 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
6283
6284 if (debug_displaced)
6285 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
6286 "%.8lx\n", is_mov ? "move" : "ALU",
6287 (unsigned long) insn);
6288
6289 /* Instruction is of form:
6290
6291 <op><cond> rd, [rn,] #imm
6292
6293 Rewrite as:
6294
6295 Preparation: tmp1, tmp2 <- r0, r1;
6296 r0, r1 <- rd, rn
6297 Insn: <op><cond> r0, r1, #imm
6298 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6299 */
6300
6301 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6302 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6303 rn_val = displaced_read_reg (regs, dsc, rn);
6304 rd_val = displaced_read_reg (regs, dsc, rd);
6305 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6306 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6307 dsc->rd = rd;
6308
6309 if (is_mov)
6310 dsc->modinsn[0] = insn & 0xfff00fff;
6311 else
6312 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
6313
6314 dsc->cleanup = &cleanup_alu_imm;
6315
6316 return 0;
6317 }
6318
/* Copy a 32-bit Thumb-2 ALU-immediate instruction.  Only reached for the
   MOV encoding (see the gdb_assert below); MOV with Rd or "Rm" equal to
   the PC is rewritten to use low scratch registers instead.  */

static int
thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
		     uint16_t insn2, struct regcache *regs,
		     struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn, rm, rd;
  ULONGEST rd_val, rn_val;

  rn = bits (insn1, 0, 3);	/* Rn */
  rm = bits (insn2, 0, 3);	/* Rm */
  rd = bits (insn2, 8, 11);	/* Rd */

  /* This routine is only called for instruction MOV.  */
  gdb_assert (op == 0x2 && rn == 0xf);

  /* If neither register involved is the PC, the instruction can run
     out of line unchanged.  */
  if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
			"ALU", insn1, insn2);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  /* Save the scratch registers, then capture the operand values before
     the scratch registers are clobbered.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Rewrite the second halfword so the destination becomes r0/r1
     scratch registers; the first halfword is kept as-is.  */
  dsc->modinsn[0] = insn1;
  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
  dsc->numinsns = 2;

  /* cleanup_alu_imm moves the result from r0 into the real Rd and
     restores r0/r1.  */
  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
6370
6371 /* Copy/cleanup arithmetic/logic insns with register RHS. */
6372
6373 static void
6374 cleanup_alu_reg (struct gdbarch *gdbarch,
6375 struct regcache *regs, struct displaced_step_closure *dsc)
6376 {
6377 ULONGEST rd_val;
6378 int i;
6379
6380 rd_val = displaced_read_reg (regs, dsc, 0);
6381
6382 for (i = 0; i < 3; i++)
6383 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6384
6385 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6386 }
6387
6388 static void
6389 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
6390 struct displaced_step_closure *dsc,
6391 unsigned int rd, unsigned int rn, unsigned int rm)
6392 {
6393 ULONGEST rd_val, rn_val, rm_val;
6394
6395 /* Instruction is of form:
6396
6397 <op><cond> rd, [rn,] rm [, <shift>]
6398
6399 Rewrite as:
6400
6401 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
6402 r0, r1, r2 <- rd, rn, rm
6403 Insn: <op><cond> r0, r1, r2 [, <shift>]
6404 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
6405 */
6406
6407 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6408 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6409 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6410 rd_val = displaced_read_reg (regs, dsc, rd);
6411 rn_val = displaced_read_reg (regs, dsc, rn);
6412 rm_val = displaced_read_reg (regs, dsc, rm);
6413 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6414 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6415 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6416 dsc->rd = rd;
6417
6418 dsc->cleanup = &cleanup_alu_reg;
6419 }
6420
6421 static int
6422 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6423 struct displaced_step_closure *dsc)
6424 {
6425 unsigned int op = bits (insn, 21, 24);
6426 int is_mov = (op == 0xd);
6427
6428 if (!insn_references_pc (insn, 0x000ff00ful))
6429 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
6430
6431 if (debug_displaced)
6432 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
6433 is_mov ? "move" : "ALU", (unsigned long) insn);
6434
6435 if (is_mov)
6436 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
6437 else
6438 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
6439
6440 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
6441 bits (insn, 0, 3));
6442 return 0;
6443 }
6444
6445 static int
6446 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
6447 struct regcache *regs,
6448 struct displaced_step_closure *dsc)
6449 {
6450 unsigned rn, rm, rd;
6451
6452 rd = bits (insn, 3, 6);
6453 rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
6454 rm = 2;
6455
6456 if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6457 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
6458
6459 if (debug_displaced)
6460 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
6461 "ALU", (unsigned short) insn);
6462
6463 dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
6464
6465 install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
6466
6467 return 0;
6468 }
6469
6470 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6471
6472 static void
6473 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
6474 struct regcache *regs,
6475 struct displaced_step_closure *dsc)
6476 {
6477 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6478 int i;
6479
6480 for (i = 0; i < 4; i++)
6481 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6482
6483 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6484 }
6485
6486 static void
6487 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
6488 struct displaced_step_closure *dsc,
6489 unsigned int rd, unsigned int rn, unsigned int rm,
6490 unsigned rs)
6491 {
6492 int i;
6493 ULONGEST rd_val, rn_val, rm_val, rs_val;
6494
6495 /* Instruction is of form:
6496
6497 <op><cond> rd, [rn,] rm, <shift> rs
6498
6499 Rewrite as:
6500
6501 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
6502 r0, r1, r2, r3 <- rd, rn, rm, rs
6503 Insn: <op><cond> r0, r1, r2, <shift> r3
6504 Cleanup: tmp5 <- r0
6505 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
6506 rd <- tmp5
6507 */
6508
6509 for (i = 0; i < 4; i++)
6510 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6511
6512 rd_val = displaced_read_reg (regs, dsc, rd);
6513 rn_val = displaced_read_reg (regs, dsc, rn);
6514 rm_val = displaced_read_reg (regs, dsc, rm);
6515 rs_val = displaced_read_reg (regs, dsc, rs);
6516 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6517 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6518 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6519 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
6520 dsc->rd = rd;
6521 dsc->cleanup = &cleanup_alu_shifted_reg;
6522 }
6523
6524 static int
6525 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
6526 struct regcache *regs,
6527 struct displaced_step_closure *dsc)
6528 {
6529 unsigned int op = bits (insn, 21, 24);
6530 int is_mov = (op == 0xd);
6531 unsigned int rd, rn, rm, rs;
6532
6533 if (!insn_references_pc (insn, 0x000fff0ful))
6534 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
6535
6536 if (debug_displaced)
6537 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
6538 "%.8lx\n", is_mov ? "move" : "ALU",
6539 (unsigned long) insn);
6540
6541 rn = bits (insn, 16, 19);
6542 rm = bits (insn, 0, 3);
6543 rs = bits (insn, 8, 11);
6544 rd = bits (insn, 12, 15);
6545
6546 if (is_mov)
6547 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
6548 else
6549 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
6550
6551 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
6552
6553 return 0;
6554 }
6555
6556 /* Clean up load instructions. */
6557
static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* The copy routine placed the destination in r0 (and r1 for
     doubleword transfers) and the base register in r2; read the loaded
     value(s) and the possibly-updated base before restoring anything.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers saved in dsc->tmp[]; r3 was only used
     (and saved) for the register-offset addressing form.  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  Writing the PC here is allowed
     (LOAD_WRITE_PC), e.g. for "ldr pc, ...".  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
6584
6585 /* Clean up store instructions. */
6586
static void
cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
	       struct displaced_step_closure *dsc)
{
  /* r2 held the (possibly auto-modified) base register value.  */
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers saved in dsc->tmp[] by the copy
     routine; r3 is only used for the register-offset form.  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
  /* NOTE(review): the sense of this test looks inverted relative to the
     flag's name -- r4 is written back precisely when restore_r4 is
     clear, yet arm_copy_ldr_str_ldrb_strb sets restore_r4 = 1 exactly
     when r4 is used as scratch, and arm_copy_extra_ld_st never saves
     dsc->tmp[4] at all.  Confirm against the copy routines before
     relying on (or changing) this behavior.  */
  if (!dsc->u.ldst.restore_r4)
    displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);

  /* Writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
}
6606
6607 /* Copy "extra" load/store instructions. These are halfword/doubleword
6608 transfers, which have a different encoding to byte/word transfers. */
6609
6610 static int
6611 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
6612 struct regcache *regs, struct displaced_step_closure *dsc)
6613 {
6614 unsigned int op1 = bits (insn, 20, 24);
6615 unsigned int op2 = bits (insn, 5, 6);
6616 unsigned int rt = bits (insn, 12, 15);
6617 unsigned int rn = bits (insn, 16, 19);
6618 unsigned int rm = bits (insn, 0, 3);
6619 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6620 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6621 int immed = (op1 & 0x4) != 0;
6622 int opcode;
6623 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
6624
6625 if (!insn_references_pc (insn, 0x000ff00ful))
6626 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
6627
6628 if (debug_displaced)
6629 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
6630 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
6631 (unsigned long) insn);
6632
6633 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
6634
6635 if (opcode < 0)
6636 internal_error (__FILE__, __LINE__,
6637 _("copy_extra_ld_st: instruction decode error"));
6638
6639 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6640 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6641 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6642 if (!immed)
6643 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6644
6645 rt_val = displaced_read_reg (regs, dsc, rt);
6646 if (bytesize[opcode] == 8)
6647 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
6648 rn_val = displaced_read_reg (regs, dsc, rn);
6649 if (!immed)
6650 rm_val = displaced_read_reg (regs, dsc, rm);
6651
6652 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6653 if (bytesize[opcode] == 8)
6654 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
6655 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6656 if (!immed)
6657 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6658
6659 dsc->rd = rt;
6660 dsc->u.ldst.xfersize = bytesize[opcode];
6661 dsc->u.ldst.rn = rn;
6662 dsc->u.ldst.immed = immed;
6663 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
6664 dsc->u.ldst.restore_r4 = 0;
6665
6666 if (immed)
6667 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6668 ->
6669 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6670 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6671 else
6672 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6673 ->
6674 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6675 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6676
6677 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
6678
6679 return 0;
6680 }
6681
6682 /* Copy byte/half word/word loads and stores. */
6683
/* Shared setup for single-register load/store copies: saves the scratch
   registers, moves the operands into r0/r2/r3 and installs the right
   cleanup routine.  The caller is responsible for writing dsc->modinsn[]
   and, for the PC-store case, dsc->u.ldst.restore_r4.  */

static void
install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
		    struct displaced_step_closure *dsc, int load,
		    int immed, int writeback, int size, int usermode,
		    int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Save scratch registers: r3 is only needed for the register-offset
     form, r4 only as extra scratch for stores (PC-store sequence).  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  /* Read the operand values before overwriting any scratch register.  */
  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rt;
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from displaced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop  {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc   r4 = addr(Insn1) + offset - pc
				= addr(Insn1) + offset - addr(Insn3) - 8
				= offset - 16
     Insn4: add r4, r4, #8   r4 = offset - 8
     Insn5: add r0, r0, r4   r0 = from + 8 + offset - 8
				= from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
6737
6738
/* Copy a Thumb-2 PC-relative (literal) load of SIZE bytes: the PC-based
   address is materialized in scratch registers and the load is replayed
   as a register-offset LDR.  */

static int
thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc, int size)
{
  unsigned int u_bit = bit (insn1, 7);	/* Add (1) or subtract (0) imm12.  */
  unsigned int rt = bits (insn2, 12, 15);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
			imm12);

  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction LDR Rt imm12 into:

     Prepare: tmp[0] <- r0, tmp[2] <- r2, tmp[3] <- r3, r2 <- pc, r3 <- imm12

     LDR R0, R2, R3,

     Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[2], r3 <- tmp[3].  */


  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  /* Literal loads use Align (PC, 4) as the base address.  */
  pc_val = pc_val & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);

  dsc->rd = rt;

  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  /* LDR R0, R2, R3 */
  dsc->modinsn[0] = 0xf852;
  dsc->modinsn[1] = 0x3;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_load;

  return 0;
}
6794
6795 static int
6796 thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
6797 uint16_t insn2, struct regcache *regs,
6798 struct displaced_step_closure *dsc,
6799 int writeback, int immed)
6800 {
6801 unsigned int rt = bits (insn2, 12, 15);
6802 unsigned int rn = bits (insn1, 0, 3);
6803 unsigned int rm = bits (insn2, 0, 3); /* Only valid if !immed. */
6804 /* In LDR (register), there is also a register Rm, which is not allowed to
6805 be PC, so we don't have to check it. */
6806
6807 if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6808 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
6809 dsc);
6810
6811 if (debug_displaced)
6812 fprintf_unfiltered (gdb_stdlog,
6813 "displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
6814 rt, rn, insn1, insn2);
6815
6816 install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
6817 0, rt, rm, rn);
6818
6819 dsc->u.ldst.restore_r4 = 0;
6820
6821 if (immed)
6822 /* ldr[b]<cond> rt, [rn, #imm], etc.
6823 ->
6824 ldr[b]<cond> r0, [r2, #imm]. */
6825 {
6826 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6827 dsc->modinsn[1] = insn2 & 0x0fff;
6828 }
6829 else
6830 /* ldr[b]<cond> rt, [rn, rm], etc.
6831 ->
6832 ldr[b]<cond> r0, [r2, r3]. */
6833 {
6834 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6835 dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
6836 }
6837
6838 dsc->numinsns = 2;
6839
6840 return 0;
6841 }
6842
6843
/* Copy an ARM single-register load/store (byte or word).  Loads and
   non-PC stores become a single rewritten instruction; a store of the
   PC needs a 6-instruction scratch sequence (see install_load_store's
   comment) to compute the correct stored value.  */

static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    struct displaced_step_closure *dsc,
			    int load, int size, int usermode)
{
  int immed = !bit (insn, 25);
  /* Writeback: post-indexed (P == 0) or pre-indexed with W set.  */
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  /* Instructions not involving the PC can run out of line unchanged.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
			load ? (size == 1 ? "ldrb" : "ldr")
			     : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
			rt, rn,
			(unsigned long) insn);

  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
		      usermode, rt, rm, rn);

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      /* This sequence computes "from + offset" in r0 so the correct PC
	 value is stored; see the comment in install_load_store.  */
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
6908
6909 /* Cleanup LDM instructions with fully-populated register list. This is an
6910 unfortunate corner case: it's impossible to implement correctly by modifying
6911 the instruction. The issue is as follows: we have an instruction,
6912
6913 ldm rN, {r0-r15}
6914
6915 which we must rewrite to avoid loading PC. A possible solution would be to
6916 do the load in two halves, something like (with suitable cleanup
6917 afterwards):
6918
6919 mov r8, rN
6920 ldm[id][ab] r8!, {r0-r7}
6921 str r7, <temp>
6922 ldm[id][ab] r8, {r7-r14}
6923 <bkpt>
6924
6925 but at present there's no suitable place for <temp>, since the scratch space
6926 is overwritten before the cleanup routine is called. For now, we simply
6927 emulate the instruction. */
6928
static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  /* Pre-index bumps the address before each transfer; post-index after.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Walk the register list upwards for increment, downwards for
     decrement, matching the memory order of the real instruction.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* The original instruction was conditional; if the condition failed
     there is nothing to emulate.  */
  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  /* Emulate the transfer: for each register in the mask, read one word
     from memory and write it to the register (PC writes allowed).  */
  while (regmask)
    {
      uint32_t memword;

      /* Advance to the next register named in the mask.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Emulate base-register writeback.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
6987
6988 /* Clean up an STM which included the PC in the register list. */
6989
static void
cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int store_executed = condition_true (dsc->u.block.cond, status);
  CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
  CORE_ADDR stm_insn_addr;
  uint32_t pc_val;
  long offset;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* If condition code fails, there's nothing else to do.  */
  if (!store_executed)
    return;

  /* Locate the memory word that received the PC.  PC is always the
     highest-numbered register, so it lands at the highest address of
     the transfer.  */
  if (dsc->u.block.increment)
    {
      pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;

      if (dsc->u.block.before)
	pc_stored_at += 4;
    }
  else
    {
      pc_stored_at = dsc->u.block.xfer_addr;

      if (dsc->u.block.before)
	pc_stored_at -= 4;
    }

  /* The out-of-line STM stored the scratch-area PC; derive the
     architecture-dependent store offset (PC+8 or PC+12) from it.  */
  pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
  stm_insn_addr = dsc->scratch_base;
  offset = pc_val - stm_insn_addr;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
			"STM instruction\n", offset);

  /* Rewrite the stored PC to the proper value for the non-displaced original
     instruction.  */
  write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
				 dsc->insn_addr + offset);
}
7034
7035 /* Clean up an LDM which includes the PC in the register list. We clumped all
7036 the registers in the transferred list into a contiguous range r0...rX (to
7037 avoid loading PC directly and losing control of the debugged program), so we
7038 must undo that here. */
7039
static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int load_executed = condition_true (dsc->u.block.cond, status);
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = bitcount (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  /* Nothing was loaded if the condition failed.  */
  if (!load_executed)
    return;

  /* The out-of-line LDM loaded into the contiguous range r0..r(N-1);
     those are the registers we may have clobbered.  */
  clobbered = (1 << num_to_shuffle) - 1;

  /* Walk the original register list from r15 downwards, moving each
     loaded value from its temporary home r(N-1), r(N-2), ... into the
     register the original instruction targeted.  Going top-down means
     a destination register is never overwritten before its own loaded
     value has been moved out.  */
  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      if (debug_displaced)
		fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
				    "loaded register r%d to r%d\n"), read_reg,
				    write_reg);
	    }
	  else if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
				"r%d already in the right place\n"),
				write_reg);

	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
				"clobbered register r%d\n"), write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually.  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}
7116
7117 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
7118 in user-level code (in particular exception return, ldm rn, {...pc}^). */
7119
7120 static int
7121 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
7122 struct regcache *regs,
7123 struct displaced_step_closure *dsc)
7124 {
7125 int load = bit (insn, 20);
7126 int user = bit (insn, 22);
7127 int increment = bit (insn, 23);
7128 int before = bit (insn, 24);
7129 int writeback = bit (insn, 21);
7130 int rn = bits (insn, 16, 19);
7131
7132 /* Block transfers which don't mention PC can be run directly
7133 out-of-line. */
7134 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
7135 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
7136
7137 if (rn == ARM_PC_REGNUM)
7138 {
7139 warning (_("displaced: Unpredictable LDM or STM with "
7140 "base register r15"));
7141 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
7142 }
7143
7144 if (debug_displaced)
7145 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
7146 "%.8lx\n", (unsigned long) insn);
7147
7148 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
7149 dsc->u.block.rn = rn;
7150
7151 dsc->u.block.load = load;
7152 dsc->u.block.user = user;
7153 dsc->u.block.increment = increment;
7154 dsc->u.block.before = before;
7155 dsc->u.block.writeback = writeback;
7156 dsc->u.block.cond = bits (insn, 28, 31);
7157
7158 dsc->u.block.regmask = insn & 0xffff;
7159
7160 if (load)
7161 {
7162 if ((insn & 0xffff) == 0xffff)
7163 {
7164 /* LDM with a fully-populated register list. This case is
7165 particularly tricky. Implement for now by fully emulating the
7166 instruction (which might not behave perfectly in all cases, but
7167 these instructions should be rare enough for that not to matter
7168 too much). */
7169 dsc->modinsn[0] = ARM_NOP;
7170
7171 dsc->cleanup = &cleanup_block_load_all;
7172 }
7173 else
7174 {
7175 /* LDM of a list of registers which includes PC. Implement by
7176 rewriting the list of registers to be transferred into a
7177 contiguous chunk r0...rX before doing the transfer, then shuffling
7178 registers into the correct places in the cleanup routine. */
7179 unsigned int regmask = insn & 0xffff;
7180 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
7181 unsigned int to = 0, from = 0, i, new_rn;
7182
7183 for (i = 0; i < num_in_list; i++)
7184 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7185
7186 /* Writeback makes things complicated. We need to avoid clobbering
7187 the base register with one of the registers in our modified
7188 register list, but just using a different register can't work in
7189 all cases, e.g.:
7190
7191 ldm r14!, {r0-r13,pc}
7192
7193 which would need to be rewritten as:
7194
7195 ldm rN!, {r0-r14}
7196
7197 but that can't work, because there's no free register for N.
7198
7199 Solve this by turning off the writeback bit, and emulating
7200 writeback manually in the cleanup routine. */
7201
7202 if (writeback)
7203 insn &= ~(1 << 21);
7204
7205 new_regmask = (1 << num_in_list) - 1;
7206
7207 if (debug_displaced)
7208 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
7209 "{..., pc}: original reg list %.4x, modified "
7210 "list %.4x\n"), rn, writeback ? "!" : "",
7211 (int) insn & 0xffff, new_regmask);
7212
7213 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
7214
7215 dsc->cleanup = &cleanup_block_load_pc;
7216 }
7217 }
7218 else
7219 {
7220 /* STM of a list of registers which includes PC. Run the instruction
7221 as-is, but out of line: this will store the wrong value for the PC,
7222 so we must manually fix up the memory in the cleanup routine.
7223 Doing things this way has the advantage that we can auto-detect
7224 the offset of the PC write (which is architecture-dependent) in
7225 the cleanup routine. */
7226 dsc->modinsn[0] = insn;
7227
7228 dsc->cleanup = &cleanup_block_store_pc;
7229 }
7230
7231 return 0;
7232 }
7233
7234 static int
7235 thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7236 struct regcache *regs,
7237 struct displaced_step_closure *dsc)
7238 {
7239 int rn = bits (insn1, 0, 3);
7240 int load = bit (insn1, 4);
7241 int writeback = bit (insn1, 5);
7242
7243 /* Block transfers which don't mention PC can be run directly
7244 out-of-line. */
7245 if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
7246 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
7247
7248 if (rn == ARM_PC_REGNUM)
7249 {
7250 warning (_("displaced: Unpredictable LDM or STM with "
7251 "base register r15"));
7252 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7253 "unpredictable ldm/stm", dsc);
7254 }
7255
7256 if (debug_displaced)
7257 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
7258 "%.4x%.4x\n", insn1, insn2);
7259
7260 /* Clear bit 13, since it should be always zero. */
7261 dsc->u.block.regmask = (insn2 & 0xdfff);
7262 dsc->u.block.rn = rn;
7263
7264 dsc->u.block.load = load;
7265 dsc->u.block.user = 0;
7266 dsc->u.block.increment = bit (insn1, 7);
7267 dsc->u.block.before = bit (insn1, 8);
7268 dsc->u.block.writeback = writeback;
7269 dsc->u.block.cond = INST_AL;
7270 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
7271
7272 if (load)
7273 {
7274 if (dsc->u.block.regmask == 0xffff)
7275 {
7276 /* This branch is impossible to happen. */
7277 gdb_assert (0);
7278 }
7279 else
7280 {
7281 unsigned int regmask = dsc->u.block.regmask;
7282 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
7283 unsigned int to = 0, from = 0, i, new_rn;
7284
7285 for (i = 0; i < num_in_list; i++)
7286 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7287
7288 if (writeback)
7289 insn1 &= ~(1 << 5);
7290
7291 new_regmask = (1 << num_in_list) - 1;
7292
7293 if (debug_displaced)
7294 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
7295 "{..., pc}: original reg list %.4x, modified "
7296 "list %.4x\n"), rn, writeback ? "!" : "",
7297 (int) dsc->u.block.regmask, new_regmask);
7298
7299 dsc->modinsn[0] = insn1;
7300 dsc->modinsn[1] = (new_regmask & 0xffff);
7301 dsc->numinsns = 2;
7302
7303 dsc->cleanup = &cleanup_block_load_pc;
7304 }
7305 }
7306 else
7307 {
7308 dsc->modinsn[0] = insn1;
7309 dsc->modinsn[1] = insn2;
7310 dsc->numinsns = 2;
7311 dsc->cleanup = &cleanup_block_store_pc;
7312 }
7313 return 0;
7314 }
7315
7316 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
7317 for Linux, where some SVC instructions must be treated specially. */
7318
7319 static void
7320 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
7321 struct displaced_step_closure *dsc)
7322 {
7323 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
7324
7325 if (debug_displaced)
7326 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
7327 "%.8lx\n", (unsigned long) resume_addr);
7328
7329 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
7330 }
7331
7332
/* Common copy routine for svc instruction.  */
7334
7335 static int
7336 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
7337 struct displaced_step_closure *dsc)
7338 {
7339 /* Preparation: none.
7340 Insn: unmodified svc.
7341 Cleanup: pc <- insn_addr + insn_size. */
7342
7343 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
7344 instruction. */
7345 dsc->wrote_to_pc = 1;
7346
7347 /* Allow OS-specific code to override SVC handling. */
7348 if (dsc->u.svc.copy_svc_os)
7349 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
7350 else
7351 {
7352 dsc->cleanup = &cleanup_svc;
7353 return 0;
7354 }
7355 }
7356
7357 static int
7358 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
7359 struct regcache *regs, struct displaced_step_closure *dsc)
7360 {
7361
7362 if (debug_displaced)
7363 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
7364 (unsigned long) insn);
7365
7366 dsc->modinsn[0] = insn;
7367
7368 return install_svc (gdbarch, regs, dsc);
7369 }
7370
7371 static int
7372 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
7373 struct regcache *regs, struct displaced_step_closure *dsc)
7374 {
7375
7376 if (debug_displaced)
7377 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
7378 insn);
7379
7380 dsc->modinsn[0] = insn;
7381
7382 return install_svc (gdbarch, regs, dsc);
7383 }
7384
7385 /* Copy undefined instructions. */
7386
7387 static int
7388 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
7389 struct displaced_step_closure *dsc)
7390 {
7391 if (debug_displaced)
7392 fprintf_unfiltered (gdb_stdlog,
7393 "displaced: copying undefined insn %.8lx\n",
7394 (unsigned long) insn);
7395
7396 dsc->modinsn[0] = insn;
7397
7398 return 0;
7399 }
7400
7401 static int
7402 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7403 struct displaced_step_closure *dsc)
7404 {
7405
7406 if (debug_displaced)
7407 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
7408 "%.4x %.4x\n", (unsigned short) insn1,
7409 (unsigned short) insn2);
7410
7411 dsc->modinsn[0] = insn1;
7412 dsc->modinsn[1] = insn2;
7413 dsc->numinsns = 2;
7414
7415 return 0;
7416 }
7417
7418 /* Copy unpredictable instructions. */
7419
7420 static int
7421 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
7422 struct displaced_step_closure *dsc)
7423 {
7424 if (debug_displaced)
7425 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
7426 "%.8lx\n", (unsigned long) insn);
7427
7428 dsc->modinsn[0] = insn;
7429
7430 return 0;
7431 }
7432
7433 /* The decode_* functions are instruction decoding helpers. They mostly follow
7434 the presentation in the ARM ARM. */
7435
/* Decode the ARM miscellaneous / memory-hint / Advanced SIMD encoding
   space, dispatching on op1 (bits 20-26) and op2 (bits 4-7) as in the
   ARM ARM encoding tables.  Most entries can run unmodified out of
   line; preloads that name the PC get the preload copy routines.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      /* pld/pldw with rn == pc is unpredictable.  */
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
        /* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
7495
/* Decode the ARM "unconditional" (condition field 0b1111) encoding
   space.  Bit 27 clear goes to the misc/memhint/neon decoder; otherwise
   dispatch on bits 24-26 and 20 of the instruction.  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      /* BLX (immediate): PC-relative, needs the full branch copier.  */
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* Whether the base register is the PC.  */
	 int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7578
7579 /* Decode miscellaneous instructions in dp/misc encoding space. */
7580
static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);
  unsigned int op1 = bits (insn, 16, 19);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
        /* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
        /* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      /* Intentional fall through to the default case when op is
	 neither bkpt nor smc: the encoding is undefined.  */

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7631
/* Decode the ARM data-processing / miscellaneous encoding space.
   Bit 25 selects immediate forms; otherwise dispatch on op1
   (bits 20-24) and op2 (bits 4-7).  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7677
/* Decode the ARM load/store word and unsigned byte encoding space.
   The trailing three arguments to arm_copy_ldr_str_ldrb_strb appear to
   select load vs store, the transfer size in bytes (4 or 1), and the
   unprivileged ("T") variant respectively — matching the LDR/STR,
   LDRT/STRT, LDRB/STRB and LDRBT/STRBT rows of the ARM ARM table
   (TODO: confirm against the callee's parameter names).  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
7715
/* Decode the ARM media-instruction encoding space (parallel add/sub,
   pack/saturate/reverse, usad8/usada8, bitfield insns).  None of these
   can reference the PC in a way that needs rewriting, so everything is
   copied unmodified or flagged undefined.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
			      "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	 {
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	 return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	 {
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  NOTE(review): opcodes 0x10-0x17 and 0x19 hit
     no case above and do reach here — presumably the caller never passes
     them; verify against the callers' masks.  */
  return 1;
}
7771
/* Decode the branch / block-transfer encoding space: bit 25 set selects
   B/BL/BLX (immediate); clear selects LDM/STM.  */

static int
arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
			struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  if (bit (insn, 25) == 0)
    return arm_copy_block_xfer (gdbarch, insn, regs, dsc);

  return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
}
7782
/* Decode VFP/Neon extension-register load/store (bits 20-24).  Only the
   single-register VLDR/VSTR forms can be PC-relative and so need the
   coprocessor load/store copier; the multi-register forms run
   unmodified.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7813
7814 /* Decode shifted register instructions. */
7815
static int
thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
			    uint16_t insn2, struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  /* In the dp (shift reg) space only MOV (op == 0x2 with rn == 0xf) may
     involve the PC; everything else can be copied unmodified.  */
  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn = bits (insn1, 0, 3);

  if (op != 0x2 || rn != 0xf)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"dp (shift reg)", dsc);

  /* MOV: handle via the ALU-immediate copy routine.  */
  return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
}
7832
7833
7834 /* Decode extension register load/store. Exactly the same as
7835 arm_decode_ext_reg_ld_st. */
7836
static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
			     uint16_t insn2, struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  /* Dispatch on bits 4-8 of the first halfword; only VLDR (which may be
     PC-relative) needs the coprocessor load/store copier.  */
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vmov", dsc);

    case 0x08: case 0x0c: /* 01x00 */
    case 0x0a: case 0x0e: /* 01x10 */
    case 0x12: case 0x16: /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d: /* 01x01 */
    case 0x0b: case 0x0f: /* 01x11 */
    case 0x13: case 0x17: /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7872
7873 static int
7874 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
7875 struct regcache *regs, struct displaced_step_closure *dsc)
7876 {
7877 unsigned int op1 = bits (insn, 20, 25);
7878 int op = bit (insn, 4);
7879 unsigned int coproc = bits (insn, 8, 11);
7880 unsigned int rn = bits (insn, 16, 19);
7881
7882 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
7883 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
7884 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
7885 && (coproc & 0xe) != 0xa)
7886 /* stc/stc2. */
7887 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7888 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
7889 && (coproc & 0xe) != 0xa)
7890 /* ldc/ldc2 imm/lit. */
7891 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7892 else if ((op1 & 0x3e) == 0x00)
7893 return arm_copy_undef (gdbarch, insn, dsc);
7894 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
7895 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
7896 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
7897 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7898 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
7899 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7900 else if ((op1 & 0x30) == 0x20 && !op)
7901 {
7902 if ((coproc & 0xe) == 0xa)
7903 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
7904 else
7905 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7906 }
7907 else if ((op1 & 0x30) == 0x20 && op)
7908 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
7909 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
7910 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7911 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
7912 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7913 else if ((op1 & 0x30) == 0x30)
7914 return arm_copy_svc (gdbarch, insn, regs, dsc);
7915 else
7916 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
7917 }
7918
/* Decode the Thumb-2 coprocessor / SIMD encoding space, dispatching on
   bits 4, 5-8 and 9 of the first halfword and the coprocessor number
   (bits 8-11 of the second halfword; 101x means VFP/Neon).  */

static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
			 uint16_t insn2, struct regcache *regs,
			 struct displaced_step_closure *dsc)
{
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
					    dsc);
      else if (bit_5_8 == 0)  /* UNDEFINED.  */
	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
	{
	   /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
	  if ((coproc & 0xe) == 0xa)
	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
						dsc);
	  else /* coproc is not 101x.  */
	    {
	      if (bit_4 == 0) /* STC/STC2.  */
		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						    "stc/stc2", dsc);
	      else /* LDC/LDC2 {literal, immediate}.  */
		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
						     regs, dsc);
	    }
	}
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  return 0;
}
7961
7962 static void
7963 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
7964 struct displaced_step_closure *dsc, int rd)
7965 {
7966 /* ADR Rd, #imm
7967
7968 Rewrite as:
7969
7970 Preparation: Rd <- PC
7971 Insn: ADD Rd, #imm
7972 Cleanup: Null.
7973 */
7974
7975 /* Rd <- PC */
7976 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7977 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
7978 }
7979
7980 static int
7981 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
7982 struct displaced_step_closure *dsc,
7983 int rd, unsigned int imm)
7984 {
7985
7986 /* Encoding T2: ADDS Rd, #imm */
7987 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
7988
7989 install_pc_relative (gdbarch, regs, dsc, rd);
7990
7991 return 0;
7992 }
7993
7994 static int
7995 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
7996 struct regcache *regs,
7997 struct displaced_step_closure *dsc)
7998 {
7999 unsigned int rd = bits (insn, 8, 10);
8000 unsigned int imm8 = bits (insn, 0, 7);
8001
8002 if (debug_displaced)
8003 fprintf_unfiltered (gdb_stdlog,
8004 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
8005 rd, imm8, insn);
8006
8007 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
8008 }
8009
/* Copy a 32-bit Thumb ADR (ADR.W) for displaced stepping.  ADR encoding
   T2 (bit 7 of insn1 set) subtracts the immediate from Align(PC,4);
   encoding T3 adds it.  The replacement uses the SUB/ADD (immediate) T3
   encodings with Rd as both source and destination, after Rd has been
   seeded with the PC value by install_pc_relative.  */

static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
			      uint16_t insn2, struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn2, 8, 11);
  /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
     extract raw immediate encoding rather than computing immediate.  When
     generating ADD or SUB instruction, we can simply perform OR operation to
     set immediate into ADD.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;
  unsigned int imm_i = insn1 & 0x0400; /* Clear all bits except bit 10.  */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
			rd, imm_i, imm_3_8, insn1, insn2);

  if (bit (insn1, 7)) /* ADR encoding T2 (subtract form).  */
    {
      /* Replacement: SUB (immediate) encoding T3, Rd <- Rd - #imm.  */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else /* ADR encoding T3 (add form).  */
    {
      /* Replacement: ADD (immediate) encoding T3, Rd <- Rd + #imm.  */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  dsc->numinsns = 2;

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
8046
8047 static int
8048 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
8049 struct regcache *regs,
8050 struct displaced_step_closure *dsc)
8051 {
8052 unsigned int rt = bits (insn1, 8, 10);
8053 unsigned int pc;
8054 int imm8 = (bits (insn1, 0, 7) << 2);
8055 CORE_ADDR from = dsc->insn_addr;
8056
8057 /* LDR Rd, #imm8
8058
8059 Rwrite as:
8060
8061 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
8062
8063 Insn: LDR R0, [R2, R3];
8064 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
8065
8066 if (debug_displaced)
8067 fprintf_unfiltered (gdb_stdlog,
8068 "displaced: copying thumb ldr r%d [pc #%d]\n"
8069 , rt, imm8);
8070
8071 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
8072 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
8073 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
8074 pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
8075 /* The assembler calculates the required value of the offset from the
8076 Align(PC,4) value of this instruction to the label. */
8077 pc = pc & 0xfffffffc;
8078
8079 displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
8080 displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
8081
8082 dsc->rd = rt;
8083 dsc->u.ldst.xfersize = 4;
8084 dsc->u.ldst.rn = 0;
8085 dsc->u.ldst.immed = 0;
8086 dsc->u.ldst.writeback = 0;
8087 dsc->u.ldst.restore_r4 = 0;
8088
8089 dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
8090
8091 dsc->cleanup = &cleanup_load;
8092
8093 return 0;
8094 }
8095
/* Copy Thumb cbnz/cbz instruction.  */
8097
8098 static int
8099 thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
8100 struct regcache *regs,
8101 struct displaced_step_closure *dsc)
8102 {
8103 int non_zero = bit (insn1, 11);
8104 unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
8105 CORE_ADDR from = dsc->insn_addr;
8106 int rn = bits (insn1, 0, 2);
8107 int rn_val = displaced_read_reg (regs, dsc, rn);
8108
8109 dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
8110 /* CBNZ and CBZ do not affect the condition flags. If condition is true,
8111 set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
8112 condition is false, let it be, cleanup_branch will do nothing. */
8113 if (dsc->u.branch.cond)
8114 {
8115 dsc->u.branch.cond = INST_AL;
8116 dsc->u.branch.dest = from + 4 + imm5;
8117 }
8118 else
8119 dsc->u.branch.dest = from + 2;
8120
8121 dsc->u.branch.link = 0;
8122 dsc->u.branch.exchange = 0;
8123
8124 if (debug_displaced)
8125 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
8126 " insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
8127 rn, rn_val, insn1, dsc->u.branch.dest);
8128
8129 dsc->modinsn[0] = THUMB_NOP;
8130
8131 dsc->cleanup = &cleanup_branch;
8132 return 0;
8133 }
8134
/* Copy Table Branch Byte/Halfword (TBB/TBH).  */
static int
thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  /* Emulate TBB/TBH: read the table entry (a halfword count) from the
     inferior and compute the branch destination directly; cleanup_branch
     then performs the branch.  */
  ULONGEST rn_val, rm_val;
  int is_tbh = bit (insn2, 4);
  CORE_ADDR halfwords = 0;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));

  /* NOTE(review): the target_read_memory results below are unchecked; on
     a read failure `halfwords' silently stays whatever the buffer held.  */
  if (is_tbh)
    {
      gdb_byte buf[2];

      /* TBH: table of halfword entries at Rn + 2*Rm.  */
      target_read_memory (rn_val + 2 * rm_val, buf, 2);
      halfwords = extract_unsigned_integer (buf, 2, byte_order);
    }
  else
    {
      gdb_byte buf[1];

      /* TBB: table of byte entries at Rn + Rm.  */
      target_read_memory (rn_val + rm_val, buf, 1);
      halfwords = extract_unsigned_integer (buf, 1, byte_order);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
			" offset 0x%x\n", is_tbh ? "tbh" : "tbb",
			(unsigned int) rn_val, (unsigned int) rm_val,
			(unsigned int) halfwords);

  /* Branch destination is PC (insn address + 4) plus twice the entry.  */
  dsc->u.branch.cond = INST_AL;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
8179
8180 static void
8181 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
8182 struct displaced_step_closure *dsc)
8183 {
8184 /* PC <- r7 */
8185 int val = displaced_read_reg (regs, dsc, 7);
8186 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
8187
8188 /* r7 <- r8 */
8189 val = displaced_read_reg (regs, dsc, 8);
8190 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
8191
8192 /* r8 <- tmp[0] */
8193 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
8194
8195 }
8196
8197 static int
8198 thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
8199 struct regcache *regs,
8200 struct displaced_step_closure *dsc)
8201 {
8202 dsc->u.block.regmask = insn1 & 0x00ff;
8203
8204 /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
8205 to :
8206
8207 (1) register list is full, that is, r0-r7 are used.
8208 Prepare: tmp[0] <- r8
8209
8210 POP {r0, r1, ...., r6, r7}; remove PC from reglist
8211 MOV r8, r7; Move value of r7 to r8;
8212 POP {r7}; Store PC value into r7.
8213
8214 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
8215
8216 (2) register list is not full, supposing there are N registers in
8217 register list (except PC, 0 <= N <= 7).
8218 Prepare: for each i, 0 - N, tmp[i] <- ri.
8219
8220 POP {r0, r1, ...., rN};
8221
8222 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
8223 from tmp[] properly.
8224 */
8225 if (debug_displaced)
8226 fprintf_unfiltered (gdb_stdlog,
8227 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
8228 dsc->u.block.regmask, insn1);
8229
8230 if (dsc->u.block.regmask == 0xff)
8231 {
8232 dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
8233
8234 dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
8235 dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
8236 dsc->modinsn[2] = 0xbc80; /* POP {r7} */
8237
8238 dsc->numinsns = 3;
8239 dsc->cleanup = &cleanup_pop_pc_16bit_all;
8240 }
8241 else
8242 {
8243 unsigned int num_in_list = bitcount (dsc->u.block.regmask);
8244 unsigned int new_regmask, bit = 1;
8245 unsigned int to = 0, from = 0, i, new_rn;
8246
8247 for (i = 0; i < num_in_list + 1; i++)
8248 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
8249
8250 new_regmask = (1 << (num_in_list + 1)) - 1;
8251
8252 if (debug_displaced)
8253 fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
8254 "{..., pc}: original reg list %.4x,"
8255 " modified list %.4x\n"),
8256 (int) dsc->u.block.regmask, new_regmask);
8257
8258 dsc->u.block.regmask |= 0x8000;
8259 dsc->u.block.writeback = 0;
8260 dsc->u.block.cond = INST_AL;
8261
8262 dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
8263
8264 dsc->cleanup = &cleanup_block_load_pc;
8265 }
8266
8267 return 0;
8268 }
8269
/* Decode a 16-bit Thumb instruction INSN1 and prepare DSC for displaced
   stepping it.  Instructions that cannot reference the PC are copied
   unmodified; PC-dependent ones are dispatched to dedicated copy
   routines.  Calls internal_error on a decode failure.  */
static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions.  */
  switch (op_bit_12_15)
    {
      /* Shift (imme), add, subtract, move and compare.  */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0: /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1: /* Special data instructions and branch and exchange.  */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7) /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default: /* LDR (literal) -- PC-relative, needs fixup.  */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address (ADR).  */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13: /* POP */
	    if (bit (insn1, 8)) /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15: /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction.  */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
8372
/* Decode a 32-bit Thumb-2 instruction from the "load byte, halfword,
   word and memory hints" group and copy it for displaced stepping.
   Returns the result of the chosen copy helper (nonzero on error).
   Removed the unused local ERR -- every path returns directly.  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
				 uint16_t insn1, uint16_t insn2,
				 struct regcache *regs,
				 struct displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);
  int rn = bits (insn1, 0, 3);
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
	{
	  if (rn == 0xf)
	    /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"pli/pld", dsc);
	}
      else
	{
	  if (rn == 0xf) /* LDRB/LDRSB (literal) -- PC-relative.  */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     1);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrb{reg, immediate}/ldrbt",
						dsc);
	}

      break;
    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "pld/unalloc memhint", dsc);
      else
	{
	  if (rn == 0xf) /* LDRH/LDRSH (literal) -- PC-relative.  */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     2);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrh/ldrht", dsc);
	}
      break;
    case 2: /* Load word */
      {
	int insn2_bit_8_11 = bits (insn2, 8, 11);

	if (rn == 0xf) /* LDR (literal) -- PC-relative.  */
	  return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
	else if (op1 == 0x1) /* Encoding T3 */
	  return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
					   0, 1);
	else /* op1 == 0x0 */
	  {
	    if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
	      /* LDR (immediate) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, bit (insn2, 8), 1);
	    else if (insn2_bit_8_11 == 0xe) /* LDRT */
	      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						  "ldrt", dsc);
	    else
	      /* LDR (register) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, 0, 0);
	  }
	break;
      }
    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      break;
    }
  return 0;
}
8453
/* Decode a 32-bit Thumb-2 instruction (halfwords INSN1, INSN2) and
   prepare DSC for displaced stepping it.  Dispatches on the op1 field
   (bits 11-12 of the first halfword) and sub-fields below it.  Calls
   internal_error on a decode failure.  */
static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
	switch (bits (insn1, 9, 10))
	  {
	  case 0:
	    if (bit (insn1, 6))
	      {
		/* Load/store {dual, exclusive}, table branch.  */
		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
		    && bits (insn2, 5, 7) == 0)
		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
						  dsc);
		else
		  /* PC is not allowed in load/store {dual, exclusive}
		     instructions, so they can be copied unmodified.  */
		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						     "load/store dual/ex", dsc);
	      }
	    else /* load/store multiple */
	      {
		switch (bits (insn1, 7, 8))
		  {
		  case 0: case 3: /* SRS, RFE */
		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						       "srs/rfe", dsc);
		    break;
		  case 1: case 2: /* LDM/STM/PUSH/POP */
		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
		    break;
		  }
	      }
	    break;

	  case 1:
	    /* Data-processing (shift register).  */
	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
					      dsc);
	    break;
	  default: /* Coprocessor instructions.  */
	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	    break;
	  }
	break;
      }
    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
	{
	  if (bit (insn2, 14)  /* BLX/BL */
	      || bit (insn2, 12) /* Unconditional branch */
	      || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
	  else
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "misc ctrl", dsc);
	}
      else
	{
	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
	    {
	      /* NOTE: this inner OP shadows the outer function-scope OP.  */
	      int op = bits (insn1, 4, 8);
	      int rn = bits (insn1, 0, 3);
	      if ((op == 0 || op == 0xa) && rn == 0xf)
		/* ADR / ADD (PC-relative).  */
		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
						    regs, dsc);
	      else
		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						   "dp/pb", dsc);
	    }
	  else /* Data processing (modified immediate) */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "dp/mi", dsc);
	}
      break;
    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
	{
	case 0:
	  if (bit (insn1, 4))
	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
						   regs, dsc);
	  else /* NEON Load/Store and Store single data item */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "neon elt/struct load/store",
					       dsc);
	  break;
	case 1: /* op1 = 3, bits (9, 10) == 1 */
	  switch (bits (insn1, 7, 8))
	    {
	    case 0: case 1: /* Data processing (register) */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "dp(reg)", dsc);
	      break;
	    case 2: /* Multiply and absolute difference */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "mul/mua/diff", dsc);
	      break;
	    case 3: /* Long multiply and divide */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "lmul/lmua", dsc);
	      break;
	    }
	  break;
	default: /* Coprocessor instructions */
	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	  break;
	}
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));

}
8581
8582 static void
8583 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8584 CORE_ADDR to, struct regcache *regs,
8585 struct displaced_step_closure *dsc)
8586 {
8587 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8588 uint16_t insn1
8589 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
8590
8591 if (debug_displaced)
8592 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
8593 "at %.8lx\n", insn1, (unsigned long) from);
8594
8595 dsc->is_thumb = 1;
8596 dsc->insn_size = thumb_insn_size (insn1);
8597 if (thumb_insn_size (insn1) == 4)
8598 {
8599 uint16_t insn2
8600 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
8601 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
8602 }
8603 else
8604 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
8605 }
8606
8607 void
8608 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8609 CORE_ADDR to, struct regcache *regs,
8610 struct displaced_step_closure *dsc)
8611 {
8612 int err = 0;
8613 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8614 uint32_t insn;
8615
8616 /* Most displaced instructions use a 1-instruction scratch space, so set this
8617 here and override below if/when necessary. */
8618 dsc->numinsns = 1;
8619 dsc->insn_addr = from;
8620 dsc->scratch_base = to;
8621 dsc->cleanup = NULL;
8622 dsc->wrote_to_pc = 0;
8623
8624 if (!displaced_in_arm_mode (regs))
8625 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
8626
8627 dsc->is_thumb = 0;
8628 dsc->insn_size = 4;
8629 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
8630 if (debug_displaced)
8631 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
8632 "at %.8lx\n", (unsigned long) insn,
8633 (unsigned long) from);
8634
8635 if ((insn & 0xf0000000) == 0xf0000000)
8636 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
8637 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
8638 {
8639 case 0x0: case 0x1: case 0x2: case 0x3:
8640 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
8641 break;
8642
8643 case 0x4: case 0x5: case 0x6:
8644 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
8645 break;
8646
8647 case 0x7:
8648 err = arm_decode_media (gdbarch, insn, dsc);
8649 break;
8650
8651 case 0x8: case 0x9: case 0xa: case 0xb:
8652 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
8653 break;
8654
8655 case 0xc: case 0xd: case 0xe: case 0xf:
8656 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
8657 break;
8658 }
8659
8660 if (err)
8661 internal_error (__FILE__, __LINE__,
8662 _("arm_process_displaced_insn: Instruction decode error"));
8663 }
8664
/* Actually set up the scratch space for a displaced instruction.  Pokes
   the modified instruction(s) recorded in DSC->modinsn into the scratch
   area at TO, followed by the architecture's breakpoint instruction so
   the inferior traps after the single step.  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct displaced_step_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Thumb modinsn entries are 16-bit units; ARM entries are 32-bit.  */
  int size = dsc->is_thumb? 2 : 4;
  const gdb_byte *bkp_insn;

  offset = 0;
  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
	  if (size == 4)
	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
				dsc->modinsn[i]);
	  else if (size == 2)
	    fprintf_unfiltered (gdb_stdlog, "%.4x",
				(unsigned short)dsc->modinsn[i]);

	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
			      (unsigned long) to + offset);

	}
      write_memory_unsigned_integer (to + offset, size,
				     byte_order_for_code,
				     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			paddress (gdbarch, from), paddress (gdbarch, to));
}
8720
8721 /* Entry point for copying an instruction into scratch space for displaced
8722 stepping. */
8723
8724 struct displaced_step_closure *
8725 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
8726 CORE_ADDR from, CORE_ADDR to,
8727 struct regcache *regs)
8728 {
8729 struct displaced_step_closure *dsc
8730 = xmalloc (sizeof (struct displaced_step_closure));
8731 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
8732 arm_displaced_init_closure (gdbarch, from, to, dsc);
8733
8734 return dsc;
8735 }
8736
8737 /* Entry point for cleaning things up after a displaced instruction has been
8738 single-stepped. */
8739
8740 void
8741 arm_displaced_step_fixup (struct gdbarch *gdbarch,
8742 struct displaced_step_closure *dsc,
8743 CORE_ADDR from, CORE_ADDR to,
8744 struct regcache *regs)
8745 {
8746 if (dsc->cleanup)
8747 dsc->cleanup (gdbarch, regs, dsc);
8748
8749 if (!dsc->wrote_to_pc)
8750 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
8751 dsc->insn_addr + dsc->insn_size);
8752
8753 }
8754
8755 #include "bfd-in2.h"
8756 #include "libcoff.h"
8757
/* Disassembler callback: print the instruction at MEMADDR, selecting
   the Thumb or ARM opcodes decoder according to the symbol-table
   information for that address.  Returns the length of the printed
   instruction, as the opcodes library does.  */
static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Static so the fake symbol needs constructing only once; the
	 contents are address-independent.  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit from the address before disassembling.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
8799
8800 /* The following define instruction sequences that will cause ARM
8801 cpu's to take an undefined instruction trap. These are used to
8802 signal a breakpoint to GDB.
8803
8804 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8805 modes. A different instruction is required for each mode. The ARM
8806 cpu's can also be big or little endian. Thus four different
8807 instructions are needed to support all cases.
8808
8809 Note: ARMv4 defines several new instructions that will take the
8810 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8811 not in fact add the new instructions. The new undefined
8812 instructions in ARMv4 are all instructions that had no defined
8813 behaviour in earlier chips. There is no guarantee that they will
8814 raise an exception, but may be treated as NOP's. In practice, it
   may only be safe to rely on instructions matching:
8816
8817 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8818 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8819 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8820
   Even this may only be true if the condition predicate is true.  The
8822 following use a condition predicate of ALWAYS so it is always TRUE.
8823
8824 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8825 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.  This can be handled by the
8827 abi-specific code during establishment of the gdbarch vector. */
8828
/* Undefined-instruction byte sequences used as breakpoints, in little-
   and big-endian byte order.  The Thumb pattern's bytes (0xbe, 0xbe)
   read the same either way, so its LE and BE variants are identical.  */
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

/* Default breakpoint byte sequences, installed into the tdep by the
   OSABI-independent setup code.  */
static const gdb_byte arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const gdb_byte arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const gdb_byte arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const gdb_byte arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8838
8839 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8840 the program counter value to determine whether a 16-bit or 32-bit
8841 breakpoint should be used. It returns a pointer to a string of
8842 bytes that encode a breakpoint instruction, stores the length of
8843 the string to *lenptr, and adjusts the program counter (if
8844 necessary) to point to the actual memory location where the
8845 breakpoint should be inserted. */
8846
static const unsigned char *
arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  if (arm_pc_is_thumb (gdbarch, *pcptr))
    {
      /* Clear the Thumb bit so the breakpoint lands on the real
	 instruction address.  */
      *pcptr = UNMAKE_THUMB_ADDR (*pcptr);

      /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
	 check whether we are replacing a 32-bit instruction.  */
      if (tdep->thumb2_breakpoint != NULL)
	{
	  gdb_byte buf[2];
	  /* On a read failure, fall through to the 16-bit breakpoint.  */
	  if (target_read_memory (*pcptr, buf, 2) == 0)
	    {
	      unsigned short inst1;
	      inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
	      if (thumb_insn_size (inst1) == 4)
		{
		  *lenptr = tdep->thumb2_breakpoint_size;
		  return tdep->thumb2_breakpoint;
		}
	    }
	}

      *lenptr = tdep->thumb_breakpoint_size;
      return tdep->thumb_breakpoint;
    }
  else
    {
      *lenptr = tdep->arm_breakpoint_size;
      return tdep->arm_breakpoint;
    }
}
8883
8884 static void
8885 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
8886 int *kindptr)
8887 {
8888 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
8889
8890 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
8891 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8892 that this is not confused with a 32-bit ARM breakpoint. */
8893 *kindptr = 3;
8894 }
8895
8896 /* Extract from an array REGBUF containing the (raw) register state a
8897 function return value of type TYPE, and copy that, in virtual
8898 format, into VALBUF. */
8899
static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
	    convert_from_extended (floatformat_from_type (type), tmpbuf,
				   valbuf, gdbarch_byte_order (gdbarch));
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Float returned in r0 (and r1 for values wider than one
	     word, e.g. double).  */
	  regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
				  valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, tmpbuf);
	  memcpy (valbuf, tmpbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
8988
8989
8990 /* Will a function return an aggregate type in memory or in a
8991 register? Return 0 if an aggregate type can be returned in a
8992 register, 1 if it must be returned in memory. */
8993
static int
arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  CHECK_TYPEDEF (type);

  /* In the ARM ABI, "integer" like aggregate types are returned in
     registers.  For an aggregate type to be integer like, its size
     must be less than or equal to INT_REGISTER_SIZE and the
     offset of each addressable subfield must be zero.  Note that bit
     fields are not addressable, and all addressable subfields of
     unions always start at offset zero.

     This function is based on the behaviour of GCC 2.95.1.
     See: gcc/arm.c: arm_return_in_memory() for details.

     Note: All versions of GCC before GCC 2.95.2 do not set up the
     parameters correctly for a function returning the following
     structure: struct { float f;}; This should be returned in memory,
     not a register.  Richard Earnshaw sent me a patch, but I do not
     know of any way to detect if a function like the above has been
     compiled with the correct calling convention.  */

  /* All aggregate types that won't fit in a register must be returned
     in memory.  */
  if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
    {
      return 1;
    }

  /* The AAPCS says all aggregates not larger than a word are returned
     in a register.  */
  if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
    return 0;

  /* Below here is the legacy APCS rule: only "integer like" aggregates
     are returned in registers.  */

  /* The only aggregate types that can be returned in a register are
     structs and unions.  Arrays must be returned in memory.  */
  code = TYPE_CODE (type);
  if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
    {
      return 1;
    }

  /* Assume all other aggregate types can be returned in a register.
     Run a check for structures, unions and arrays.  */
  nRc = 0;

  if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
    {
      int i;
      /* Need to check if this struct/union is "integer" like.  For
	 this to be true, its size must be less than or equal to
	 INT_REGISTER_SIZE and the offset of each addressable
	 subfield must be zero.  Note that bit fields are not
	 addressable, and unions always start at offset zero.  If any
	 of the subfields is a floating point type, the struct/union
	 cannot be an integer type.  */

      /* For each field in the object, check:
	 1) Is it FP? --> yes, nRc = 1;
	 2) Is it addressable (bitpos != 0) and
	 not packed (bitsize == 0)?
	 --> yes, nRc = 1
      */

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  enum type_code field_type_code;
	  field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
								       i)));

	  /* Is it a floating point type field?  */
	  if (field_type_code == TYPE_CODE_FLT)
	    {
	      nRc = 1;
	      break;
	    }

	  /* If bitpos != 0, then we have to care about it.  */
	  if (TYPE_FIELD_BITPOS (type, i) != 0)
	    {
	      /* Bitfields are not addressable.  If the field bitsize is
		 zero, then the field is not packed.  Hence it cannot be
		 a bitfield or any other packed type.  */
	      if (TYPE_FIELD_BITSIZE (type, i) == 0)
		{
		  nRc = 1;
		  break;
		}
	    }
	}
    }

  return nRc;
}
9091
9092 /* Write into appropriate registers a function return value of type
9093 TYPE, given in virtual format. */
9094
static void
arm_store_return_value (struct type *type, struct regcache *regs,
			const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      gdb_byte buf[MAX_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:

	  /* FPA returns floats in F0, in the FPA's extended internal
	     format; convert before writing.  */
	  convert_to_extended (floatformat_from_type (type), buf, valbuf,
			       gdbarch_byte_order (gdbarch));
	  regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Float returned in r0 (and r1 for values wider than one
	     word, e.g. double).  */
	  regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
				   valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_store_return_value: Floating "
			    "point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[INT_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in consecutive
	     registers starting with r0.  This will always be a multiple of
	     the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = ARM_A1_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= INT_REGISTER_SIZE;
	      valbuf += INT_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
9185
9186
/* Handle function return values.  Implements the gdbarch return_value
   method: decide whether VALTYPE is returned in registers or in memory,
   and, when registers are used, copy the value between REGCACHE and
   READBUF/WRITEBUF (either of which may be NULL).  */

static enum return_value_convention
arm_return_value (struct gdbarch *gdbarch, struct value *function,
		  struct type *valtype, struct regcache *regcache,
		  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct type *func_type = function ? value_type (function) : NULL;
  enum arm_vfp_cprc_base_type vfp_base_type;
  int vfp_base_count;

  /* If the function uses the VFP variant of the AAPCS and the value is
     a VFP co-processor register candidate, it is returned in VFP
     registers (s<n>, d<n> or q<n>, depending on the base type).  */
  if (arm_vfp_abi_for_function (gdbarch, func_type)
      && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
    {
      int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
      int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
      int i;
      for (i = 0; i < vfp_base_count; i++)
	{
	  if (reg_char == 'q')
	    {
	      /* Quad registers are composed of two double registers,
		 so go through the dedicated helpers.  */
	      if (writebuf)
		arm_neon_quad_write (gdbarch, regcache, i,
				     writebuf + i * unit_length);

	      if (readbuf)
		arm_neon_quad_read (gdbarch, regcache, i,
				    readbuf + i * unit_length);
	    }
	  else
	    {
	      char name_buf[4];
	      int regnum;

	      /* Look up the register (e.g. "s0" or "d3") by name.  */
	      xsnprintf (name_buf, sizeof (name_buf), "%c%d", reg_char, i);
	      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						    strlen (name_buf));
	      if (writebuf)
		regcache_cooked_write (regcache, regnum,
				       writebuf + i * unit_length);
	      if (readbuf)
		regcache_cooked_read (regcache, regnum,
				      readbuf + i * unit_length);
	    }
	}
      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  /* Aggregates may have to be returned in memory, depending on the
     struct-return convention in effect.  */
  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (tdep->struct_return == pcc_struct_return
	  || arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }

  /* AAPCS returns complex types longer than a register in memory.  */
  if (tdep->arm_abi != ARM_ABI_APCS
      && TYPE_CODE (valtype) == TYPE_CODE_COMPLEX
      && TYPE_LENGTH (valtype) > INT_REGISTER_SIZE)
    return RETURN_VALUE_STRUCT_CONVENTION;

  /* Everything else goes through the core-register paths.  */
  if (writebuf)
    arm_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    arm_extract_return_value (valtype, regcache, readbuf);

  return RETURN_VALUE_REGISTER_CONVENTION;
}
9259
9260
9261 static int
9262 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
9263 {
9264 struct gdbarch *gdbarch = get_frame_arch (frame);
9265 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9266 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
9267 CORE_ADDR jb_addr;
9268 gdb_byte buf[INT_REGISTER_SIZE];
9269
9270 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
9271
9272 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
9273 INT_REGISTER_SIZE))
9274 return 0;
9275
9276 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
9277 return 1;
9278 }
9279
9280 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
9281 return the target PC. Otherwise return 0. */
9282
9283 CORE_ADDR
9284 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
9285 {
9286 const char *name;
9287 int namelen;
9288 CORE_ADDR start_addr;
9289
9290 /* Find the starting address and name of the function containing the PC. */
9291 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
9292 {
9293 /* Trampoline 'bx reg' doesn't belong to any functions. Do the
9294 check here. */
9295 start_addr = arm_skip_bx_reg (frame, pc);
9296 if (start_addr != 0)
9297 return start_addr;
9298
9299 return 0;
9300 }
9301
9302 /* If PC is in a Thumb call or return stub, return the address of the
9303 target PC, which is in a register. The thunk functions are called
9304 _call_via_xx, where x is the register name. The possible names
9305 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
9306 functions, named __ARM_call_via_r[0-7]. */
9307 if (strncmp (name, "_call_via_", 10) == 0
9308 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
9309 {
9310 /* Use the name suffix to determine which register contains the
9311 target PC. */
9312 static char *table[15] =
9313 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
9314 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
9315 };
9316 int regno;
9317 int offset = strlen (name) - 2;
9318
9319 for (regno = 0; regno <= 14; regno++)
9320 if (strcmp (&name[offset], table[regno]) == 0)
9321 return get_frame_register_unsigned (frame, regno);
9322 }
9323
9324 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
9325 non-interworking calls to foo. We could decode the stubs
9326 to find the target but it's easier to use the symbol table. */
9327 namelen = strlen (name);
9328 if (name[0] == '_' && name[1] == '_'
9329 && ((namelen > 2 + strlen ("_from_thumb")
9330 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
9331 strlen ("_from_thumb")) == 0)
9332 || (namelen > 2 + strlen ("_from_arm")
9333 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
9334 strlen ("_from_arm")) == 0)))
9335 {
9336 char *target_name;
9337 int target_len = namelen - 2;
9338 struct bound_minimal_symbol minsym;
9339 struct objfile *objfile;
9340 struct obj_section *sec;
9341
9342 if (name[namelen - 1] == 'b')
9343 target_len -= strlen ("_from_thumb");
9344 else
9345 target_len -= strlen ("_from_arm");
9346
9347 target_name = alloca (target_len + 1);
9348 memcpy (target_name, name + 2, target_len);
9349 target_name[target_len] = '\0';
9350
9351 sec = find_pc_section (pc);
9352 objfile = (sec == NULL) ? NULL : sec->objfile;
9353 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
9354 if (minsym.minsym != NULL)
9355 return BMSYMBOL_VALUE_ADDRESS (minsym);
9356 else
9357 return 0;
9358 }
9359
9360 return 0; /* not a stub */
9361 }
9362
9363 static void
9364 set_arm_command (char *args, int from_tty)
9365 {
9366 printf_unfiltered (_("\
9367 \"set arm\" must be followed by an apporpriate subcommand.\n"));
9368 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
9369 }
9370
/* Top-level "show arm" command: display the values of all "show arm"
   subcommands.  */

static void
show_arm_command (char *args, int from_tty)
{
  cmd_show_list (showarmcmdlist, from_tty, "");
}
9376
9377 static void
9378 arm_update_current_architecture (void)
9379 {
9380 struct gdbarch_info info;
9381
9382 /* If the current architecture is not ARM, we have nothing to do. */
9383 if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_arm)
9384 return;
9385
9386 /* Update the architecture. */
9387 gdbarch_info_init (&info);
9388
9389 if (!gdbarch_update_p (info))
9390 internal_error (__FILE__, __LINE__, _("could not update architecture"));
9391 }
9392
9393 static void
9394 set_fp_model_sfunc (char *args, int from_tty,
9395 struct cmd_list_element *c)
9396 {
9397 enum arm_float_model fp_model;
9398
9399 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
9400 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
9401 {
9402 arm_fp_model = fp_model;
9403 break;
9404 }
9405
9406 if (fp_model == ARM_FLOAT_LAST)
9407 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
9408 current_fp_model);
9409
9410 arm_update_current_architecture ();
9411 }
9412
9413 static void
9414 show_fp_model (struct ui_file *file, int from_tty,
9415 struct cmd_list_element *c, const char *value)
9416 {
9417 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9418
9419 if (arm_fp_model == ARM_FLOAT_AUTO
9420 && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
9421 fprintf_filtered (file, _("\
9422 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
9423 fp_model_strings[tdep->fp_model]);
9424 else
9425 fprintf_filtered (file, _("\
9426 The current ARM floating point model is \"%s\".\n"),
9427 fp_model_strings[arm_fp_model]);
9428 }
9429
9430 static void
9431 arm_set_abi (char *args, int from_tty,
9432 struct cmd_list_element *c)
9433 {
9434 enum arm_abi_kind arm_abi;
9435
9436 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
9437 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
9438 {
9439 arm_abi_global = arm_abi;
9440 break;
9441 }
9442
9443 if (arm_abi == ARM_ABI_LAST)
9444 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
9445 arm_abi_string);
9446
9447 arm_update_current_architecture ();
9448 }
9449
9450 static void
9451 arm_show_abi (struct ui_file *file, int from_tty,
9452 struct cmd_list_element *c, const char *value)
9453 {
9454 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9455
9456 if (arm_abi_global == ARM_ABI_AUTO
9457 && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
9458 fprintf_filtered (file, _("\
9459 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
9460 arm_abi_strings[tdep->arm_abi]);
9461 else
9462 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
9463 arm_abi_string);
9464 }
9465
/* Show routine for "show arm fallback-mode": report the execution
   mode assumed when symbol information is unavailable.  */

static void
arm_show_fallback_mode (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("The current execution mode assumed "
		      "(when symbols are unavailable) is \"%s\".\n"),
		    arm_fallback_mode_string);
}
9475
9476 static void
9477 arm_show_force_mode (struct ui_file *file, int from_tty,
9478 struct cmd_list_element *c, const char *value)
9479 {
9480 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9481
9482 fprintf_filtered (file,
9483 _("The current execution mode assumed "
9484 "(even when symbols are available) is \"%s\".\n"),
9485 arm_force_mode_string);
9486 }
9487
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* ARGS/FROM_TTY are unused; the new style string has already been
     stored by the "set" machinery, so just propagate it.  */
  set_disassembly_style ();
}
9499 \f
9500 /* Return the ARM register name corresponding to register I. */
9501 static const char *
9502 arm_register_name (struct gdbarch *gdbarch, int i)
9503 {
9504 const int num_regs = gdbarch_num_regs (gdbarch);
9505
9506 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
9507 && i >= num_regs && i < num_regs + 32)
9508 {
9509 static const char *const vfp_pseudo_names[] = {
9510 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9511 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9512 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9513 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9514 };
9515
9516 return vfp_pseudo_names[i - num_regs];
9517 }
9518
9519 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
9520 && i >= num_regs + 32 && i < num_regs + 32 + 16)
9521 {
9522 static const char *const neon_pseudo_names[] = {
9523 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9524 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9525 };
9526
9527 return neon_pseudo_names[i - num_regs - 32];
9528 }
9529
9530 if (i >= ARRAY_SIZE (arm_register_names))
9531 /* These registers are only supported on targets which supply
9532 an XML description. */
9533 return "";
9534
9535 return arm_register_names[i];
9536 }
9537
9538 static void
9539 set_disassembly_style (void)
9540 {
9541 int current;
9542
9543 /* Find the style that the user wants. */
9544 for (current = 0; current < num_disassembly_options; current++)
9545 if (disassembly_style == valid_disassembly_styles[current])
9546 break;
9547 gdb_assert (current < num_disassembly_options);
9548
9549 /* Synchronize the disassembler. */
9550 set_arm_regname_option (current);
9551 }
9552
9553 /* Test whether the coff symbol specific value corresponds to a Thumb
9554 function. */
9555
9556 static int
9557 coff_sym_is_thumb (int val)
9558 {
9559 return (val == C_THUMBEXT
9560 || val == C_THUMBSTAT
9561 || val == C_THUMBEXTFUNC
9562 || val == C_THUMBSTATFUNC
9563 || val == C_THUMBLABEL);
9564 }
9565
9566 /* arm_coff_make_msymbol_special()
9567 arm_elf_make_msymbol_special()
9568
9569 These functions test whether the COFF or ELF symbol corresponds to
9570 an address in thumb code, and set a "special" bit in a minimal
9571 symbol to indicate that it does. */
9572
9573 static void
9574 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
9575 {
9576 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
9577 == ST_BRANCH_TO_THUMB)
9578 MSYMBOL_SET_SPECIAL (msym);
9579 }
9580
/* Mark MSYM as special (i.e. Thumb) if the COFF symbol class VAL
   denotes a Thumb entity.  */

static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
9587
9588 static void
9589 arm_objfile_data_free (struct objfile *objfile, void *arg)
9590 {
9591 struct arm_per_objfile *data = arg;
9592 unsigned int i;
9593
9594 for (i = 0; i < objfile->obfd->section_count; i++)
9595 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
9596 }
9597
/* Implement the gdbarch record_special_symbol method: remember ARM
   ELF mapping symbols ($a, $t and $d) in per-section vectors sorted
   by value, hanging off OBJFILE's per-objfile data.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
			   asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  VEC(arm_mapping_symbol_s) **map_p;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  /* Only the three mapping-symbol kinds are of interest here.  */
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Lazily create the per-objfile storage the first time a mapping
     symbol is seen for this objfile.  */
  data = objfile_data (objfile, arm_objfile_data_key);
  if (data == NULL)
    {
      data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
			     struct arm_per_objfile);
      set_objfile_data (objfile, arm_objfile_data_key, data);
      data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
					   objfile->obfd->section_count,
					   VEC(arm_mapping_symbol_s) *);
    }
  map_p = &data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  if (!VEC_empty (arm_mapping_symbol_s, *map_p))
    {
      struct arm_mapping_symbol *prev_map_sym;

      prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
      if (prev_map_sym->value >= sym->value)
	{
	  /* Out-of-order symbol: insert it at the position that keeps
	     the vector sorted by value.  */
	  unsigned int idx;
	  idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
				 arm_compare_mapping_symbols);
	  VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
	  return;
	}
    }

  VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
}
9646
9647 static void
9648 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
9649 {
9650 struct gdbarch *gdbarch = get_regcache_arch (regcache);
9651 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
9652
9653 /* If necessary, set the T bit. */
9654 if (arm_apcs_32)
9655 {
9656 ULONGEST val, t_bit;
9657 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
9658 t_bit = arm_psr_thumb_bit (gdbarch);
9659 if (arm_pc_is_thumb (gdbarch, pc))
9660 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9661 val | t_bit);
9662 else
9663 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9664 val & ~t_bit);
9665 }
9666 }
9667
9668 /* Read the contents of a NEON quad register, by reading from two
9669 double registers. This is used to implement the quad pseudo
9670 registers, and for argument passing in case the quad registers are
9671 missing; vectors are passed in quad registers when using the VFP
9672 ABI, even if a NEON unit is not present. REGNUM is the index of
9673 the quad register, in [0, 15]. */
9674
9675 static enum register_status
9676 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
9677 int regnum, gdb_byte *buf)
9678 {
9679 char name_buf[4];
9680 gdb_byte reg_buf[8];
9681 int offset, double_regnum;
9682 enum register_status status;
9683
9684 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
9685 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9686 strlen (name_buf));
9687
9688 /* d0 is always the least significant half of q0. */
9689 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9690 offset = 8;
9691 else
9692 offset = 0;
9693
9694 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9695 if (status != REG_VALID)
9696 return status;
9697 memcpy (buf + offset, reg_buf, 8);
9698
9699 offset = 8 - offset;
9700 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
9701 if (status != REG_VALID)
9702 return status;
9703 memcpy (buf + offset, reg_buf, 8);
9704
9705 return REG_VALID;
9706 }
9707
9708 static enum register_status
9709 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
9710 int regnum, gdb_byte *buf)
9711 {
9712 const int num_regs = gdbarch_num_regs (gdbarch);
9713 char name_buf[4];
9714 gdb_byte reg_buf[8];
9715 int offset, double_regnum;
9716
9717 gdb_assert (regnum >= num_regs);
9718 regnum -= num_regs;
9719
9720 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9721 /* Quad-precision register. */
9722 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
9723 else
9724 {
9725 enum register_status status;
9726
9727 /* Single-precision register. */
9728 gdb_assert (regnum < 32);
9729
9730 /* s0 is always the least significant half of d0. */
9731 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9732 offset = (regnum & 1) ? 0 : 4;
9733 else
9734 offset = (regnum & 1) ? 4 : 0;
9735
9736 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
9737 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9738 strlen (name_buf));
9739
9740 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9741 if (status == REG_VALID)
9742 memcpy (buf, reg_buf + offset, 4);
9743 return status;
9744 }
9745 }
9746
9747 /* Store the contents of BUF to a NEON quad register, by writing to
9748 two double registers. This is used to implement the quad pseudo
9749 registers, and for argument passing in case the quad registers are
9750 missing; vectors are passed in quad registers when using the VFP
9751 ABI, even if a NEON unit is not present. REGNUM is the index
9752 of the quad register, in [0, 15]. */
9753
9754 static void
9755 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
9756 int regnum, const gdb_byte *buf)
9757 {
9758 char name_buf[4];
9759 int offset, double_regnum;
9760
9761 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
9762 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9763 strlen (name_buf));
9764
9765 /* d0 is always the least significant half of q0. */
9766 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9767 offset = 8;
9768 else
9769 offset = 0;
9770
9771 regcache_raw_write (regcache, double_regnum, buf + offset);
9772 offset = 8 - offset;
9773 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
9774 }
9775
9776 static void
9777 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
9778 int regnum, const gdb_byte *buf)
9779 {
9780 const int num_regs = gdbarch_num_regs (gdbarch);
9781 char name_buf[4];
9782 gdb_byte reg_buf[8];
9783 int offset, double_regnum;
9784
9785 gdb_assert (regnum >= num_regs);
9786 regnum -= num_regs;
9787
9788 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9789 /* Quad-precision register. */
9790 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
9791 else
9792 {
9793 /* Single-precision register. */
9794 gdb_assert (regnum < 32);
9795
9796 /* s0 is always the least significant half of d0. */
9797 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9798 offset = (regnum & 1) ? 0 : 4;
9799 else
9800 offset = (regnum & 1) ? 4 : 0;
9801
9802 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
9803 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9804 strlen (name_buf));
9805
9806 regcache_raw_read (regcache, double_regnum, reg_buf);
9807 memcpy (reg_buf + offset, buf, 4);
9808 regcache_raw_write (regcache, double_regnum, reg_buf);
9809 }
9810 }
9811
/* Read a user register whose BATON points at the raw register number
   it maps onto.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_p = baton;

  return value_of_register (*regnum_p, frame);
}
9818 \f
9819 static enum gdb_osabi
9820 arm_elf_osabi_sniffer (bfd *abfd)
9821 {
9822 unsigned int elfosabi;
9823 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
9824
9825 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
9826
9827 if (elfosabi == ELFOSABI_ARM)
9828 /* GNU tools use this value. Check note sections in this case,
9829 as well. */
9830 bfd_map_over_sections (abfd,
9831 generic_elf_osabi_sniff_abi_tag_sections,
9832 &osabi);
9833
9834 /* Anything else will be handled by the generic ELF sniffer. */
9835 return osabi;
9836 }
9837
9838 static int
9839 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
9840 struct reggroup *group)
9841 {
9842 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9843 this, FPS register belongs to save_regroup, restore_reggroup, and
9844 all_reggroup, of course. */
9845 if (regnum == ARM_FPS_REGNUM)
9846 return (group == float_reggroup
9847 || group == save_reggroup
9848 || group == restore_reggroup
9849 || group == all_reggroup);
9850 else
9851 return default_register_reggroup_p (gdbarch, regnum, group);
9852 }
9853
9854 \f
9855 /* For backward-compatibility we allow two 'g' packet lengths with
9856 the remote protocol depending on whether FPA registers are
9857 supplied. M-profile targets do not have FPA registers, but some
9858 stubs already exist in the wild which use a 'g' packet which
9859 supplies them albeit with dummy values. The packet format which
9860 includes FPA registers should be considered deprecated for
9861 M-profile targets. */
9862
9863 static void
9864 arm_register_g_packet_guesses (struct gdbarch *gdbarch)
9865 {
9866 if (gdbarch_tdep (gdbarch)->is_m)
9867 {
9868 /* If we know from the executable this is an M-profile target,
9869 cater for remote targets whose register set layout is the
9870 same as the FPA layout. */
9871 register_remote_g_packet_guess (gdbarch,
9872 /* r0-r12,sp,lr,pc; f0-f7; fps,xpsr */
9873 (16 * INT_REGISTER_SIZE)
9874 + (8 * FP_REGISTER_SIZE)
9875 + (2 * INT_REGISTER_SIZE),
9876 tdesc_arm_with_m_fpa_layout);
9877
9878 /* The regular M-profile layout. */
9879 register_remote_g_packet_guess (gdbarch,
9880 /* r0-r12,sp,lr,pc; xpsr */
9881 (16 * INT_REGISTER_SIZE)
9882 + INT_REGISTER_SIZE,
9883 tdesc_arm_with_m);
9884
9885 /* M-profile plus M4F VFP. */
9886 register_remote_g_packet_guess (gdbarch,
9887 /* r0-r12,sp,lr,pc; d0-d15; fpscr,xpsr */
9888 (16 * INT_REGISTER_SIZE)
9889 + (16 * VFP_REGISTER_SIZE)
9890 + (2 * INT_REGISTER_SIZE),
9891 tdesc_arm_with_m_vfp_d16);
9892 }
9893
9894 /* Otherwise we don't have a useful guess. */
9895 }
9896
9897 \f
9898 /* Initialize the current architecture based on INFO. If possible,
9899 re-use an architecture from ARCHES, which is a list of
9900 architectures already created during this debugging session.
9901
9902 Called e.g. at program startup, when reading a core file, and when
9903 reading a binary file. */
9904
9905 static struct gdbarch *
9906 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
9907 {
9908 struct gdbarch_tdep *tdep;
9909 struct gdbarch *gdbarch;
9910 struct gdbarch_list *best_arch;
9911 enum arm_abi_kind arm_abi = arm_abi_global;
9912 enum arm_float_model fp_model = arm_fp_model;
9913 struct tdesc_arch_data *tdesc_data = NULL;
9914 int i, is_m = 0;
9915 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
9916 int have_neon = 0;
9917 int have_fpa_registers = 1;
9918 const struct target_desc *tdesc = info.target_desc;
9919
9920 /* If we have an object to base this architecture on, try to determine
9921 its ABI. */
9922
9923 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
9924 {
9925 int ei_osabi, e_flags;
9926
9927 switch (bfd_get_flavour (info.abfd))
9928 {
9929 case bfd_target_aout_flavour:
9930 /* Assume it's an old APCS-style ABI. */
9931 arm_abi = ARM_ABI_APCS;
9932 break;
9933
9934 case bfd_target_coff_flavour:
9935 /* Assume it's an old APCS-style ABI. */
9936 /* XXX WinCE? */
9937 arm_abi = ARM_ABI_APCS;
9938 break;
9939
9940 case bfd_target_elf_flavour:
9941 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
9942 e_flags = elf_elfheader (info.abfd)->e_flags;
9943
9944 if (ei_osabi == ELFOSABI_ARM)
9945 {
9946 /* GNU tools used to use this value, but do not for EABI
9947 objects. There's nowhere to tag an EABI version
9948 anyway, so assume APCS. */
9949 arm_abi = ARM_ABI_APCS;
9950 }
9951 else if (ei_osabi == ELFOSABI_NONE)
9952 {
9953 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
9954 int attr_arch, attr_profile;
9955
9956 switch (eabi_ver)
9957 {
9958 case EF_ARM_EABI_UNKNOWN:
9959 /* Assume GNU tools. */
9960 arm_abi = ARM_ABI_APCS;
9961 break;
9962
9963 case EF_ARM_EABI_VER4:
9964 case EF_ARM_EABI_VER5:
9965 arm_abi = ARM_ABI_AAPCS;
9966 /* EABI binaries default to VFP float ordering.
9967 They may also contain build attributes that can
9968 be used to identify if the VFP argument-passing
9969 ABI is in use. */
9970 if (fp_model == ARM_FLOAT_AUTO)
9971 {
9972 #ifdef HAVE_ELF
9973 switch (bfd_elf_get_obj_attr_int (info.abfd,
9974 OBJ_ATTR_PROC,
9975 Tag_ABI_VFP_args))
9976 {
9977 case 0:
9978 /* "The user intended FP parameter/result
9979 passing to conform to AAPCS, base
9980 variant". */
9981 fp_model = ARM_FLOAT_SOFT_VFP;
9982 break;
9983 case 1:
9984 /* "The user intended FP parameter/result
9985 passing to conform to AAPCS, VFP
9986 variant". */
9987 fp_model = ARM_FLOAT_VFP;
9988 break;
9989 case 2:
9990 /* "The user intended FP parameter/result
9991 passing to conform to tool chain-specific
9992 conventions" - we don't know any such
9993 conventions, so leave it as "auto". */
9994 break;
9995 default:
9996 /* Attribute value not mentioned in the
9997 October 2008 ABI, so leave it as
9998 "auto". */
9999 break;
10000 }
10001 #else
10002 fp_model = ARM_FLOAT_SOFT_VFP;
10003 #endif
10004 }
10005 break;
10006
10007 default:
10008 /* Leave it as "auto". */
10009 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
10010 break;
10011 }
10012
10013 #ifdef HAVE_ELF
10014 /* Detect M-profile programs. This only works if the
10015 executable file includes build attributes; GCC does
10016 copy them to the executable, but e.g. RealView does
10017 not. */
10018 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
10019 Tag_CPU_arch);
10020 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
10021 OBJ_ATTR_PROC,
10022 Tag_CPU_arch_profile);
10023 /* GCC specifies the profile for v6-M; RealView only
10024 specifies the profile for architectures starting with
10025 V7 (as opposed to architectures with a tag
10026 numerically greater than TAG_CPU_ARCH_V7). */
10027 if (!tdesc_has_registers (tdesc)
10028 && (attr_arch == TAG_CPU_ARCH_V6_M
10029 || attr_arch == TAG_CPU_ARCH_V6S_M
10030 || attr_profile == 'M'))
10031 is_m = 1;
10032 #endif
10033 }
10034
10035 if (fp_model == ARM_FLOAT_AUTO)
10036 {
10037 int e_flags = elf_elfheader (info.abfd)->e_flags;
10038
10039 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
10040 {
10041 case 0:
10042 /* Leave it as "auto". Strictly speaking this case
10043 means FPA, but almost nobody uses that now, and
10044 many toolchains fail to set the appropriate bits
10045 for the floating-point model they use. */
10046 break;
10047 case EF_ARM_SOFT_FLOAT:
10048 fp_model = ARM_FLOAT_SOFT_FPA;
10049 break;
10050 case EF_ARM_VFP_FLOAT:
10051 fp_model = ARM_FLOAT_VFP;
10052 break;
10053 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
10054 fp_model = ARM_FLOAT_SOFT_VFP;
10055 break;
10056 }
10057 }
10058
10059 if (e_flags & EF_ARM_BE8)
10060 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
10061
10062 break;
10063
10064 default:
10065 /* Leave it as "auto". */
10066 break;
10067 }
10068 }
10069
10070 /* Check any target description for validity. */
10071 if (tdesc_has_registers (tdesc))
10072 {
10073 /* For most registers we require GDB's default names; but also allow
10074 the numeric names for sp / lr / pc, as a convenience. */
10075 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
10076 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
10077 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
10078
10079 const struct tdesc_feature *feature;
10080 int valid_p;
10081
10082 feature = tdesc_find_feature (tdesc,
10083 "org.gnu.gdb.arm.core");
10084 if (feature == NULL)
10085 {
10086 feature = tdesc_find_feature (tdesc,
10087 "org.gnu.gdb.arm.m-profile");
10088 if (feature == NULL)
10089 return NULL;
10090 else
10091 is_m = 1;
10092 }
10093
10094 tdesc_data = tdesc_data_alloc ();
10095
10096 valid_p = 1;
10097 for (i = 0; i < ARM_SP_REGNUM; i++)
10098 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
10099 arm_register_names[i]);
10100 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10101 ARM_SP_REGNUM,
10102 arm_sp_names);
10103 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10104 ARM_LR_REGNUM,
10105 arm_lr_names);
10106 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10107 ARM_PC_REGNUM,
10108 arm_pc_names);
10109 if (is_m)
10110 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10111 ARM_PS_REGNUM, "xpsr");
10112 else
10113 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10114 ARM_PS_REGNUM, "cpsr");
10115
10116 if (!valid_p)
10117 {
10118 tdesc_data_cleanup (tdesc_data);
10119 return NULL;
10120 }
10121
10122 feature = tdesc_find_feature (tdesc,
10123 "org.gnu.gdb.arm.fpa");
10124 if (feature != NULL)
10125 {
10126 valid_p = 1;
10127 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
10128 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
10129 arm_register_names[i]);
10130 if (!valid_p)
10131 {
10132 tdesc_data_cleanup (tdesc_data);
10133 return NULL;
10134 }
10135 }
10136 else
10137 have_fpa_registers = 0;
10138
10139 feature = tdesc_find_feature (tdesc,
10140 "org.gnu.gdb.xscale.iwmmxt");
10141 if (feature != NULL)
10142 {
10143 static const char *const iwmmxt_names[] = {
10144 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
10145 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
10146 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
10147 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
10148 };
10149
10150 valid_p = 1;
10151 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
10152 valid_p
10153 &= tdesc_numbered_register (feature, tdesc_data, i,
10154 iwmmxt_names[i - ARM_WR0_REGNUM]);
10155
10156 /* Check for the control registers, but do not fail if they
10157 are missing. */
10158 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
10159 tdesc_numbered_register (feature, tdesc_data, i,
10160 iwmmxt_names[i - ARM_WR0_REGNUM]);
10161
10162 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
10163 valid_p
10164 &= tdesc_numbered_register (feature, tdesc_data, i,
10165 iwmmxt_names[i - ARM_WR0_REGNUM]);
10166
10167 if (!valid_p)
10168 {
10169 tdesc_data_cleanup (tdesc_data);
10170 return NULL;
10171 }
10172 }
10173
10174 /* If we have a VFP unit, check whether the single precision registers
10175 are present. If not, then we will synthesize them as pseudo
10176 registers. */
10177 feature = tdesc_find_feature (tdesc,
10178 "org.gnu.gdb.arm.vfp");
10179 if (feature != NULL)
10180 {
10181 static const char *const vfp_double_names[] = {
10182 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
10183 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
10184 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
10185 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
10186 };
10187
10188 /* Require the double precision registers. There must be either
10189 16 or 32. */
10190 valid_p = 1;
10191 for (i = 0; i < 32; i++)
10192 {
10193 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10194 ARM_D0_REGNUM + i,
10195 vfp_double_names[i]);
10196 if (!valid_p)
10197 break;
10198 }
10199 if (!valid_p && i == 16)
10200 valid_p = 1;
10201
10202 /* Also require FPSCR. */
10203 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10204 ARM_FPSCR_REGNUM, "fpscr");
10205 if (!valid_p)
10206 {
10207 tdesc_data_cleanup (tdesc_data);
10208 return NULL;
10209 }
10210
10211 if (tdesc_unnumbered_register (feature, "s0") == 0)
10212 have_vfp_pseudos = 1;
10213
10214 have_vfp_registers = 1;
10215
10216 /* If we have VFP, also check for NEON. The architecture allows
10217 NEON without VFP (integer vector operations only), but GDB
10218 does not support that. */
10219 feature = tdesc_find_feature (tdesc,
10220 "org.gnu.gdb.arm.neon");
10221 if (feature != NULL)
10222 {
10223 /* NEON requires 32 double-precision registers. */
10224 if (i != 32)
10225 {
10226 tdesc_data_cleanup (tdesc_data);
10227 return NULL;
10228 }
10229
10230 /* If there are quad registers defined by the stub, use
10231 their type; otherwise (normally) provide them with
10232 the default type. */
10233 if (tdesc_unnumbered_register (feature, "q0") == 0)
10234 have_neon_pseudos = 1;
10235
10236 have_neon = 1;
10237 }
10238 }
10239 }
10240
10241 /* If there is already a candidate, use it. */
10242 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
10243 best_arch != NULL;
10244 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
10245 {
10246 if (arm_abi != ARM_ABI_AUTO
10247 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
10248 continue;
10249
10250 if (fp_model != ARM_FLOAT_AUTO
10251 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
10252 continue;
10253
10254 /* There are various other properties in tdep that we do not
10255 need to check here: those derived from a target description,
10256 since gdbarches with a different target description are
10257 automatically disqualified. */
10258
10259 /* Do check is_m, though, since it might come from the binary. */
10260 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
10261 continue;
10262
10263 /* Found a match. */
10264 break;
10265 }
10266
10267 if (best_arch != NULL)
10268 {
10269 if (tdesc_data != NULL)
10270 tdesc_data_cleanup (tdesc_data);
10271 return best_arch->gdbarch;
10272 }
10273
10274 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
10275 gdbarch = gdbarch_alloc (&info, tdep);
10276
10277 /* Record additional information about the architecture we are defining.
10278 These are gdbarch discriminators, like the OSABI. */
10279 tdep->arm_abi = arm_abi;
10280 tdep->fp_model = fp_model;
10281 tdep->is_m = is_m;
10282 tdep->have_fpa_registers = have_fpa_registers;
10283 tdep->have_vfp_registers = have_vfp_registers;
10284 tdep->have_vfp_pseudos = have_vfp_pseudos;
10285 tdep->have_neon_pseudos = have_neon_pseudos;
10286 tdep->have_neon = have_neon;
10287
10288 arm_register_g_packet_guesses (gdbarch);
10289
10290 /* Breakpoints. */
10291 switch (info.byte_order_for_code)
10292 {
10293 case BFD_ENDIAN_BIG:
10294 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
10295 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
10296 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
10297 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
10298
10299 break;
10300
10301 case BFD_ENDIAN_LITTLE:
10302 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
10303 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
10304 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
10305 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
10306
10307 break;
10308
10309 default:
10310 internal_error (__FILE__, __LINE__,
10311 _("arm_gdbarch_init: bad byte order for float format"));
10312 }
10313
10314 /* On ARM targets char defaults to unsigned. */
10315 set_gdbarch_char_signed (gdbarch, 0);
10316
10317 /* Note: for displaced stepping, this includes the breakpoint, and one word
10318 of additional scratch space. This setting isn't used for anything beside
10319 displaced stepping at present. */
10320 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
10321
10322 /* This should be low enough for everything. */
10323 tdep->lowest_pc = 0x20;
10324 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
10325
10326 /* The default, for both APCS and AAPCS, is to return small
10327 structures in registers. */
10328 tdep->struct_return = reg_struct_return;
10329
10330 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
10331 set_gdbarch_frame_align (gdbarch, arm_frame_align);
10332
10333 set_gdbarch_write_pc (gdbarch, arm_write_pc);
10334
10335 /* Frame handling. */
10336 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
10337 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
10338 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
10339
10340 frame_base_set_default (gdbarch, &arm_normal_base);
10341
10342 /* Address manipulation. */
10343 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
10344
10345 /* Advance PC across function entry code. */
10346 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
10347
10348 /* Detect whether PC is in function epilogue. */
10349 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
10350
10351 /* Skip trampolines. */
10352 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
10353
10354 /* The stack grows downward. */
10355 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
10356
10357 /* Breakpoint manipulation. */
10358 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
10359 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
10360 arm_remote_breakpoint_from_pc);
10361
10362 /* Information about registers, etc. */
10363 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
10364 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
10365 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
10366 set_gdbarch_register_type (gdbarch, arm_register_type);
10367 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
10368
10369 /* This "info float" is FPA-specific. Use the generic version if we
10370 do not have FPA. */
10371 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
10372 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
10373
10374 /* Internal <-> external register number maps. */
10375 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
10376 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
10377
10378 set_gdbarch_register_name (gdbarch, arm_register_name);
10379
10380 /* Returning results. */
10381 set_gdbarch_return_value (gdbarch, arm_return_value);
10382
10383 /* Disassembly. */
10384 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
10385
10386 /* Minsymbol frobbing. */
10387 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
10388 set_gdbarch_coff_make_msymbol_special (gdbarch,
10389 arm_coff_make_msymbol_special);
10390 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
10391
10392 /* Thumb-2 IT block support. */
10393 set_gdbarch_adjust_breakpoint_address (gdbarch,
10394 arm_adjust_breakpoint_address);
10395
10396 /* Virtual tables. */
10397 set_gdbarch_vbit_in_delta (gdbarch, 1);
10398
10399 /* Hook in the ABI-specific overrides, if they have been registered. */
10400 gdbarch_init_osabi (info, gdbarch);
10401
10402 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
10403
10404 /* Add some default predicates. */
10405 if (is_m)
10406 frame_unwind_append_unwinder (gdbarch, &arm_m_exception_unwind);
10407 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
10408 dwarf2_append_unwinders (gdbarch);
10409 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
10410 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
10411
10412 /* Now we have tuned the configuration, set a few final things,
10413 based on what the OS ABI has told us. */
10414
10415 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
10416 binaries are always marked. */
10417 if (tdep->arm_abi == ARM_ABI_AUTO)
10418 tdep->arm_abi = ARM_ABI_APCS;
10419
10420 /* Watchpoints are not steppable. */
10421 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
10422
10423 /* We used to default to FPA for generic ARM, but almost nobody
10424 uses that now, and we now provide a way for the user to force
10425 the model. So default to the most useful variant. */
10426 if (tdep->fp_model == ARM_FLOAT_AUTO)
10427 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
10428
10429 if (tdep->jb_pc >= 0)
10430 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
10431
10432 /* Floating point sizes and format. */
10433 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
10434 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
10435 {
10436 set_gdbarch_double_format
10437 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10438 set_gdbarch_long_double_format
10439 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10440 }
10441 else
10442 {
10443 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
10444 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
10445 }
10446
10447 if (have_vfp_pseudos)
10448 {
10449 /* NOTE: These are the only pseudo registers used by
10450 the ARM target at the moment. If more are added, a
10451 little more care in numbering will be needed. */
10452
10453 int num_pseudos = 32;
10454 if (have_neon_pseudos)
10455 num_pseudos += 16;
10456 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
10457 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
10458 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
10459 }
10460
10461 if (tdesc_data)
10462 {
10463 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
10464
10465 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
10466
10467 /* Override tdesc_register_type to adjust the types of VFP
10468 registers for NEON. */
10469 set_gdbarch_register_type (gdbarch, arm_register_type);
10470 }
10471
10472 /* Add standard register aliases. We add aliases even for those
10473 nanes which are used by the current architecture - it's simpler,
10474 and does no harm, since nothing ever lists user registers. */
10475 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
10476 user_reg_add (gdbarch, arm_register_aliases[i].name,
10477 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
10478
10479 return gdbarch;
10480 }
10481
10482 static void
10483 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
10484 {
10485 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
10486
10487 if (tdep == NULL)
10488 return;
10489
10490 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
10491 (unsigned long) tdep->lowest_pc);
10492 }
10493
10494 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
10495
10496 void
10497 _initialize_arm_tdep (void)
10498 {
10499 struct ui_file *stb;
10500 long length;
10501 struct cmd_list_element *new_set, *new_show;
10502 const char *setname;
10503 const char *setdesc;
10504 const char *const *regnames;
10505 int numregs, i, j;
10506 static char *helptext;
10507 char regdesc[1024], *rdptr = regdesc;
10508 size_t rest = sizeof (regdesc);
10509
10510 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
10511
10512 arm_objfile_data_key
10513 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
10514
10515 /* Add ourselves to objfile event chain. */
10516 observer_attach_new_objfile (arm_exidx_new_objfile);
10517 arm_exidx_data_key
10518 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
10519
10520 /* Register an ELF OS ABI sniffer for ARM binaries. */
10521 gdbarch_register_osabi_sniffer (bfd_arch_arm,
10522 bfd_target_elf_flavour,
10523 arm_elf_osabi_sniffer);
10524
10525 /* Initialize the standard target descriptions. */
10526 initialize_tdesc_arm_with_m ();
10527 initialize_tdesc_arm_with_m_fpa_layout ();
10528 initialize_tdesc_arm_with_m_vfp_d16 ();
10529 initialize_tdesc_arm_with_iwmmxt ();
10530 initialize_tdesc_arm_with_vfpv2 ();
10531 initialize_tdesc_arm_with_vfpv3 ();
10532 initialize_tdesc_arm_with_neon ();
10533
10534 /* Get the number of possible sets of register names defined in opcodes. */
10535 num_disassembly_options = get_arm_regname_num_options ();
10536
10537 /* Add root prefix command for all "set arm"/"show arm" commands. */
10538 add_prefix_cmd ("arm", no_class, set_arm_command,
10539 _("Various ARM-specific commands."),
10540 &setarmcmdlist, "set arm ", 0, &setlist);
10541
10542 add_prefix_cmd ("arm", no_class, show_arm_command,
10543 _("Various ARM-specific commands."),
10544 &showarmcmdlist, "show arm ", 0, &showlist);
10545
10546 /* Sync the opcode insn printer with our register viewer. */
10547 parse_arm_disassembler_option ("reg-names-std");
10548
10549 /* Initialize the array that will be passed to
10550 add_setshow_enum_cmd(). */
10551 valid_disassembly_styles
10552 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
10553 for (i = 0; i < num_disassembly_options; i++)
10554 {
10555 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
10556 valid_disassembly_styles[i] = setname;
10557 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
10558 rdptr += length;
10559 rest -= length;
10560 /* When we find the default names, tell the disassembler to use
10561 them. */
10562 if (!strcmp (setname, "std"))
10563 {
10564 disassembly_style = setname;
10565 set_arm_regname_option (i);
10566 }
10567 }
10568 /* Mark the end of valid options. */
10569 valid_disassembly_styles[num_disassembly_options] = NULL;
10570
10571 /* Create the help text. */
10572 stb = mem_fileopen ();
10573 fprintf_unfiltered (stb, "%s%s%s",
10574 _("The valid values are:\n"),
10575 regdesc,
10576 _("The default is \"std\"."));
10577 helptext = ui_file_xstrdup (stb, NULL);
10578 ui_file_delete (stb);
10579
10580 add_setshow_enum_cmd("disassembler", no_class,
10581 valid_disassembly_styles, &disassembly_style,
10582 _("Set the disassembly style."),
10583 _("Show the disassembly style."),
10584 helptext,
10585 set_disassembly_style_sfunc,
10586 NULL, /* FIXME: i18n: The disassembly style is
10587 \"%s\". */
10588 &setarmcmdlist, &showarmcmdlist);
10589
10590 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
10591 _("Set usage of ARM 32-bit mode."),
10592 _("Show usage of ARM 32-bit mode."),
10593 _("When off, a 26-bit PC will be used."),
10594 NULL,
10595 NULL, /* FIXME: i18n: Usage of ARM 32-bit
10596 mode is %s. */
10597 &setarmcmdlist, &showarmcmdlist);
10598
10599 /* Add a command to allow the user to force the FPU model. */
10600 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
10601 _("Set the floating point type."),
10602 _("Show the floating point type."),
10603 _("auto - Determine the FP typefrom the OS-ABI.\n\
10604 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10605 fpa - FPA co-processor (GCC compiled).\n\
10606 softvfp - Software FP with pure-endian doubles.\n\
10607 vfp - VFP co-processor."),
10608 set_fp_model_sfunc, show_fp_model,
10609 &setarmcmdlist, &showarmcmdlist);
10610
10611 /* Add a command to allow the user to force the ABI. */
10612 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
10613 _("Set the ABI."),
10614 _("Show the ABI."),
10615 NULL, arm_set_abi, arm_show_abi,
10616 &setarmcmdlist, &showarmcmdlist);
10617
10618 /* Add two commands to allow the user to force the assumed
10619 execution mode. */
10620 add_setshow_enum_cmd ("fallback-mode", class_support,
10621 arm_mode_strings, &arm_fallback_mode_string,
10622 _("Set the mode assumed when symbols are unavailable."),
10623 _("Show the mode assumed when symbols are unavailable."),
10624 NULL, NULL, arm_show_fallback_mode,
10625 &setarmcmdlist, &showarmcmdlist);
10626 add_setshow_enum_cmd ("force-mode", class_support,
10627 arm_mode_strings, &arm_force_mode_string,
10628 _("Set the mode assumed even when symbols are available."),
10629 _("Show the mode assumed even when symbols are available."),
10630 NULL, NULL, arm_show_force_mode,
10631 &setarmcmdlist, &showarmcmdlist);
10632
10633 /* Debugging flag. */
10634 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
10635 _("Set ARM debugging."),
10636 _("Show ARM debugging."),
10637 _("When on, arm-specific debugging is enabled."),
10638 NULL,
10639 NULL, /* FIXME: i18n: "ARM debugging is %s. */
10640 &setdebuglist, &showdebuglist);
10641 }
10642
/* ARM-reversible process record data structures.  */

/* Instruction sizes, in bytes, of the three encodings the recorder
   must decode.  */
#define ARM_INSN_SIZE_BYTES 4
#define THUMB_INSN_SIZE_BYTES 2
#define THUMB2_INSN_SIZE_BYTES 4


/* Bit position of the S/L bit in load/store and data-processing
   instructions.  */
#define INSN_S_L_BIT_NUM 20

/* Allocate REGS as an array of LENGTH uint32_t register numbers and
   copy them in from RECORD_BUF.  Does nothing when LENGTH is zero.
   NOTE(review): LENGTH is evaluated twice (guard and memcpy), so
   callers must not pass an expression with side effects.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS as an array of LENGTH struct arm_mem_r entries and
   copy them in from RECORD_BUF.  Does nothing when LENGTH is zero.
   NOTE(review): the memcpy destination &MEMS->len is the address of
   the struct's first member, hence equal to MEMS itself; LENGTH is
   likewise evaluated twice here.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
                MEMS =  XNEWVEC (struct arm_mem_r, mem_len);  \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct arm_mem_r) * LENGTH); \
            } \
          } \
          while (0)

/* Checks whether insn is already recorded or yet to be decoded. (boolean expression).  */
#define INSN_RECORDED(ARM_RECORD) \
        (0 != (ARM_RECORD)->reg_rec_count || 0 != (ARM_RECORD)->mem_rec_count)
10680
/* ARM memory record structure.  Describes one contiguous block of
   memory that an instruction is about to overwrite, so its old
   contents can be saved for reverse execution.  */
struct arm_mem_r
{
  uint32_t len;    /* Record length.  */
  uint32_t addr;   /* Memory address.  */
};

/* ARM instruction record contains opcode of current insn
   and execution state (before entry to decode_insn()),
   contains list of to-be-modified registers and
   memory blocks (on return from decode_insn()).  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;      /* Architecture of the recorded insn.  */
  struct regcache *regcache;    /* Register cache to read operands from.  */
  CORE_ADDR this_addr;          /* Address of the insn being decoded.  */
  uint32_t arm_insn;            /* Raw insn bits; wide enough to also
                                   accommodate Thumb encodings.  */
  uint32_t cond;                /* Condition code.  */
  uint32_t opcode;              /* Insn opcode.  */
  uint32_t decode;              /* Insn decode bits.  */
  uint32_t mem_rec_count;       /* Number of memory records.  */
  uint32_t reg_rec_count;       /* Number of register records.  */
  uint32_t *arm_regs;           /* Registers to be saved for this record.  */
  struct arm_mem_r *arm_mems;   /* Memory to be saved for this record.  */
} insn_decode_record;
10707
10708
/* Check an ARM SBO (should-be-one) / SBZ (should-be-zero) mandatory
   field of INSN.  BIT_NUM is the 1-based position of the field's
   least significant bit and LEN its width in bits.  When SBO is
   non-zero every bit of the field must be one; when SBO is zero every
   bit must be zero.  Return 1 when the field is valid, 0 otherwise.

   Fixes over the previous version: (1) the field was extracted before
   the LEN == 0 early-return, handing `bits' an underflowed end
   position; (2) the old loop tested `ones & sbo', so with SBO == 0 it
   rejected every SBZ field; (3) terminating on `ones' reaching zero
   wrongly accepted SBO fields whose high bits were clear.  */

static int
sbo_sbz (uint32_t insn, uint32_t bit_num, uint32_t len, uint32_t sbo)
{
  uint32_t ones;
  uint32_t i;

  /* An empty field is trivially valid; check before extracting.  */
  if (!len)
    return 1;

  ones = bits (insn, bit_num - 1, (bit_num - 1) + (len - 1));

  /* For an SBZ field, complement so that in both modes every one of
     the LEN low bits must be set.  */
  if (!sbo)
    ones = ~ones;

  /* Examine exactly LEN bits.  */
  for (i = 0; i < len; i++)
    {
      if (!(ones & 1))
	return 0;
      ones = ones >> 1;
    }

  return 1;
}
10732
/* Result codes returned by the ARM process-record routines.  */
enum arm_record_result
{
  ARM_RECORD_SUCCESS = 0,
  ARM_RECORD_FAILURE = 1
};

/* Selects which misc-store form arm_record_strx handles: store
   halfword (STRH) or store doubleword (STRD).  */
typedef enum
{
  ARM_RECORD_STRH=1,
  ARM_RECORD_STRD
} arm_record_strx_t;

/* Instruction-set state of the insn being recorded: ARM, 16-bit
   Thumb, or 32-bit Thumb-2.  */
typedef enum
{
  ARM_RECORD=1,
  THUMB_RECORD,
  THUMB2_RECORD
} record_type_t;
10751
10752
10753 static int
10754 arm_record_strx (insn_decode_record *arm_insn_r, uint32_t *record_buf,
10755 uint32_t *record_buf_mem, arm_record_strx_t str_type)
10756 {
10757
10758 struct regcache *reg_cache = arm_insn_r->regcache;
10759 ULONGEST u_regval[2]= {0};
10760
10761 uint32_t reg_src1 = 0, reg_src2 = 0;
10762 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
10763 uint32_t opcode1 = 0;
10764
10765 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10766 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10767 opcode1 = bits (arm_insn_r->arm_insn, 20, 24);
10768
10769
10770 if (14 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
10771 {
10772 /* 1) Handle misc store, immediate offset. */
10773 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10774 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10775 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10776 regcache_raw_read_unsigned (reg_cache, reg_src1,
10777 &u_regval[0]);
10778 if (ARM_PC_REGNUM == reg_src1)
10779 {
10780 /* If R15 was used as Rn, hence current PC+8. */
10781 u_regval[0] = u_regval[0] + 8;
10782 }
10783 offset_8 = (immed_high << 4) | immed_low;
10784 /* Calculate target store address. */
10785 if (14 == arm_insn_r->opcode)
10786 {
10787 tgt_mem_addr = u_regval[0] + offset_8;
10788 }
10789 else
10790 {
10791 tgt_mem_addr = u_regval[0] - offset_8;
10792 }
10793 if (ARM_RECORD_STRH == str_type)
10794 {
10795 record_buf_mem[0] = 2;
10796 record_buf_mem[1] = tgt_mem_addr;
10797 arm_insn_r->mem_rec_count = 1;
10798 }
10799 else if (ARM_RECORD_STRD == str_type)
10800 {
10801 record_buf_mem[0] = 4;
10802 record_buf_mem[1] = tgt_mem_addr;
10803 record_buf_mem[2] = 4;
10804 record_buf_mem[3] = tgt_mem_addr + 4;
10805 arm_insn_r->mem_rec_count = 2;
10806 }
10807 }
10808 else if (12 == arm_insn_r->opcode || 8 == arm_insn_r->opcode)
10809 {
10810 /* 2) Store, register offset. */
10811 /* Get Rm. */
10812 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10813 /* Get Rn. */
10814 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10815 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10816 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10817 if (15 == reg_src2)
10818 {
10819 /* If R15 was used as Rn, hence current PC+8. */
10820 u_regval[0] = u_regval[0] + 8;
10821 }
10822 /* Calculate target store address, Rn +/- Rm, register offset. */
10823 if (12 == arm_insn_r->opcode)
10824 {
10825 tgt_mem_addr = u_regval[0] + u_regval[1];
10826 }
10827 else
10828 {
10829 tgt_mem_addr = u_regval[1] - u_regval[0];
10830 }
10831 if (ARM_RECORD_STRH == str_type)
10832 {
10833 record_buf_mem[0] = 2;
10834 record_buf_mem[1] = tgt_mem_addr;
10835 arm_insn_r->mem_rec_count = 1;
10836 }
10837 else if (ARM_RECORD_STRD == str_type)
10838 {
10839 record_buf_mem[0] = 4;
10840 record_buf_mem[1] = tgt_mem_addr;
10841 record_buf_mem[2] = 4;
10842 record_buf_mem[3] = tgt_mem_addr + 4;
10843 arm_insn_r->mem_rec_count = 2;
10844 }
10845 }
10846 else if (11 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10847 || 2 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10848 {
10849 /* 3) Store, immediate pre-indexed. */
10850 /* 5) Store, immediate post-indexed. */
10851 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10852 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10853 offset_8 = (immed_high << 4) | immed_low;
10854 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10855 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10856 /* Calculate target store address, Rn +/- Rm, register offset. */
10857 if (15 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10858 {
10859 tgt_mem_addr = u_regval[0] + offset_8;
10860 }
10861 else
10862 {
10863 tgt_mem_addr = u_regval[0] - offset_8;
10864 }
10865 if (ARM_RECORD_STRH == str_type)
10866 {
10867 record_buf_mem[0] = 2;
10868 record_buf_mem[1] = tgt_mem_addr;
10869 arm_insn_r->mem_rec_count = 1;
10870 }
10871 else if (ARM_RECORD_STRD == str_type)
10872 {
10873 record_buf_mem[0] = 4;
10874 record_buf_mem[1] = tgt_mem_addr;
10875 record_buf_mem[2] = 4;
10876 record_buf_mem[3] = tgt_mem_addr + 4;
10877 arm_insn_r->mem_rec_count = 2;
10878 }
10879 /* Record Rn also as it changes. */
10880 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10881 arm_insn_r->reg_rec_count = 1;
10882 }
10883 else if (9 == arm_insn_r->opcode || 13 == arm_insn_r->opcode
10884 || 0 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10885 {
10886 /* 4) Store, register pre-indexed. */
10887 /* 6) Store, register post -indexed. */
10888 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10889 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10890 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10891 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10892 /* Calculate target store address, Rn +/- Rm, register offset. */
10893 if (13 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10894 {
10895 tgt_mem_addr = u_regval[0] + u_regval[1];
10896 }
10897 else
10898 {
10899 tgt_mem_addr = u_regval[1] - u_regval[0];
10900 }
10901 if (ARM_RECORD_STRH == str_type)
10902 {
10903 record_buf_mem[0] = 2;
10904 record_buf_mem[1] = tgt_mem_addr;
10905 arm_insn_r->mem_rec_count = 1;
10906 }
10907 else if (ARM_RECORD_STRD == str_type)
10908 {
10909 record_buf_mem[0] = 4;
10910 record_buf_mem[1] = tgt_mem_addr;
10911 record_buf_mem[2] = 4;
10912 record_buf_mem[3] = tgt_mem_addr + 4;
10913 arm_insn_r->mem_rec_count = 2;
10914 }
10915 /* Record Rn also as it changes. */
10916 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10917 arm_insn_r->reg_rec_count = 1;
10918 }
10919 return 0;
10920 }
10921
10922 /* Handling ARM extension space insns. */
10923
10924 static int
10925 arm_record_extension_space (insn_decode_record *arm_insn_r)
10926 {
10927 uint32_t ret = 0; /* Return value: -1:record failure ; 0:success */
10928 uint32_t opcode1 = 0, opcode2 = 0, insn_op1 = 0;
10929 uint32_t record_buf[8], record_buf_mem[8];
10930 uint32_t reg_src1 = 0;
10931 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
10932 struct regcache *reg_cache = arm_insn_r->regcache;
10933 ULONGEST u_regval = 0;
10934
10935 gdb_assert (!INSN_RECORDED(arm_insn_r));
10936 /* Handle unconditional insn extension space. */
10937
10938 opcode1 = bits (arm_insn_r->arm_insn, 20, 27);
10939 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
10940 if (arm_insn_r->cond)
10941 {
10942 /* PLD has no affect on architectural state, it just affects
10943 the caches. */
10944 if (5 == ((opcode1 & 0xE0) >> 5))
10945 {
10946 /* BLX(1) */
10947 record_buf[0] = ARM_PS_REGNUM;
10948 record_buf[1] = ARM_LR_REGNUM;
10949 arm_insn_r->reg_rec_count = 2;
10950 }
10951 /* STC2, LDC2, MCR2, MRC2, CDP2: <TBD>, co-processor insn. */
10952 }
10953
10954
10955 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
10956 if (3 == opcode1 && bit (arm_insn_r->arm_insn, 4))
10957 {
10958 ret = -1;
10959 /* Undefined instruction on ARM V5; need to handle if later
10960 versions define it. */
10961 }
10962
10963 opcode1 = bits (arm_insn_r->arm_insn, 24, 27);
10964 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
10965 insn_op1 = bits (arm_insn_r->arm_insn, 20, 23);
10966
10967 /* Handle arithmetic insn extension space. */
10968 if (!opcode1 && 9 == opcode2 && 1 != arm_insn_r->cond
10969 && !INSN_RECORDED(arm_insn_r))
10970 {
10971 /* Handle MLA(S) and MUL(S). */
10972 if (0 <= insn_op1 && 3 >= insn_op1)
10973 {
10974 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10975 record_buf[1] = ARM_PS_REGNUM;
10976 arm_insn_r->reg_rec_count = 2;
10977 }
10978 else if (4 <= insn_op1 && 15 >= insn_op1)
10979 {
10980 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
10981 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
10982 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
10983 record_buf[2] = ARM_PS_REGNUM;
10984 arm_insn_r->reg_rec_count = 3;
10985 }
10986 }
10987
10988 opcode1 = bits (arm_insn_r->arm_insn, 26, 27);
10989 opcode2 = bits (arm_insn_r->arm_insn, 23, 24);
10990 insn_op1 = bits (arm_insn_r->arm_insn, 21, 22);
10991
10992 /* Handle control insn extension space. */
10993
10994 if (!opcode1 && 2 == opcode2 && !bit (arm_insn_r->arm_insn, 20)
10995 && 1 != arm_insn_r->cond && !INSN_RECORDED(arm_insn_r))
10996 {
10997 if (!bit (arm_insn_r->arm_insn,25))
10998 {
10999 if (!bits (arm_insn_r->arm_insn, 4, 7))
11000 {
11001 if ((0 == insn_op1) || (2 == insn_op1))
11002 {
11003 /* MRS. */
11004 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11005 arm_insn_r->reg_rec_count = 1;
11006 }
11007 else if (1 == insn_op1)
11008 {
11009 /* CSPR is going to be changed. */
11010 record_buf[0] = ARM_PS_REGNUM;
11011 arm_insn_r->reg_rec_count = 1;
11012 }
11013 else if (3 == insn_op1)
11014 {
11015 /* SPSR is going to be changed. */
11016 /* We need to get SPSR value, which is yet to be done. */
11017 printf_unfiltered (_("Process record does not support "
11018 "instruction 0x%0x at address %s.\n"),
11019 arm_insn_r->arm_insn,
11020 paddress (arm_insn_r->gdbarch,
11021 arm_insn_r->this_addr));
11022 return -1;
11023 }
11024 }
11025 else if (1 == bits (arm_insn_r->arm_insn, 4, 7))
11026 {
11027 if (1 == insn_op1)
11028 {
11029 /* BX. */
11030 record_buf[0] = ARM_PS_REGNUM;
11031 arm_insn_r->reg_rec_count = 1;
11032 }
11033 else if (3 == insn_op1)
11034 {
11035 /* CLZ. */
11036 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11037 arm_insn_r->reg_rec_count = 1;
11038 }
11039 }
11040 else if (3 == bits (arm_insn_r->arm_insn, 4, 7))
11041 {
11042 /* BLX. */
11043 record_buf[0] = ARM_PS_REGNUM;
11044 record_buf[1] = ARM_LR_REGNUM;
11045 arm_insn_r->reg_rec_count = 2;
11046 }
11047 else if (5 == bits (arm_insn_r->arm_insn, 4, 7))
11048 {
11049 /* QADD, QSUB, QDADD, QDSUB */
11050 record_buf[0] = ARM_PS_REGNUM;
11051 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
11052 arm_insn_r->reg_rec_count = 2;
11053 }
11054 else if (7 == bits (arm_insn_r->arm_insn, 4, 7))
11055 {
11056 /* BKPT. */
11057 record_buf[0] = ARM_PS_REGNUM;
11058 record_buf[1] = ARM_LR_REGNUM;
11059 arm_insn_r->reg_rec_count = 2;
11060
11061 /* Save SPSR also;how? */
11062 printf_unfiltered (_("Process record does not support "
11063 "instruction 0x%0x at address %s.\n"),
11064 arm_insn_r->arm_insn,
11065 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11066 return -1;
11067 }
11068 else if(8 == bits (arm_insn_r->arm_insn, 4, 7)
11069 || 10 == bits (arm_insn_r->arm_insn, 4, 7)
11070 || 12 == bits (arm_insn_r->arm_insn, 4, 7)
11071 || 14 == bits (arm_insn_r->arm_insn, 4, 7)
11072 )
11073 {
11074 if (0 == insn_op1 || 1 == insn_op1)
11075 {
11076 /* SMLA<x><y>, SMLAW<y>, SMULW<y>. */
11077 /* We dont do optimization for SMULW<y> where we
11078 need only Rd. */
11079 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11080 record_buf[1] = ARM_PS_REGNUM;
11081 arm_insn_r->reg_rec_count = 2;
11082 }
11083 else if (2 == insn_op1)
11084 {
11085 /* SMLAL<x><y>. */
11086 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11087 record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
11088 arm_insn_r->reg_rec_count = 2;
11089 }
11090 else if (3 == insn_op1)
11091 {
11092 /* SMUL<x><y>. */
11093 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11094 arm_insn_r->reg_rec_count = 1;
11095 }
11096 }
11097 }
11098 else
11099 {
11100 /* MSR : immediate form. */
11101 if (1 == insn_op1)
11102 {
11103 /* CSPR is going to be changed. */
11104 record_buf[0] = ARM_PS_REGNUM;
11105 arm_insn_r->reg_rec_count = 1;
11106 }
11107 else if (3 == insn_op1)
11108 {
11109 /* SPSR is going to be changed. */
11110 /* we need to get SPSR value, which is yet to be done */
11111 printf_unfiltered (_("Process record does not support "
11112 "instruction 0x%0x at address %s.\n"),
11113 arm_insn_r->arm_insn,
11114 paddress (arm_insn_r->gdbarch,
11115 arm_insn_r->this_addr));
11116 return -1;
11117 }
11118 }
11119 }
11120
11121 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
11122 opcode2 = bits (arm_insn_r->arm_insn, 20, 24);
11123 insn_op1 = bits (arm_insn_r->arm_insn, 5, 6);
11124
11125 /* Handle load/store insn extension space. */
11126
11127 if (!opcode1 && bit (arm_insn_r->arm_insn, 7)
11128 && bit (arm_insn_r->arm_insn, 4) && 1 != arm_insn_r->cond
11129 && !INSN_RECORDED(arm_insn_r))
11130 {
11131 /* SWP/SWPB. */
11132 if (0 == insn_op1)
11133 {
11134 /* These insn, changes register and memory as well. */
11135 /* SWP or SWPB insn. */
11136 /* Get memory address given by Rn. */
11137 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11138 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11139 /* SWP insn ?, swaps word. */
11140 if (8 == arm_insn_r->opcode)
11141 {
11142 record_buf_mem[0] = 4;
11143 }
11144 else
11145 {
11146 /* SWPB insn, swaps only byte. */
11147 record_buf_mem[0] = 1;
11148 }
11149 record_buf_mem[1] = u_regval;
11150 arm_insn_r->mem_rec_count = 1;
11151 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11152 arm_insn_r->reg_rec_count = 1;
11153 }
11154 else if (1 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11155 {
11156 /* STRH. */
11157 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11158 ARM_RECORD_STRH);
11159 }
11160 else if (2 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11161 {
11162 /* LDRD. */
11163 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11164 record_buf[1] = record_buf[0] + 1;
11165 arm_insn_r->reg_rec_count = 2;
11166 }
11167 else if (3 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11168 {
11169 /* STRD. */
11170 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11171 ARM_RECORD_STRD);
11172 }
11173 else if (bit (arm_insn_r->arm_insn, 20) && insn_op1 <= 3)
11174 {
11175 /* LDRH, LDRSB, LDRSH. */
11176 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11177 arm_insn_r->reg_rec_count = 1;
11178 }
11179
11180 }
11181
11182 opcode1 = bits (arm_insn_r->arm_insn, 23, 27);
11183 if (24 == opcode1 && bit (arm_insn_r->arm_insn, 21)
11184 && !INSN_RECORDED(arm_insn_r))
11185 {
11186 ret = -1;
11187 /* Handle coprocessor insn extension space. */
11188 }
11189
11190 /* To be done for ARMv5 and later; as of now we return -1. */
11191 if (-1 == ret)
11192 printf_unfiltered (_("Process record does not support instruction x%0x "
11193 "at address %s.\n"),arm_insn_r->arm_insn,
11194 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11195
11196
11197 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11198 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11199
11200 return ret;
11201 }
11202
11203 /* Handling opcode 000 insns. */
11204
static int
arm_record_data_proc_misc_ld_str (insn_decode_record *arm_insn_r)
{
  /* Record the registers and memory clobbered by an ARM instruction from
     the opcode-000 group: multiplies, misc control (MRS/MSR/BX/BLX/CLZ/
     BKPT), SWP/SWPB, enhanced load/store, and register-form data
     processing.  Returns 0 on success, -1 when the insn cannot be
     recorded.  Decode order below matters: later `else if` arms assume
     the earlier, more specific patterns did not match.  */
  struct regcache *reg_cache = arm_insn_r->regcache;
  uint32_t record_buf[8], record_buf_mem[8];
  ULONGEST u_regval[2] = {0};

  /* NOTE(review): reg_src2, immed_high, immed_low, offset_8 and
     tgt_mem_addr are never used in this function.  */
  uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
  uint32_t immed_high = 0, immed_low = 0, offset_8 = 0, tgt_mem_addr = 0;
  uint32_t opcode1 = 0;

  /* opcode: insn bits 21-24; decode: insn bits 4-7 (the "misc" selector);
     opcode1: insn bits 20-24 (opcode including the S/L bit).  */
  arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
  arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
  opcode1 = bits (arm_insn_r->arm_insn, 20, 24);

  /* Data processing insn /multiply insn.  */
  if (9 == arm_insn_r->decode
      && ((4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
      || (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)))
    {
      /* Handle multiply instructions.  */
      /* MLA, MUL, SMLAL, SMULL, UMLAL, UMULL.  */
      if (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)
        {
          /* Handle MLA and MUL: destination Rd is in bits 16-19 and the
             flags may change (S bit), so save Rd and CPSR.  */
          record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
          record_buf[1] = ARM_PS_REGNUM;
          arm_insn_r->reg_rec_count = 2;
        }
      else if (4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
        {
          /* Handle SMLAL, SMULL, UMLAL, UMULL: 64-bit result in
             RdHi (bits 16-19) and RdLo (bits 12-15), plus CPSR.  */
          record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
          record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
          record_buf[2] = ARM_PS_REGNUM;
          arm_insn_r->reg_rec_count = 3;
        }
    }
  else if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
           && (11 == arm_insn_r->decode || 13 == arm_insn_r->decode))
    {
      /* Handle misc load insns, as 20th bit  (L = 1).  */
      /* LDR insn has a capability to do branching, if
         MOV LR, PC is precceded by LDR insn having Rn as R15
         in that case, it emulates branch and link insn, and hence we
         need to save CSPR and PC as well. I am not sure this is right
         place; as opcode = 010 LDR insn make this happen, if R15 was
         used.  */
      reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
      if (15 != reg_dest)
        {
          /* Ordinary destination: only Rd changes.  */
          record_buf[0] = bits (arm_insn_r->arm_insn,12, 15);
          arm_insn_r->reg_rec_count = 1;
        }
      else
        {
          /* Destination is the PC: the load acts as a branch, so CPSR
             may change too (e.g. interworking T bit).  */
          record_buf[0] = reg_dest;
          record_buf[1] = ARM_PS_REGNUM;
          arm_insn_r->reg_rec_count = 2;
        }
    }
  else if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
           && sbo_sbz (arm_insn_r->arm_insn, 5, 12, 0)
           && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
           && 2 == bits (arm_insn_r->arm_insn, 20, 21))
    {
      /* Handle MSR insn.  */
      if (9 == arm_insn_r->opcode)
        {
          /* CSPR is going to be changed.  */
          record_buf[0] = ARM_PS_REGNUM;
          arm_insn_r->reg_rec_count = 1;
        }
      else
        {
          /* SPSR is going to be changed.  */
          /* How to read SPSR value?  Not supported: fail the record.  */
          printf_unfiltered (_("Process record does not support instruction "
                             "0x%0x at address %s.\n"),
                             arm_insn_r->arm_insn,
                             paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
          return -1;
        }
    }
  else if (9 == arm_insn_r->decode
           && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
           && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Handling SWP, SWPB.  */
      /* These insn, changes register and memory as well.  */
      /* SWP or SWPB insn.  */
      /* Swap writes both the destination register Rd and the memory word
         or byte addressed by Rn, so record both.  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
      /* SWP insn ?, swaps word.  */
      if (8 == arm_insn_r->opcode)
        {
          record_buf_mem[0] = 4;
        }
      else
        {
          /* SWPB insn, swaps only byte.  */
          record_buf_mem[0] = 1;
        }
      record_buf_mem[1] = u_regval[0];
      arm_insn_r->mem_rec_count = 1;
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      arm_insn_r->reg_rec_count = 1;
    }
  else if (3 == arm_insn_r->decode && 0x12 == opcode1
           && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
    {
      /* Handle BLX, branch and link/exchange.  */
      if (9 == arm_insn_r->opcode)
        {
          /* Branch is chosen by setting T bit of CSPR, bitp[0] of Rm,
             and R14 stores the return address.  */
          record_buf[0] = ARM_PS_REGNUM;
          record_buf[1] = ARM_LR_REGNUM;
          arm_insn_r->reg_rec_count = 2;
        }
    }
  else if (7 == arm_insn_r->decode && 0x12 == opcode1)
    {
      /* Handle enhanced software breakpoint insn, BKPT.  */
      /* CPSR is changed to be executed in ARM state,  disabling normal
         interrupts, entering abort mode.  */
      /* According to high vector configuration PC is set.  */
      /* user hit breakpoint and type reverse, in
         that case, we need to go back with previous CPSR and
         Program Counter.  */
      record_buf[0] = ARM_PS_REGNUM;
      record_buf[1] = ARM_LR_REGNUM;
      arm_insn_r->reg_rec_count = 2;

      /* Save SPSR also; how?  SPSR is banked and not reachable through
         the regcache, so recording BKPT is unsupported.  */
      printf_unfiltered (_("Process record does not support instruction "
                           "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
                           paddress (arm_insn_r->gdbarch, 
                           arm_insn_r->this_addr));
      return -1;
    }
  else if (11 == arm_insn_r->decode
           && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Handle enhanced store insns and DSP insns (e.g. LDRD).  */
      /* NOTE(review): decode == 11 with L == 0 is STRH; LDRD/STRD
         (decode 13/15) do not reach this arm — presumably handled
         elsewhere or falling through below; verify against callers.  */

      /* Handle str(x) insn */
      arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
                      ARM_RECORD_STRH);
    }
  else if (1 == arm_insn_r->decode && 0x12 == opcode1
           && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
    {
      /* Handle BX, branch and link/exchange.  */
      /* Branch is chosen by setting T bit of CSPR, bitp[0] of Rm.  */
      record_buf[0] = ARM_PS_REGNUM;
      arm_insn_r->reg_rec_count = 1;
    }
  else if (1 == arm_insn_r->decode && 0x16 == opcode1
           && sbo_sbz (arm_insn_r->arm_insn, 9, 4, 1)
           && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1))
    {
      /* Count leading zeros: CLZ.  Only Rd (bits 12-15) changes.  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      arm_insn_r->reg_rec_count = 1;
    }
  else if (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
           && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
           && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1)
           && sbo_sbz (arm_insn_r->arm_insn, 1, 12, 0)
          )
    {
      /* Handle MRS insn: CPSR/SPSR copied into Rd (bits 12-15).  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      arm_insn_r->reg_rec_count = 1;
    }
  else if (arm_insn_r->opcode <= 15)
    {
      /* Normal data processing insns.  */
      /* Out of 11 shifter operands mode, all the insn modifies destination
         register, which is specified by 13-16 decode.  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      record_buf[1] = ARM_PS_REGNUM;
      arm_insn_r->reg_rec_count = 2;
    }
  else
    {
      /* Nothing matched: cannot record this insn.  */
      return -1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
11400
11401 /* Handling opcode 001 insns. */
11402
11403 static int
11404 arm_record_data_proc_imm (insn_decode_record *arm_insn_r)
11405 {
11406 uint32_t record_buf[8], record_buf_mem[8];
11407
11408 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11409 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11410
11411 if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
11412 && 2 == bits (arm_insn_r->arm_insn, 20, 21)
11413 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
11414 )
11415 {
11416 /* Handle MSR insn. */
11417 if (9 == arm_insn_r->opcode)
11418 {
11419 /* CSPR is going to be changed. */
11420 record_buf[0] = ARM_PS_REGNUM;
11421 arm_insn_r->reg_rec_count = 1;
11422 }
11423 else
11424 {
11425 /* SPSR is going to be changed. */
11426 }
11427 }
11428 else if (arm_insn_r->opcode <= 15)
11429 {
11430 /* Normal data processing insns. */
11431 /* Out of 11 shifter operands mode, all the insn modifies destination
11432 register, which is specified by 13-16 decode. */
11433 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11434 record_buf[1] = ARM_PS_REGNUM;
11435 arm_insn_r->reg_rec_count = 2;
11436 }
11437 else
11438 {
11439 return -1;
11440 }
11441
11442 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11443 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11444 return 0;
11445 }
11446
11447 /* Handling opcode 010 insns. */
11448
11449 static int
11450 arm_record_ld_st_imm_offset (insn_decode_record *arm_insn_r)
11451 {
11452 struct regcache *reg_cache = arm_insn_r->regcache;
11453
11454 uint32_t reg_src1 = 0 , reg_dest = 0;
11455 uint32_t offset_12 = 0, tgt_mem_addr = 0;
11456 uint32_t record_buf[8], record_buf_mem[8];
11457
11458 ULONGEST u_regval = 0;
11459
11460 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11461 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11462
11463 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11464 {
11465 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11466 /* LDR insn has a capability to do branching, if
11467 MOV LR, PC is precedded by LDR insn having Rn as R15
11468 in that case, it emulates branch and link insn, and hence we
11469 need to save CSPR and PC as well. */
11470 if (ARM_PC_REGNUM != reg_dest)
11471 {
11472 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11473 arm_insn_r->reg_rec_count = 1;
11474 }
11475 else
11476 {
11477 record_buf[0] = reg_dest;
11478 record_buf[1] = ARM_PS_REGNUM;
11479 arm_insn_r->reg_rec_count = 2;
11480 }
11481 }
11482 else
11483 {
11484 /* Store, immediate offset, immediate pre-indexed,
11485 immediate post-indexed. */
11486 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11487 offset_12 = bits (arm_insn_r->arm_insn, 0, 11);
11488 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11489 /* U == 1 */
11490 if (bit (arm_insn_r->arm_insn, 23))
11491 {
11492 tgt_mem_addr = u_regval + offset_12;
11493 }
11494 else
11495 {
11496 tgt_mem_addr = u_regval - offset_12;
11497 }
11498
11499 switch (arm_insn_r->opcode)
11500 {
11501 /* STR. */
11502 case 8:
11503 case 12:
11504 /* STR. */
11505 case 9:
11506 case 13:
11507 /* STRT. */
11508 case 1:
11509 case 5:
11510 /* STR. */
11511 case 4:
11512 case 0:
11513 record_buf_mem[0] = 4;
11514 break;
11515
11516 /* STRB. */
11517 case 10:
11518 case 14:
11519 /* STRB. */
11520 case 11:
11521 case 15:
11522 /* STRBT. */
11523 case 3:
11524 case 7:
11525 /* STRB. */
11526 case 2:
11527 case 6:
11528 record_buf_mem[0] = 1;
11529 break;
11530
11531 default:
11532 gdb_assert_not_reached ("no decoding pattern found");
11533 break;
11534 }
11535 record_buf_mem[1] = tgt_mem_addr;
11536 arm_insn_r->mem_rec_count = 1;
11537
11538 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11539 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11540 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11541 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11542 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11543 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11544 )
11545 {
11546 /* We are handling pre-indexed mode; post-indexed mode;
11547 where Rn is going to be changed. */
11548 record_buf[0] = reg_src1;
11549 arm_insn_r->reg_rec_count = 1;
11550 }
11551 }
11552
11553 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11554 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11555 return 0;
11556 }
11557
11558 /* Handling opcode 011 insns. */
11559
11560 static int
11561 arm_record_ld_st_reg_offset (insn_decode_record *arm_insn_r)
11562 {
11563 struct regcache *reg_cache = arm_insn_r->regcache;
11564
11565 uint32_t shift_imm = 0;
11566 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
11567 uint32_t offset_12 = 0, tgt_mem_addr = 0;
11568 uint32_t record_buf[8], record_buf_mem[8];
11569
11570 LONGEST s_word;
11571 ULONGEST u_regval[2];
11572
11573 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11574 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11575
11576 /* Handle enhanced store insns and LDRD DSP insn,
11577 order begins according to addressing modes for store insns
11578 STRH insn. */
11579
11580 /* LDR or STR? */
11581 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11582 {
11583 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11584 /* LDR insn has a capability to do branching, if
11585 MOV LR, PC is precedded by LDR insn having Rn as R15
11586 in that case, it emulates branch and link insn, and hence we
11587 need to save CSPR and PC as well. */
11588 if (15 != reg_dest)
11589 {
11590 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11591 arm_insn_r->reg_rec_count = 1;
11592 }
11593 else
11594 {
11595 record_buf[0] = reg_dest;
11596 record_buf[1] = ARM_PS_REGNUM;
11597 arm_insn_r->reg_rec_count = 2;
11598 }
11599 }
11600 else
11601 {
11602 if (! bits (arm_insn_r->arm_insn, 4, 11))
11603 {
11604 /* Store insn, register offset and register pre-indexed,
11605 register post-indexed. */
11606 /* Get Rm. */
11607 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11608 /* Get Rn. */
11609 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11610 regcache_raw_read_unsigned (reg_cache, reg_src1
11611 , &u_regval[0]);
11612 regcache_raw_read_unsigned (reg_cache, reg_src2
11613 , &u_regval[1]);
11614 if (15 == reg_src2)
11615 {
11616 /* If R15 was used as Rn, hence current PC+8. */
11617 /* Pre-indexed mode doesnt reach here ; illegal insn. */
11618 u_regval[0] = u_regval[0] + 8;
11619 }
11620 /* Calculate target store address, Rn +/- Rm, register offset. */
11621 /* U == 1. */
11622 if (bit (arm_insn_r->arm_insn, 23))
11623 {
11624 tgt_mem_addr = u_regval[0] + u_regval[1];
11625 }
11626 else
11627 {
11628 tgt_mem_addr = u_regval[1] - u_regval[0];
11629 }
11630
11631 switch (arm_insn_r->opcode)
11632 {
11633 /* STR. */
11634 case 8:
11635 case 12:
11636 /* STR. */
11637 case 9:
11638 case 13:
11639 /* STRT. */
11640 case 1:
11641 case 5:
11642 /* STR. */
11643 case 0:
11644 case 4:
11645 record_buf_mem[0] = 4;
11646 break;
11647
11648 /* STRB. */
11649 case 10:
11650 case 14:
11651 /* STRB. */
11652 case 11:
11653 case 15:
11654 /* STRBT. */
11655 case 3:
11656 case 7:
11657 /* STRB. */
11658 case 2:
11659 case 6:
11660 record_buf_mem[0] = 1;
11661 break;
11662
11663 default:
11664 gdb_assert_not_reached ("no decoding pattern found");
11665 break;
11666 }
11667 record_buf_mem[1] = tgt_mem_addr;
11668 arm_insn_r->mem_rec_count = 1;
11669
11670 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11671 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11672 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11673 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11674 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11675 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11676 )
11677 {
11678 /* Rn is going to be changed in pre-indexed mode and
11679 post-indexed mode as well. */
11680 record_buf[0] = reg_src2;
11681 arm_insn_r->reg_rec_count = 1;
11682 }
11683 }
11684 else
11685 {
11686 /* Store insn, scaled register offset; scaled pre-indexed. */
11687 offset_12 = bits (arm_insn_r->arm_insn, 5, 6);
11688 /* Get Rm. */
11689 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11690 /* Get Rn. */
11691 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11692 /* Get shift_imm. */
11693 shift_imm = bits (arm_insn_r->arm_insn, 7, 11);
11694 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11695 regcache_raw_read_signed (reg_cache, reg_src1, &s_word);
11696 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11697 /* Offset_12 used as shift. */
11698 switch (offset_12)
11699 {
11700 case 0:
11701 /* Offset_12 used as index. */
11702 offset_12 = u_regval[0] << shift_imm;
11703 break;
11704
11705 case 1:
11706 offset_12 = (!shift_imm)?0:u_regval[0] >> shift_imm;
11707 break;
11708
11709 case 2:
11710 if (!shift_imm)
11711 {
11712 if (bit (u_regval[0], 31))
11713 {
11714 offset_12 = 0xFFFFFFFF;
11715 }
11716 else
11717 {
11718 offset_12 = 0;
11719 }
11720 }
11721 else
11722 {
11723 /* This is arithmetic shift. */
11724 offset_12 = s_word >> shift_imm;
11725 }
11726 break;
11727
11728 case 3:
11729 if (!shift_imm)
11730 {
11731 regcache_raw_read_unsigned (reg_cache, ARM_PS_REGNUM,
11732 &u_regval[1]);
11733 /* Get C flag value and shift it by 31. */
11734 offset_12 = (((bit (u_regval[1], 29)) << 31) \
11735 | (u_regval[0]) >> 1);
11736 }
11737 else
11738 {
11739 offset_12 = (u_regval[0] >> shift_imm) \
11740 | (u_regval[0] <<
11741 (sizeof(uint32_t) - shift_imm));
11742 }
11743 break;
11744
11745 default:
11746 gdb_assert_not_reached ("no decoding pattern found");
11747 break;
11748 }
11749
11750 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11751 /* bit U set. */
11752 if (bit (arm_insn_r->arm_insn, 23))
11753 {
11754 tgt_mem_addr = u_regval[1] + offset_12;
11755 }
11756 else
11757 {
11758 tgt_mem_addr = u_regval[1] - offset_12;
11759 }
11760
11761 switch (arm_insn_r->opcode)
11762 {
11763 /* STR. */
11764 case 8:
11765 case 12:
11766 /* STR. */
11767 case 9:
11768 case 13:
11769 /* STRT. */
11770 case 1:
11771 case 5:
11772 /* STR. */
11773 case 0:
11774 case 4:
11775 record_buf_mem[0] = 4;
11776 break;
11777
11778 /* STRB. */
11779 case 10:
11780 case 14:
11781 /* STRB. */
11782 case 11:
11783 case 15:
11784 /* STRBT. */
11785 case 3:
11786 case 7:
11787 /* STRB. */
11788 case 2:
11789 case 6:
11790 record_buf_mem[0] = 1;
11791 break;
11792
11793 default:
11794 gdb_assert_not_reached ("no decoding pattern found");
11795 break;
11796 }
11797 record_buf_mem[1] = tgt_mem_addr;
11798 arm_insn_r->mem_rec_count = 1;
11799
11800 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11801 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11802 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11803 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11804 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11805 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11806 )
11807 {
11808 /* Rn is going to be changed in register scaled pre-indexed
11809 mode,and scaled post indexed mode. */
11810 record_buf[0] = reg_src2;
11811 arm_insn_r->reg_rec_count = 1;
11812 }
11813 }
11814 }
11815
11816 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11817 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11818 return 0;
11819 }
11820
11821 /* Handling opcode 100 insns. */
11822
11823 static int
11824 arm_record_ld_st_multiple (insn_decode_record *arm_insn_r)
11825 {
11826 struct regcache *reg_cache = arm_insn_r->regcache;
11827
11828 uint32_t register_list[16] = {0}, register_count = 0, register_bits = 0;
11829 uint32_t reg_src1 = 0, addr_mode = 0, no_of_regs = 0;
11830 uint32_t start_address = 0, index = 0;
11831 uint32_t record_buf[24], record_buf_mem[48];
11832
11833 ULONGEST u_regval[2] = {0};
11834
11835 /* This mode is exclusively for load and store multiple. */
11836 /* Handle incremenrt after/before and decrment after.before mode;
11837 Rn is changing depending on W bit, but as of now we store Rn too
11838 without optimization. */
11839
11840 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11841 {
11842 /* LDM (1,2,3) where LDM (3) changes CPSR too. */
11843
11844 if (bit (arm_insn_r->arm_insn, 20) && !bit (arm_insn_r->arm_insn, 22))
11845 {
11846 register_bits = bits (arm_insn_r->arm_insn, 0, 15);
11847 no_of_regs = 15;
11848 }
11849 else
11850 {
11851 register_bits = bits (arm_insn_r->arm_insn, 0, 14);
11852 no_of_regs = 14;
11853 }
11854 /* Get Rn. */
11855 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11856 while (register_bits)
11857 {
11858 if (register_bits & 0x00000001)
11859 record_buf[index++] = register_count;
11860 register_bits = register_bits >> 1;
11861 register_count++;
11862 }
11863
11864 /* Extra space for Base Register and CPSR; wihtout optimization. */
11865 record_buf[index++] = reg_src1;
11866 record_buf[index++] = ARM_PS_REGNUM;
11867 arm_insn_r->reg_rec_count = index;
11868 }
11869 else
11870 {
11871 /* It handles both STM(1) and STM(2). */
11872 addr_mode = bits (arm_insn_r->arm_insn, 23, 24);
11873
11874 register_bits = bits (arm_insn_r->arm_insn, 0, 15);
11875 /* Get Rn. */
11876 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11877 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11878 while (register_bits)
11879 {
11880 if (register_bits & 0x00000001)
11881 register_count++;
11882 register_bits = register_bits >> 1;
11883 }
11884
11885 switch (addr_mode)
11886 {
11887 /* Decrement after. */
11888 case 0:
11889 start_address = (u_regval[0]) - (register_count * 4) + 4;
11890 arm_insn_r->mem_rec_count = register_count;
11891 while (register_count)
11892 {
11893 record_buf_mem[(register_count * 2) - 1] = start_address;
11894 record_buf_mem[(register_count * 2) - 2] = 4;
11895 start_address = start_address + 4;
11896 register_count--;
11897 }
11898 break;
11899
11900 /* Increment after. */
11901 case 1:
11902 start_address = u_regval[0];
11903 arm_insn_r->mem_rec_count = register_count;
11904 while (register_count)
11905 {
11906 record_buf_mem[(register_count * 2) - 1] = start_address;
11907 record_buf_mem[(register_count * 2) - 2] = 4;
11908 start_address = start_address + 4;
11909 register_count--;
11910 }
11911 break;
11912
11913 /* Decrement before. */
11914 case 2:
11915
11916 start_address = (u_regval[0]) - (register_count * 4);
11917 arm_insn_r->mem_rec_count = register_count;
11918 while (register_count)
11919 {
11920 record_buf_mem[(register_count * 2) - 1] = start_address;
11921 record_buf_mem[(register_count * 2) - 2] = 4;
11922 start_address = start_address + 4;
11923 register_count--;
11924 }
11925 break;
11926
11927 /* Increment before. */
11928 case 3:
11929 start_address = u_regval[0] + 4;
11930 arm_insn_r->mem_rec_count = register_count;
11931 while (register_count)
11932 {
11933 record_buf_mem[(register_count * 2) - 1] = start_address;
11934 record_buf_mem[(register_count * 2) - 2] = 4;
11935 start_address = start_address + 4;
11936 register_count--;
11937 }
11938 break;
11939
11940 default:
11941 gdb_assert_not_reached ("no decoding pattern found");
11942 break;
11943 }
11944
11945 /* Base register also changes; based on condition and W bit. */
11946 /* We save it anyway without optimization. */
11947 record_buf[0] = reg_src1;
11948 arm_insn_r->reg_rec_count = 1;
11949 }
11950
11951 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11952 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11953 return 0;
11954 }
11955
11956 /* Handling opcode 101 insns. */
11957
11958 static int
11959 arm_record_b_bl (insn_decode_record *arm_insn_r)
11960 {
11961 uint32_t record_buf[8];
11962
11963 /* Handle B, BL, BLX(1) insns. */
11964 /* B simply branches so we do nothing here. */
11965 /* Note: BLX(1) doesnt fall here but instead it falls into
11966 extension space. */
11967 if (bit (arm_insn_r->arm_insn, 24))
11968 {
11969 record_buf[0] = ARM_LR_REGNUM;
11970 arm_insn_r->reg_rec_count = 1;
11971 }
11972
11973 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11974
11975 return 0;
11976 }
11977
11978 /* Handling opcode 110 insns. */
11979
11980 static int
11981 arm_record_unsupported_insn (insn_decode_record *arm_insn_r)
11982 {
11983 printf_unfiltered (_("Process record does not support instruction "
11984 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11985 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11986
11987 return -1;
11988 }
11989
11990 /* Handling opcode 111 insns. */
11991
11992 static int
11993 arm_record_coproc_data_proc (insn_decode_record *arm_insn_r)
11994 {
11995 struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
11996 struct regcache *reg_cache = arm_insn_r->regcache;
11997 uint32_t ret = 0; /* function return value: -1:record failure ; 0:success */
11998 ULONGEST u_regval = 0;
11999
12000 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27);
12001
12002 /* Handle arm SWI/SVC system call instructions. */
12003 if (15 == arm_insn_r->opcode)
12004 {
12005 if (tdep->arm_syscall_record != NULL)
12006 {
12007 ULONGEST svc_operand, svc_number;
12008
12009 svc_operand = (0x00ffffff & arm_insn_r->arm_insn);
12010
12011 if (svc_operand) /* OABI. */
12012 svc_number = svc_operand - 0x900000;
12013 else /* EABI. */
12014 regcache_raw_read_unsigned (reg_cache, 7, &svc_number);
12015
12016 ret = tdep->arm_syscall_record (reg_cache, svc_number);
12017 }
12018 else
12019 {
12020 printf_unfiltered (_("no syscall record support\n"));
12021 ret = -1;
12022 }
12023 }
12024 else
12025 {
12026 arm_record_unsupported_insn (arm_insn_r);
12027 ret = -1;
12028 }
12029
12030 return ret;
12031 }
12032
12033 /* Handling opcode 000 insns. */
12034
12035 static int
12036 thumb_record_shift_add_sub (insn_decode_record *thumb_insn_r)
12037 {
12038 uint32_t record_buf[8];
12039 uint32_t reg_src1 = 0;
12040
12041 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12042
12043 record_buf[0] = ARM_PS_REGNUM;
12044 record_buf[1] = reg_src1;
12045 thumb_insn_r->reg_rec_count = 2;
12046
12047 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12048
12049 return 0;
12050 }
12051
12052
12053 /* Handling opcode 001 insns. */
12054
12055 static int
12056 thumb_record_add_sub_cmp_mov (insn_decode_record *thumb_insn_r)
12057 {
12058 uint32_t record_buf[8];
12059 uint32_t reg_src1 = 0;
12060
12061 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12062
12063 record_buf[0] = ARM_PS_REGNUM;
12064 record_buf[1] = reg_src1;
12065 thumb_insn_r->reg_rec_count = 2;
12066
12067 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12068
12069 return 0;
12070 }
12071
12072 /* Handling opcode 010 insns. */
12073
12074 static int
12075 thumb_record_ld_st_reg_offset (insn_decode_record *thumb_insn_r)
12076 {
12077 struct regcache *reg_cache = thumb_insn_r->regcache;
12078 uint32_t record_buf[8], record_buf_mem[8];
12079
12080 uint32_t reg_src1 = 0, reg_src2 = 0;
12081 uint32_t opcode1 = 0, opcode2 = 0, opcode3 = 0;
12082
12083 ULONGEST u_regval[2] = {0};
12084
12085 opcode1 = bits (thumb_insn_r->arm_insn, 10, 12);
12086
12087 if (bit (thumb_insn_r->arm_insn, 12))
12088 {
12089 /* Handle load/store register offset. */
12090 opcode2 = bits (thumb_insn_r->arm_insn, 9, 10);
12091 if (opcode2 >= 12 && opcode2 <= 15)
12092 {
12093 /* LDR(2), LDRB(2) , LDRH(2), LDRSB, LDRSH. */
12094 reg_src1 = bits (thumb_insn_r->arm_insn,0, 2);
12095 record_buf[0] = reg_src1;
12096 thumb_insn_r->reg_rec_count = 1;
12097 }
12098 else if (opcode2 >= 8 && opcode2 <= 10)
12099 {
12100 /* STR(2), STRB(2), STRH(2) . */
12101 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12102 reg_src2 = bits (thumb_insn_r->arm_insn, 6, 8);
12103 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
12104 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
12105 if (8 == opcode2)
12106 record_buf_mem[0] = 4; /* STR (2). */
12107 else if (10 == opcode2)
12108 record_buf_mem[0] = 1; /* STRB (2). */
12109 else if (9 == opcode2)
12110 record_buf_mem[0] = 2; /* STRH (2). */
12111 record_buf_mem[1] = u_regval[0] + u_regval[1];
12112 thumb_insn_r->mem_rec_count = 1;
12113 }
12114 }
12115 else if (bit (thumb_insn_r->arm_insn, 11))
12116 {
12117 /* Handle load from literal pool. */
12118 /* LDR(3). */
12119 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12120 record_buf[0] = reg_src1;
12121 thumb_insn_r->reg_rec_count = 1;
12122 }
12123 else if (opcode1)
12124 {
12125 opcode2 = bits (thumb_insn_r->arm_insn, 8, 9);
12126 opcode3 = bits (thumb_insn_r->arm_insn, 0, 2);
12127 if ((3 == opcode2) && (!opcode3))
12128 {
12129 /* Branch with exchange. */
12130 record_buf[0] = ARM_PS_REGNUM;
12131 thumb_insn_r->reg_rec_count = 1;
12132 }
12133 else
12134 {
12135 /* Format 8; special data processing insns. */
12136 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12137 record_buf[0] = ARM_PS_REGNUM;
12138 record_buf[1] = reg_src1;
12139 thumb_insn_r->reg_rec_count = 2;
12140 }
12141 }
12142 else
12143 {
12144 /* Format 5; data processing insns. */
12145 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12146 if (bit (thumb_insn_r->arm_insn, 7))
12147 {
12148 reg_src1 = reg_src1 + 8;
12149 }
12150 record_buf[0] = ARM_PS_REGNUM;
12151 record_buf[1] = reg_src1;
12152 thumb_insn_r->reg_rec_count = 2;
12153 }
12154
12155 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12156 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12157 record_buf_mem);
12158
12159 return 0;
12160 }
12161
/* Handling opcode 011 insns.  */
12163
12164 static int
12165 thumb_record_ld_st_imm_offset (insn_decode_record *thumb_insn_r)
12166 {
12167 struct regcache *reg_cache = thumb_insn_r->regcache;
12168 uint32_t record_buf[8], record_buf_mem[8];
12169
12170 uint32_t reg_src1 = 0;
12171 uint32_t opcode = 0, immed_5 = 0;
12172
12173 ULONGEST u_regval = 0;
12174
12175 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12176
12177 if (opcode)
12178 {
12179 /* LDR(1). */
12180 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12181 record_buf[0] = reg_src1;
12182 thumb_insn_r->reg_rec_count = 1;
12183 }
12184 else
12185 {
12186 /* STR(1). */
12187 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12188 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12189 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12190 record_buf_mem[0] = 4;
12191 record_buf_mem[1] = u_regval + (immed_5 * 4);
12192 thumb_insn_r->mem_rec_count = 1;
12193 }
12194
12195 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12196 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12197 record_buf_mem);
12198
12199 return 0;
12200 }
12201
12202 /* Handling opcode 100 insns. */
12203
12204 static int
12205 thumb_record_ld_st_stack (insn_decode_record *thumb_insn_r)
12206 {
12207 struct regcache *reg_cache = thumb_insn_r->regcache;
12208 uint32_t record_buf[8], record_buf_mem[8];
12209
12210 uint32_t reg_src1 = 0;
12211 uint32_t opcode = 0, immed_8 = 0, immed_5 = 0;
12212
12213 ULONGEST u_regval = 0;
12214
12215 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12216
12217 if (3 == opcode)
12218 {
12219 /* LDR(4). */
12220 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12221 record_buf[0] = reg_src1;
12222 thumb_insn_r->reg_rec_count = 1;
12223 }
12224 else if (1 == opcode)
12225 {
12226 /* LDRH(1). */
12227 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12228 record_buf[0] = reg_src1;
12229 thumb_insn_r->reg_rec_count = 1;
12230 }
12231 else if (2 == opcode)
12232 {
12233 /* STR(3). */
12234 immed_8 = bits (thumb_insn_r->arm_insn, 0, 7);
12235 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
12236 record_buf_mem[0] = 4;
12237 record_buf_mem[1] = u_regval + (immed_8 * 4);
12238 thumb_insn_r->mem_rec_count = 1;
12239 }
12240 else if (0 == opcode)
12241 {
12242 /* STRH(1). */
12243 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12244 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12245 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12246 record_buf_mem[0] = 2;
12247 record_buf_mem[1] = u_regval + (immed_5 * 2);
12248 thumb_insn_r->mem_rec_count = 1;
12249 }
12250
12251 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12252 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12253 record_buf_mem);
12254
12255 return 0;
12256 }
12257
12258 /* Handling opcode 101 insns. */
12259
static int
thumb_record_misc (insn_decode_record *thumb_insn_r)
{
  struct regcache *reg_cache = thumb_insn_r->regcache;

  uint32_t opcode = 0, opcode1 = 0, opcode2 = 0;
  uint32_t register_bits = 0, register_count = 0;
  /* NOTE(review): register_list is never used in this function.  */
  uint32_t register_list[8] = {0}, index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];
  uint32_t reg_src1;

  ULONGEST u_regval = 0;

  /* Overlapping opcode fields: bits 11-12 select the format group,
     bits 8-12 and 9-12 narrow it down to specific encodings.  */
  opcode = bits (thumb_insn_r->arm_insn, 11, 12);
  opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
  opcode2 = bits (thumb_insn_r->arm_insn, 9, 12);

  if (14 == opcode2)
    {
      /* POP: every register named in the 8-bit list is overwritten,
	 plus SP (writeback) and, conservatively, the status register.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    record_buf[index++] = register_count;
	  register_bits = register_bits >> 1;
	  register_count++;
	}
      record_buf[index++] = ARM_PS_REGNUM;
      record_buf[index++] = ARM_SP_REGNUM;
      thumb_insn_r->reg_rec_count = index;
    }
  else if (10 == opcode2)
    {
      /* PUSH: memory below SP is overwritten.  Count the listed
	 registers (bit 8 adds LR) to find the lowest stored address.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}
      start_address = u_regval -  \
		  (4 * (bit (thumb_insn_r->arm_insn, 8) + register_count));
      thumb_insn_r->mem_rec_count = register_count;
      /* Fill the (len, addr) pairs back to front so that entry 0 holds
	 the lowest stored address.  */
      while (register_count)
	{
	  record_buf_mem[(register_count * 2) - 1] = start_address;
	  record_buf_mem[(register_count * 2) - 2] = 4;
	  start_address = start_address + 4;
	  register_count--;
	}
      record_buf[0] = ARM_SP_REGNUM;
      thumb_insn_r->reg_rec_count = 1;
    }
  else if (0x1E == opcode1)
    {
      /* BKPT insn.  */
      /* Handle enhanced software breakpoint insn, BKPT.  */
      /* CPSR is changed to be executed in ARM state,  disabling normal
	 interrupts, entering abort mode.  */
      /* According to high vector configuration PC is set.  */
      /* User hits breakpoint and type reverse, in that case, we need to go back with
	 previous CPSR and Program Counter.  */
      record_buf[0] = ARM_PS_REGNUM;
      record_buf[1] = ARM_LR_REGNUM;
      thumb_insn_r->reg_rec_count = 2;
      /* We need to save SPSR value, which is not yet done.  */
      printf_unfiltered (_("Process record does not support instruction "
			   "0x%0x at address %s.\n"),
			 thumb_insn_r->arm_insn,
			 paddress (thumb_insn_r->gdbarch,
				   thumb_insn_r->this_addr));
      return -1;
    }
  else if ((0 == opcode) || (1 == opcode))
    {
      /* ADD(5), ADD(6): only the destination register Rd is changed.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      record_buf[0] = reg_src1;
      thumb_insn_r->reg_rec_count = 1;
    }
  else if (2 == opcode)
    {
      /* ADD(7), SUB(4): SP is the only register modified.  */
      /* NOTE(review): reg_src1 is computed here but unused.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      record_buf[0] = ARM_SP_REGNUM;
      thumb_insn_r->reg_rec_count = 1;
    }

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
             record_buf_mem);

  return 0;
}
12357
12358 /* Handling opcode 110 insns. */
12359
static int
thumb_record_ldm_stm_swi (insn_decode_record *thumb_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
  struct regcache *reg_cache = thumb_insn_r->regcache;

  uint32_t ret = 0;        /* function return value: -1:record failure ;  0:success  */
  uint32_t reg_src1 = 0;
  uint32_t opcode1 = 0, opcode2 = 0, register_bits = 0, register_count = 0;
  /* NOTE(review): register_list is never used in this function.  */
  uint32_t register_list[8] = {0}, index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
  opcode2 = bits (thumb_insn_r->arm_insn, 11, 12);

  if (1 == opcode2)
    {

      /* LDMIA: every register named in the 8-bit list is overwritten,
	 plus the base register Rn (writeback).  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    record_buf[index++] = register_count;
	  register_bits = register_bits >> 1;
	  register_count++;
	}
      record_buf[index++] = reg_src1;
      thumb_insn_r->reg_rec_count = index;
    }
  else if (0 == opcode2)
    {
      /* STMIA: one word of memory is overwritten per register named
	 in the list, starting at the address in Rn.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}
      start_address = u_regval;
      thumb_insn_r->mem_rec_count = register_count;
      /* Fill the (len, addr) pairs back to front so that entry 0 holds
	 the lowest stored address.  */
      while (register_count)
	{
	  record_buf_mem[(register_count * 2) - 1] = start_address;
	  record_buf_mem[(register_count * 2) - 2] = 4;
	  start_address = start_address + 4;
	  register_count--;
	}
    }
  else if (0x1F == opcode1)
    {
      /* SWI: delegate to the OS-specific syscall record hook, passing
	 the syscall number read from register r7.  */
      if (tdep->arm_syscall_record != NULL)
	{
	  regcache_raw_read_unsigned (reg_cache, 7, &u_regval);
	  ret = tdep->arm_syscall_record (reg_cache, u_regval);
	}
      else
	{
	  printf_unfiltered (_("no syscall record support\n"));
	  return -1;
	}
    }

  /* B (1), conditional branch is automatically taken care in process_record,
    as PC is saved there.  */

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
             record_buf_mem);

  return ret;
}
12441
12442 /* Handling opcode 111 insns. */
12443
12444 static int
12445 thumb_record_branch (insn_decode_record *thumb_insn_r)
12446 {
12447 uint32_t record_buf[8];
12448 uint32_t bits_h = 0;
12449
12450 bits_h = bits (thumb_insn_r->arm_insn, 11, 12);
12451
12452 if (2 == bits_h || 3 == bits_h)
12453 {
12454 /* BL */
12455 record_buf[0] = ARM_LR_REGNUM;
12456 thumb_insn_r->reg_rec_count = 1;
12457 }
12458 else if (1 == bits_h)
12459 {
12460 /* BLX(1). */
12461 record_buf[0] = ARM_PS_REGNUM;
12462 record_buf[1] = ARM_LR_REGNUM;
12463 thumb_insn_r->reg_rec_count = 2;
12464 }
12465
12466 /* B(2) is automatically taken care in process_record, as PC is
12467 saved there. */
12468
12469 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12470
12471 return 0;
12472 }
12473
12474 /* Handler for thumb2 load/store multiple instructions. */
12475
static int
thumb2_record_ld_st_multiple (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, op;
  uint32_t register_bits = 0, register_count = 0;
  uint32_t index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  op = bits (thumb2_insn_r->arm_insn, 23, 24);

  if (0 == op || 3 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle RFE instruction: restores CPSR from memory.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else
	{
	  /* Handle SRS instruction after reading banked SP.  */
	  return arm_record_unsupported_insn (thumb2_insn_r);
	}
    }
  else if (1 == op || 2 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle LDM/LDMIA/LDMFD and LDMDB/LDMEA instructions: every
	     register named in the 16-bit list is overwritten, plus the
	     base register (writeback) and the status register.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		record_buf[index++] = register_count;

	      register_count++;
	      register_bits = register_bits >> 1;
	    }
	  record_buf[index++] = reg_rn;
	  record_buf[index++] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = index;
	}
      else
	{
	  /* Handle STM/STMIA/STMEA and STMDB/STMFD: count the stored
	     registers to find the span of memory overwritten.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		register_count++;

	      register_bits = register_bits >> 1;
	    }

	  if (1 == op)
	    {
	      /* Increment-after (STMIA/STMEA): stores begin at Rn.  */
	      start_address = u_regval;
	    }
	  else if (2 == op)
	    {
	      /* Decrement-before (STMDB/STMFD): stores begin at
		 Rn - 4 * register_count.  */
	      start_address = u_regval - register_count * 4;
	    }

	  thumb2_insn_r->mem_rec_count = register_count;
	  /* Fill the (len, addr) pairs back to front so that entry 0
	     holds the lowest stored address.  */
	  while (register_count)
	    {
	      record_buf_mem[register_count * 2 - 1] = start_address;
	      record_buf_mem[register_count * 2 - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  /* Base register writeback and, conservatively, the flags.  */
	  record_buf[0] = reg_rn;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}
    }

  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  return ARM_RECORD_SUCCESS;
}
12567
12568 /* Handler for thumb2 load/store (dual/exclusive) and table branch
12569 instructions. */
12570
static int
thumb2_record_ld_st_dual_ex_tbb (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rd, reg_rn, offset_imm;
  uint32_t reg_dest1, reg_dest2;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2, op3;
  /* NOTE(review): s_word is never used in this function.  */
  LONGEST s_word;

  ULONGEST u_regval[2];

  op1 = bits (thumb2_insn_r->arm_insn, 23, 24);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 21);
  op3 = bits (thumb2_insn_r->arm_insn, 4, 7);

  if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Load forms: record the destination register(s) and the flags.  */
      if(!(1 == op1 && 1 == op2 && (0 == op3 || 1 == op3)))
	{
	  reg_dest1 = bits (thumb2_insn_r->arm_insn, 12, 15);
	  record_buf[0] = reg_dest1;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}
      /* NOTE(review): when op1 == 1, op2 == 1 and op3 is 0 or 1,
	 nothing is recorded above and reg_rec_count may stay 0 —
	 presumably the table-branch encodings; verify against the
	 ARM ARM.  */

      if (3 == op2 || (op1 & 2) || (1 == op1 && 1 == op2 && 7 == op3))
	{
	  /* Dual loads additionally overwrite the second destination
	     register (bits 8-11).  */
	  reg_dest2 = bits (thumb2_insn_r->arm_insn, 8, 11);
	  record_buf[2] = reg_dest2;
	  thumb2_insn_r->reg_rec_count = 3;
	}
    }
  else
    {
      /* Store forms: what memory is overwritten depends on the
	 addressing mode derived below from op1/op2/op3.  */
      reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

      if (0 == op1 && 0 == op2)
	{
	  /* Handle STREX: one word at Rn + imm8 * 4, plus the status
	     result register.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  address = u_regval[0] + (offset_imm * 4);
	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  thumb2_insn_r->mem_rec_count = 1;
	  /* NOTE(review): this records bits 0-3 as the status register
	     Rd; check against the ARM ARM, which may place Rd in bits
	     8-11 for the Thumb STREX encoding.  */
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else if (1 == op1 && 0 == op2)
	{
	  /* STREXB/STREXH/STREXD, selected by op3 below.  The address
	     is Rn with no offset; record_buf_mem[1] is set here and the
	     access width filled in per-variant.  */
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	  address = u_regval[0];
	  record_buf_mem[1] = address;

	  if (4 == op3)
	    {
	      /* Handle STREXB.  */
	      record_buf_mem[0] = 1;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (5 == op3)
	    {
	      /* Handle STREXH.  */
	      record_buf_mem[0] = 2 ;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (7 == op3)
	    {
	      /* Handle STREXD: two consecutive words starting at Rn.  */
	      address = u_regval[0];
	      record_buf_mem[0] = 4;
	      record_buf_mem[2] = 4;
	      record_buf_mem[3] = address + 4;
	      thumb2_insn_r->mem_rec_count = 2;
	    }
	}
      else
	{
	  /* Remaining store forms (presumably STRD immediate — verify):
	     two words, with bit 24 (P) selecting indexed addressing and
	     bit 23 (U) the offset direction.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);

	  if (bit (thumb2_insn_r->arm_insn, 24))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 23))
		offset_addr = u_regval[0] + (offset_imm * 4);
	      else
		offset_addr = u_regval[0] - (offset_imm * 4);

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];

	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = 4;
	  record_buf_mem[3] = address + 4;
	  thumb2_insn_r->mem_rec_count = 2;
	  /* Base register recorded to cover writeback forms.  */
	  record_buf[0] = reg_rn;
	  thumb2_insn_r->reg_rec_count = 1;
	}
    }

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12685
12686 /* Handler for thumb2 data processing (shift register and modified immediate)
12687 instructions. */
12688
12689 static int
12690 thumb2_record_data_proc_sreg_mimm (insn_decode_record *thumb2_insn_r)
12691 {
12692 uint32_t reg_rd, op;
12693 uint32_t record_buf[8];
12694
12695 op = bits (thumb2_insn_r->arm_insn, 21, 24);
12696 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12697
12698 if ((0 == op || 4 == op || 8 == op || 13 == op) && 15 == reg_rd)
12699 {
12700 record_buf[0] = ARM_PS_REGNUM;
12701 thumb2_insn_r->reg_rec_count = 1;
12702 }
12703 else
12704 {
12705 record_buf[0] = reg_rd;
12706 record_buf[1] = ARM_PS_REGNUM;
12707 thumb2_insn_r->reg_rec_count = 2;
12708 }
12709
12710 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12711 record_buf);
12712 return ARM_RECORD_SUCCESS;
12713 }
12714
12715 /* Generic handler for thumb2 instructions which effect destination and PS
12716 registers. */
12717
12718 static int
12719 thumb2_record_ps_dest_generic (insn_decode_record *thumb2_insn_r)
12720 {
12721 uint32_t reg_rd;
12722 uint32_t record_buf[8];
12723
12724 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12725
12726 record_buf[0] = reg_rd;
12727 record_buf[1] = ARM_PS_REGNUM;
12728 thumb2_insn_r->reg_rec_count = 2;
12729
12730 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12731 record_buf);
12732 return ARM_RECORD_SUCCESS;
12733 }
12734
12735 /* Handler for thumb2 branch and miscellaneous control instructions. */
12736
12737 static int
12738 thumb2_record_branch_misc_cntrl (insn_decode_record *thumb2_insn_r)
12739 {
12740 uint32_t op, op1, op2;
12741 uint32_t record_buf[8];
12742
12743 op = bits (thumb2_insn_r->arm_insn, 20, 26);
12744 op1 = bits (thumb2_insn_r->arm_insn, 12, 14);
12745 op2 = bits (thumb2_insn_r->arm_insn, 8, 11);
12746
12747 /* Handle MSR insn. */
12748 if (!(op1 & 0x2) && 0x38 == op)
12749 {
12750 if (!(op2 & 0x3))
12751 {
12752 /* CPSR is going to be changed. */
12753 record_buf[0] = ARM_PS_REGNUM;
12754 thumb2_insn_r->reg_rec_count = 1;
12755 }
12756 else
12757 {
12758 arm_record_unsupported_insn(thumb2_insn_r);
12759 return -1;
12760 }
12761 }
12762 else if (4 == (op1 & 0x5) || 5 == (op1 & 0x5))
12763 {
12764 /* BLX. */
12765 record_buf[0] = ARM_PS_REGNUM;
12766 record_buf[1] = ARM_LR_REGNUM;
12767 thumb2_insn_r->reg_rec_count = 2;
12768 }
12769
12770 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12771 record_buf);
12772 return ARM_RECORD_SUCCESS;
12773 }
12774
12775 /* Handler for thumb2 store single data item instructions. */
12776
static int
thumb2_record_str_single_data (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, reg_rm, offset_imm, shift_imm;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2;

  ULONGEST u_regval[2];

  op1 = bits (thumb2_insn_r->arm_insn, 21, 23);
  op2 = bits (thumb2_insn_r->arm_insn, 6, 11);
  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

  /* First compute the effective store address.  */
  if (bit (thumb2_insn_r->arm_insn, 23))
    {
      /* T2 encoding: 12-bit unsigned immediate offset from Rn.  */
      offset_imm = bits (thumb2_insn_r->arm_insn, 0, 11);
      offset_addr = u_regval[0] + offset_imm;
      address = offset_addr;
    }
  else
    {
      /* T3 encoding.  */
      if ((0 == op1 || 1 == op1 || 2 == op1) && !(op2 & 0x20))
	{
	  /* Handle STRB (register): offset is Rm shifted left by the
	     2-bit immediate in bits 4-5.  */
	  reg_rm = bits (thumb2_insn_r->arm_insn, 0, 3);
	  regcache_raw_read_unsigned (reg_cache, reg_rm, &u_regval[1]);
	  shift_imm = bits (thumb2_insn_r->arm_insn, 4, 5);
	  offset_addr = u_regval[1] << shift_imm;
	  address = u_regval[0] + offset_addr;
	}
      else
	{
	  /* 8-bit immediate form; bit 10 (P) selects indexed
	     addressing and bit 9 (U) the offset direction.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  if (bit (thumb2_insn_r->arm_insn, 10))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 9))
		offset_addr = u_regval[0] + offset_imm;
	      else
		offset_addr = u_regval[0] - offset_imm;

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];
	}
    }

  /* Then derive the access width from op1.  */
  switch (op1)
    {
      /* Store byte instructions.  */
      case 4:
      case 0:
	record_buf_mem[0] = 1;
	break;
      /* Store half word instructions.  */
      case 1:
      case 5:
	record_buf_mem[0] = 2;
	break;
      /* Store word instructions.  */
      case 2:
      case 6:
	record_buf_mem[0] = 4;
	break;

      default:
	/* NOTE(review): op1 values 3 and 7 are not handled above;
	   presumably they cannot be produced by the caller's opcode
	   masks — verify, since this call aborts GDB if reached.  */
	gdb_assert_not_reached ("no decoding pattern found");
	break;
    }

  record_buf_mem[1] = address;
  thumb2_insn_r->mem_rec_count = 1;
  /* Base register recorded to cover the writeback forms.  */
  record_buf[0] = reg_rn;
  thumb2_insn_r->reg_rec_count = 1;

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12864
12865 /* Handler for thumb2 load memory hints instructions. */
12866
12867 static int
12868 thumb2_record_ld_mem_hints (insn_decode_record *thumb2_insn_r)
12869 {
12870 uint32_t record_buf[8];
12871 uint32_t reg_rt, reg_rn;
12872
12873 reg_rt = bits (thumb2_insn_r->arm_insn, 12, 15);
12874 reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
12875
12876 if (ARM_PC_REGNUM != reg_rt)
12877 {
12878 record_buf[0] = reg_rt;
12879 record_buf[1] = reg_rn;
12880 record_buf[2] = ARM_PS_REGNUM;
12881 thumb2_insn_r->reg_rec_count = 3;
12882
12883 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12884 record_buf);
12885 return ARM_RECORD_SUCCESS;
12886 }
12887
12888 return ARM_RECORD_FAILURE;
12889 }
12890
12891 /* Handler for thumb2 load word instructions. */
12892
12893 static int
12894 thumb2_record_ld_word (insn_decode_record *thumb2_insn_r)
12895 {
12896 uint32_t opcode1 = 0, opcode2 = 0;
12897 uint32_t record_buf[8];
12898
12899 record_buf[0] = bits (thumb2_insn_r->arm_insn, 12, 15);
12900 record_buf[1] = ARM_PS_REGNUM;
12901 thumb2_insn_r->reg_rec_count = 2;
12902
12903 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12904 record_buf);
12905 return ARM_RECORD_SUCCESS;
12906 }
12907
12908 /* Handler for thumb2 long multiply, long multiply accumulate, and
12909 divide instructions. */
12910
12911 static int
12912 thumb2_record_lmul_lmla_div (insn_decode_record *thumb2_insn_r)
12913 {
12914 uint32_t opcode1 = 0, opcode2 = 0;
12915 uint32_t record_buf[8];
12916 uint32_t reg_src1 = 0;
12917
12918 opcode1 = bits (thumb2_insn_r->arm_insn, 20, 22);
12919 opcode2 = bits (thumb2_insn_r->arm_insn, 4, 7);
12920
12921 if (0 == opcode1 || 2 == opcode1 || (opcode1 >= 4 && opcode1 <= 6))
12922 {
12923 /* Handle SMULL, UMULL, SMULAL. */
12924 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
12925 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12926 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12927 record_buf[2] = ARM_PS_REGNUM;
12928 thumb2_insn_r->reg_rec_count = 3;
12929 }
12930 else if (1 == opcode1 || 3 == opcode2)
12931 {
12932 /* Handle SDIV and UDIV. */
12933 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12934 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12935 record_buf[2] = ARM_PS_REGNUM;
12936 thumb2_insn_r->reg_rec_count = 3;
12937 }
12938 else
12939 return ARM_RECORD_FAILURE;
12940
12941 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12942 record_buf);
12943 return ARM_RECORD_SUCCESS;
12944 }
12945
12946 /* Decodes thumb2 instruction type and invokes its record handler. */
12947
static unsigned int
thumb2_record_decode_insn_handler (insn_decode_record *thumb2_insn_r)
{
  uint32_t op, op1, op2;

  /* op1 (bits 27-28) selects the major group, op2 (bits 20-26) the
     subgroup, and op (bit 15) separates branches from data processing
     within group 0x02.  The mask tests below must stay in this order:
     later tests assume the earlier ones failed.  */
  op = bit (thumb2_insn_r->arm_insn, 15);
  op1 = bits (thumb2_insn_r->arm_insn, 27, 28);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 26);

  if (op1 == 0x01)
    {
      if (!(op2 & 0x64 ))
	{
	  /* Load/store multiple instruction.  */
	  return thumb2_record_ld_st_multiple (thumb2_insn_r);
	}
      else if (!((op2 & 0x64) ^ 0x04))
	{
	  /* Load/store (dual/exclusive) and table branch instruction.  */
	  return thumb2_record_ld_st_dual_ex_tbb (thumb2_insn_r);
	}
      else if (!((op2 & 0x20) ^ 0x20))
	{
	  /* Data-processing (shifted register).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  /* NOTE(review): unlike the op1 == 0x03 case below, the result
	     of arm_record_unsupported_insn is not returned here;
	     control falls through to the final return -1.  */
	  arm_record_unsupported_insn (thumb2_insn_r);
	}
    }
  else if (op1 == 0x02)
    {
      if (op)
	{
	  /* Branches and miscellaneous control instructions.  */
	  return thumb2_record_branch_misc_cntrl (thumb2_insn_r);
	}
      else if (op2 & 0x20)
	{
	  /* Data-processing (plain binary immediate) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else
	{
	  /* Data-processing (modified immediate).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
    }
  else if (op1 == 0x03)
    {
      if (!(op2 & 0x71 ))
	{
	  /* Store single data item.  */
	  return thumb2_record_str_single_data (thumb2_insn_r);
	}
      else if (!((op2 & 0x71) ^ 0x10))
	{
	  /* Advanced SIMD or structure load/store instructions.  */
	  return arm_record_unsupported_insn (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x01))
	{
	  /* Load byte, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x03))
	{
	  /* Load halfword, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x05))
	{
	  /* Load word instruction.  */
	  return thumb2_record_ld_word (thumb2_insn_r);
	}
      else if (!((op2 & 0x70) ^ 0x20))
	{
	  /* Data-processing (register) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x30))
	{
	  /* Multiply, multiply accumulate, abs diff instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x38))
	{
	  /* Long multiply, long multiply accumulate, and divide.  */
	  return thumb2_record_lmul_lmla_div (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  return arm_record_unsupported_insn (thumb2_insn_r);
	}
    }

  /* Unrecognised instruction.  NOTE(review): the return type is
     unsigned, so -1 reaches the caller as a value different from
     ARM_RECORD_SUCCESS, which is all decode_insn checks for.  */
  return -1;
}
13049
/* Extracts arm/thumb/thumb2 insn depending on the size, and returns 0 on
   success and positive value on failure.  */
13052
13053 static int
13054 extract_arm_insn (insn_decode_record *insn_record, uint32_t insn_size)
13055 {
13056 gdb_byte buf[insn_size];
13057
13058 memset (&buf[0], 0, insn_size);
13059
13060 if (target_read_memory (insn_record->this_addr, &buf[0], insn_size))
13061 return 1;
13062 insn_record->arm_insn = (uint32_t) extract_unsigned_integer (&buf[0],
13063 insn_size,
13064 gdbarch_byte_order (insn_record->gdbarch));
13065 return 0;
13066 }
13067
13068 typedef int (*sti_arm_hdl_fp_t) (insn_decode_record*);
13069
13070 /* Decode arm/thumb insn depending on condition cods and opcodes; and
13071 dispatch it. */
13072
13073 static int
13074 decode_insn (insn_decode_record *arm_record, record_type_t record_type,
13075 uint32_t insn_size)
13076 {
13077
13078 /* (Starting from numerical 0); bits 25, 26, 27 decodes type of arm instruction. */
13079 static const sti_arm_hdl_fp_t const arm_handle_insn[8] =
13080 {
13081 arm_record_data_proc_misc_ld_str, /* 000. */
13082 arm_record_data_proc_imm, /* 001. */
13083 arm_record_ld_st_imm_offset, /* 010. */
13084 arm_record_ld_st_reg_offset, /* 011. */
13085 arm_record_ld_st_multiple, /* 100. */
13086 arm_record_b_bl, /* 101. */
13087 arm_record_unsupported_insn, /* 110. */
13088 arm_record_coproc_data_proc /* 111. */
13089 };
13090
13091 /* (Starting from numerical 0); bits 13,14,15 decodes type of thumb instruction. */
13092 static const sti_arm_hdl_fp_t const thumb_handle_insn[8] =
13093 { \
13094 thumb_record_shift_add_sub, /* 000. */
13095 thumb_record_add_sub_cmp_mov, /* 001. */
13096 thumb_record_ld_st_reg_offset, /* 010. */
13097 thumb_record_ld_st_imm_offset, /* 011. */
13098 thumb_record_ld_st_stack, /* 100. */
13099 thumb_record_misc, /* 101. */
13100 thumb_record_ldm_stm_swi, /* 110. */
13101 thumb_record_branch /* 111. */
13102 };
13103
13104 uint32_t ret = 0; /* return value: negative:failure 0:success. */
13105 uint32_t insn_id = 0;
13106
13107 if (extract_arm_insn (arm_record, insn_size))
13108 {
13109 if (record_debug)
13110 {
13111 printf_unfiltered (_("Process record: error reading memory at "
13112 "addr %s len = %d.\n"),
13113 paddress (arm_record->gdbarch, arm_record->this_addr), insn_size);
13114 }
13115 return -1;
13116 }
13117 else if (ARM_RECORD == record_type)
13118 {
13119 arm_record->cond = bits (arm_record->arm_insn, 28, 31);
13120 insn_id = bits (arm_record->arm_insn, 25, 27);
13121 ret = arm_record_extension_space (arm_record);
13122 /* If this insn has fallen into extension space
13123 then we need not decode it anymore. */
13124 if (ret != -1 && !INSN_RECORDED(arm_record))
13125 {
13126 ret = arm_handle_insn[insn_id] (arm_record);
13127 }
13128 }
13129 else if (THUMB_RECORD == record_type)
13130 {
13131 /* As thumb does not have condition codes, we set negative. */
13132 arm_record->cond = -1;
13133 insn_id = bits (arm_record->arm_insn, 13, 15);
13134 ret = thumb_handle_insn[insn_id] (arm_record);
13135 }
13136 else if (THUMB2_RECORD == record_type)
13137 {
13138 /* As thumb does not have condition codes, we set negative. */
13139 arm_record->cond = -1;
13140
13141 /* Swap first half of 32bit thumb instruction with second half. */
13142 arm_record->arm_insn
13143 = (arm_record->arm_insn >> 16) | (arm_record->arm_insn << 16);
13144
13145 insn_id = thumb2_record_decode_insn_handler (arm_record);
13146
13147 if (insn_id != ARM_RECORD_SUCCESS)
13148 {
13149 arm_record_unsupported_insn (arm_record);
13150 ret = -1;
13151 }
13152 }
13153 else
13154 {
13155 /* Throw assertion. */
13156 gdb_assert_not_reached ("not a valid instruction, could not decode");
13157 }
13158
13159 return ret;
13160 }
13161
13162
13163 /* Cleans up local record registers and memory allocations. */
13164
static void
deallocate_reg_mem (insn_decode_record *record)
{
  /* Both arrays are heap-allocated via the REG_ALLOC/MEM_ALLOC macros;
     xfree tolerates NULL, so this is safe when nothing was recorded.  */
  xfree (record->arm_regs);
  xfree (record->arm_mems);
}
13171
13172
13173 /* Parse the current instruction and record the values of the registers and
13174 memory that will be changed in current instruction to record_arch_list".
13175 Return -1 if something is wrong. */
13176
13177 int
13178 arm_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
13179 CORE_ADDR insn_addr)
13180 {
13181
13182 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
13183 uint32_t no_of_rec = 0;
13184 uint32_t ret = 0; /* return value: -1:record failure ; 0:success */
13185 ULONGEST t_bit = 0, insn_id = 0;
13186
13187 ULONGEST u_regval = 0;
13188
13189 insn_decode_record arm_record;
13190
13191 memset (&arm_record, 0, sizeof (insn_decode_record));
13192 arm_record.regcache = regcache;
13193 arm_record.this_addr = insn_addr;
13194 arm_record.gdbarch = gdbarch;
13195
13196
13197 if (record_debug > 1)
13198 {
13199 fprintf_unfiltered (gdb_stdlog, "Process record: arm_process_record "
13200 "addr = %s\n",
13201 paddress (gdbarch, arm_record.this_addr));
13202 }
13203
13204 if (extract_arm_insn (&arm_record, 2))
13205 {
13206 if (record_debug)
13207 {
13208 printf_unfiltered (_("Process record: error reading memory at "
13209 "addr %s len = %d.\n"),
13210 paddress (arm_record.gdbarch,
13211 arm_record.this_addr), 2);
13212 }
13213 return -1;
13214 }
13215
13216 /* Check the insn, whether it is thumb or arm one. */
13217
13218 t_bit = arm_psr_thumb_bit (arm_record.gdbarch);
13219 regcache_raw_read_unsigned (arm_record.regcache, ARM_PS_REGNUM, &u_regval);
13220
13221
13222 if (!(u_regval & t_bit))
13223 {
13224 /* We are decoding arm insn. */
13225 ret = decode_insn (&arm_record, ARM_RECORD, ARM_INSN_SIZE_BYTES);
13226 }
13227 else
13228 {
13229 insn_id = bits (arm_record.arm_insn, 11, 15);
13230 /* is it thumb2 insn? */
13231 if ((0x1D == insn_id) || (0x1E == insn_id) || (0x1F == insn_id))
13232 {
13233 ret = decode_insn (&arm_record, THUMB2_RECORD,
13234 THUMB2_INSN_SIZE_BYTES);
13235 }
13236 else
13237 {
13238 /* We are decoding thumb insn. */
13239 ret = decode_insn (&arm_record, THUMB_RECORD, THUMB_INSN_SIZE_BYTES);
13240 }
13241 }
13242
13243 if (0 == ret)
13244 {
13245 /* Record registers. */
13246 record_full_arch_list_add_reg (arm_record.regcache, ARM_PC_REGNUM);
13247 if (arm_record.arm_regs)
13248 {
13249 for (no_of_rec = 0; no_of_rec < arm_record.reg_rec_count; no_of_rec++)
13250 {
13251 if (record_full_arch_list_add_reg
13252 (arm_record.regcache , arm_record.arm_regs[no_of_rec]))
13253 ret = -1;
13254 }
13255 }
13256 /* Record memories. */
13257 if (arm_record.arm_mems)
13258 {
13259 for (no_of_rec = 0; no_of_rec < arm_record.mem_rec_count; no_of_rec++)
13260 {
13261 if (record_full_arch_list_add_mem
13262 ((CORE_ADDR)arm_record.arm_mems[no_of_rec].addr,
13263 arm_record.arm_mems[no_of_rec].len))
13264 ret = -1;
13265 }
13266 }
13267
13268 if (record_full_arch_list_add_end ())
13269 ret = -1;
13270 }
13271
13272
13273 deallocate_reg_mem (&arm_record);
13274
13275 return ret;
13276 }
13277
This page took 0.300161 seconds and 3 git commands to generate.