1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper () */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "target-descriptions.h"
44 #include "user-regs.h"
45
46 #include "arm-tdep.h"
47 #include "gdb/sim-arm.h"
48
49 #include "elf-bfd.h"
50 #include "coff/internal.h"
51 #include "elf/arm.h"
52
53 #include "gdb_assert.h"
54 #include "vec.h"
55
56 #include "features/arm-with-m.c"
57
58 static int arm_debug;
59
60 /* Macros for setting and testing a bit in a minimal symbol that marks
61 it as a Thumb function. The MSB of the minimal symbol's "info" field
62 is used for this purpose.
63
64 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
65 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
66
67 #define MSYMBOL_SET_SPECIAL(msym) \
68 MSYMBOL_TARGET_FLAG_1 (msym) = 1
69
70 #define MSYMBOL_IS_SPECIAL(msym) \
71 MSYMBOL_TARGET_FLAG_1 (msym)
72
73 /* Per-objfile data used for mapping symbols. */
74 static const struct objfile_data *arm_objfile_data_key;
75
76 struct arm_mapping_symbol
77 {
78 bfd_vma value;
79 char type;
80 };
81 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
82 DEF_VEC_O(arm_mapping_symbol_s);
83
84 struct arm_per_objfile
85 {
86 VEC(arm_mapping_symbol_s) **section_maps;
87 };
88
89 /* The list of available "set arm ..." and "show arm ..." commands. */
90 static struct cmd_list_element *setarmcmdlist = NULL;
91 static struct cmd_list_element *showarmcmdlist = NULL;
92
93 /* The type of floating-point to use. Keep this in sync with enum
94 arm_float_model, and the help string in _initialize_arm_tdep. */
95 static const char *fp_model_strings[] =
96 {
97 "auto",
98 "softfpa",
99 "fpa",
100 "softvfp",
101 "vfp",
102 NULL
103 };
104
105 /* A variable that can be configured by the user. */
106 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
107 static const char *current_fp_model = "auto";
108
109 /* The ABI to use. Keep this in sync with arm_abi_kind. */
110 static const char *arm_abi_strings[] =
111 {
112 "auto",
113 "APCS",
114 "AAPCS",
115 NULL
116 };
117
118 /* A variable that can be configured by the user. */
119 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
120 static const char *arm_abi_string = "auto";
121
122 /* The execution mode to assume. */
123 static const char *arm_mode_strings[] =
124 {
125 "auto",
126 "arm",
127 "thumb",
128 NULL
129 };
130
131 static const char *arm_fallback_mode_string = "auto";
132 static const char *arm_force_mode_string = "auto";
133
134 /* Number of different reg name sets (options). */
135 static int num_disassembly_options;
136
137 /* The standard register names, and all the valid aliases for them. */
138 static const struct
139 {
140 const char *name;
141 int regnum;
142 } arm_register_aliases[] = {
143 /* Basic register numbers. */
144 { "r0", 0 },
145 { "r1", 1 },
146 { "r2", 2 },
147 { "r3", 3 },
148 { "r4", 4 },
149 { "r5", 5 },
150 { "r6", 6 },
151 { "r7", 7 },
152 { "r8", 8 },
153 { "r9", 9 },
154 { "r10", 10 },
155 { "r11", 11 },
156 { "r12", 12 },
157 { "r13", 13 },
158 { "r14", 14 },
159 { "r15", 15 },
160 /* Synonyms (argument and variable registers). */
161 { "a1", 0 },
162 { "a2", 1 },
163 { "a3", 2 },
164 { "a4", 3 },
165 { "v1", 4 },
166 { "v2", 5 },
167 { "v3", 6 },
168 { "v4", 7 },
169 { "v5", 8 },
170 { "v6", 9 },
171 { "v7", 10 },
172 { "v8", 11 },
173 /* Other platform-specific names for r9. */
174 { "sb", 9 },
175 { "tr", 9 },
176 /* Special names. */
177 { "ip", 12 },
178 { "sp", 13 },
179 { "lr", 14 },
180 { "pc", 15 },
181 /* Names used by GCC (not listed in the ARM EABI). */
182 { "sl", 10 },
183 { "fp", 11 },
184 /* A special name from the older ATPCS. */
185 { "wr", 7 },
186 };
187
188 static const char *const arm_register_names[] =
189 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
190 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
191 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
192 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
193 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
194 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
195 "fps", "cpsr" }; /* 24 25 */
196
197 /* Valid register name styles. */
198 static const char **valid_disassembly_styles;
199
200 /* Disassembly style to use. Default to "std" register names. */
201 static const char *disassembly_style;
202
203 /* This is used to keep the bfd arch_info in sync with the disassembly
204 style. */
205 static void set_disassembly_style_sfunc(char *, int,
206 struct cmd_list_element *);
207 static void set_disassembly_style (void);
208
209 static void convert_from_extended (const struct floatformat *, const void *,
210 void *, int);
211 static void convert_to_extended (const struct floatformat *, void *,
212 const void *, int);
213
214 static void arm_neon_quad_read (struct gdbarch *gdbarch,
215 struct regcache *regcache,
216 int regnum, gdb_byte *buf);
217 static void arm_neon_quad_write (struct gdbarch *gdbarch,
218 struct regcache *regcache,
219 int regnum, const gdb_byte *buf);
220
221 struct arm_prologue_cache
222 {
223 /* The stack pointer at the time this frame was created; i.e. the
224 caller's stack pointer when this function was called. It is used
225 to identify this frame. */
226 CORE_ADDR prev_sp;
227
228 /* The frame base for this frame is just prev_sp - frame size.
229 FRAMESIZE is the distance from the frame pointer to the
230 initial stack pointer. */
231
232 int framesize;
233
234 /* The register used to hold the frame pointer for this frame. */
235 int framereg;
236
237 /* Saved register offsets. */
238 struct trad_frame_saved_reg *saved_regs;
239 };
240
241 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
242 CORE_ADDR prologue_start,
243 CORE_ADDR prologue_end,
244 struct arm_prologue_cache *cache);
245
246 /* Architecture version for displaced stepping. This affects the behaviour of
247 certain instructions, and really should not be hard-wired. */
248
249 #define DISPLACED_STEPPING_ARCH_VERSION 5
250
251 /* Addresses for calling Thumb functions have bit 0 set.
252 Here are some macros to test, set, or clear bit 0 of addresses. */
253 #define IS_THUMB_ADDR(addr) ((addr) & 1)
254 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
255 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
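
/* A minimal illustration, not part of the upstream file: how the
   Thumb-address macros above compose.  A Thumb routine whose first
   instruction is at 0x8000 is entered through address 0x8001, and
   clearing bit 0 recovers the real instruction address.  The function
   name below is purely hypothetical.  */

static void
example_thumb_addr_macros (void)
{
  CORE_ADDR func = 0x8000;
  CORE_ADDR entry = MAKE_THUMB_ADDR (func);   /* 0x8001 */

  gdb_assert (IS_THUMB_ADDR (entry));
  gdb_assert (UNMAKE_THUMB_ADDR (entry) == func);
}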
256
257 /* Set to true if the 32-bit mode is in use. */
258
259 int arm_apcs_32 = 1;
260
261 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
262
263 static int
264 arm_psr_thumb_bit (struct gdbarch *gdbarch)
265 {
266 if (gdbarch_tdep (gdbarch)->is_m)
267 return XPSR_T;
268 else
269 return CPSR_T;
270 }
271
272 /* Determine if FRAME is executing in Thumb mode. */
273
274 int
275 arm_frame_is_thumb (struct frame_info *frame)
276 {
277 CORE_ADDR cpsr;
278 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
279
280 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
281 directly (from a signal frame or dummy frame) or by interpreting
282 the saved LR (from a prologue or DWARF frame). So consult it and
283 trust the unwinders. */
284 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
285
286 return (cpsr & t_bit) != 0;
287 }
288
289 /* Callback for VEC_lower_bound. */
290
291 static inline int
292 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
293 const struct arm_mapping_symbol *rhs)
294 {
295 return lhs->value < rhs->value;
296 }
297
298 /* Search for the mapping symbol covering MEMADDR. If one is found,
299 return its type. Otherwise, return 0. If START is non-NULL,
300 set *START to the location of the mapping symbol. */
301
302 static char
303 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
304 {
305 struct obj_section *sec;
306
307 /* If there are mapping symbols, consult them. */
308 sec = find_pc_section (memaddr);
309 if (sec != NULL)
310 {
311 struct arm_per_objfile *data;
312 VEC(arm_mapping_symbol_s) *map;
313 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
314 0 };
315 unsigned int idx;
316
317 data = objfile_data (sec->objfile, arm_objfile_data_key);
318 if (data != NULL)
319 {
320 map = data->section_maps[sec->the_bfd_section->index];
321 if (!VEC_empty (arm_mapping_symbol_s, map))
322 {
323 struct arm_mapping_symbol *map_sym;
324
325 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
326 arm_compare_mapping_symbols);
327
328 /* VEC_lower_bound finds the earliest ordered insertion
329 point. If the following symbol starts at this exact
330 address, we use that; otherwise, the preceding
331 mapping symbol covers this address. */
332 if (idx < VEC_length (arm_mapping_symbol_s, map))
333 {
334 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
335 if (map_sym->value == map_key.value)
336 {
337 if (start)
338 *start = map_sym->value + obj_section_addr (sec);
339 return map_sym->type;
340 }
341 }
342
343 if (idx > 0)
344 {
345 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
346 if (start)
347 *start = map_sym->value + obj_section_addr (sec);
348 return map_sym->type;
349 }
350 }
351 }
352 }
353
354 return 0;
355 }
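
/* Illustrative sketch, not from the upstream file: the same "lower
   bound, then step back" lookup that arm_find_mapping_symbol performs
   with VEC_lower_bound, written against a plain sorted array (and
   using a linear scan for brevity).  'a' marks the start of ARM code
   and 't' the start of Thumb code within one section.  The helper
   name is hypothetical.  */

static char
example_mapping_lookup (const struct arm_mapping_symbol *map, int len,
                        bfd_vma offset)
{
  int idx = 0;

  /* Find the first mapping symbol at or above OFFSET (lower bound).  */
  while (idx < len && map[idx].value < offset)
    idx++;

  if (idx < len && map[idx].value == offset)
    return map[idx].type;       /* A symbol starts exactly here.  */
  if (idx > 0)
    return map[idx - 1].type;   /* Otherwise the preceding one covers it.  */
  return 0;                     /* No mapping symbol before OFFSET.  */
}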
356
357 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
358 CORE_ADDR pc, int insert_bkpt);
359
360 /* Determine if the program counter specified in MEMADDR is in a Thumb
361 function. This function should be called for addresses unrelated to
362 any executing frame; otherwise, prefer arm_frame_is_thumb. */
363
364 static int
365 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
366 {
367 struct obj_section *sec;
368 struct minimal_symbol *sym;
369 char type;
370
371 /* If bit 0 of the address is set, assume this is a Thumb address. */
372 if (IS_THUMB_ADDR (memaddr))
373 return 1;
374
375 /* If the user wants to override the symbol table, let them. */
376 if (strcmp (arm_force_mode_string, "arm") == 0)
377 return 0;
378 if (strcmp (arm_force_mode_string, "thumb") == 0)
379 return 1;
380
381 /* ARM v6-M and v7-M are always in Thumb mode. */
382 if (gdbarch_tdep (gdbarch)->is_m)
383 return 1;
384
385 /* If there are mapping symbols, consult them. */
386 type = arm_find_mapping_symbol (memaddr, NULL);
387 if (type)
388 return type == 't';
389
390 /* Thumb functions have a "special" bit set in minimal symbols. */
391 sym = lookup_minimal_symbol_by_pc (memaddr);
392 if (sym)
393 return (MSYMBOL_IS_SPECIAL (sym));
394
395 /* If the user wants to override the fallback mode, let them. */
396 if (strcmp (arm_fallback_mode_string, "arm") == 0)
397 return 0;
398 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
399 return 1;
400
401 /* If we couldn't find any symbol, but we're talking to a running
402 target, then trust the current value of $cpsr. This lets
403 "display/i $pc" always show the correct mode (though if there is
404 a symbol table we will not reach here, so it still may not be
405 displayed in the mode in which it will be executed).
406
407 As a further heuristic, if we detect that we are single-stepping, we
408 check which state executing the current instruction would leave
409 us in. */
410 if (target_has_registers)
411 {
412 struct frame_info *current_frame = get_current_frame ();
413 CORE_ADDR current_pc = get_frame_pc (current_frame);
414 int is_thumb = arm_frame_is_thumb (current_frame);
415 CORE_ADDR next_pc;
416 if (memaddr == current_pc)
417 return is_thumb;
418 else
419 {
420 struct gdbarch *gdbarch = get_frame_arch (current_frame);
421 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
422 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
423 return IS_THUMB_ADDR (next_pc);
424 else
425 return is_thumb;
426 }
427 }
428
429 /* Otherwise we're out of luck; we assume ARM. */
430 return 0;
431 }
432
433 /* Remove useless bits from addresses in a running program. */
434 static CORE_ADDR
435 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
436 {
437 if (arm_apcs_32)
438 return UNMAKE_THUMB_ADDR (val);
439 else
440 return (val & 0x03fffffc);
441 }
442
443 /* When reading symbols, we need to zap the low bit of the address,
444 which may be set to 1 for Thumb functions. */
445 static CORE_ADDR
446 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
447 {
448 return val & ~1;
449 }
450
451 /* Return 1 if PC is the start of a compiler helper function which
452 can be safely ignored during prologue skipping. */
453 static int
454 skip_prologue_function (CORE_ADDR pc)
455 {
456 struct minimal_symbol *msym;
457 const char *name;
458
459 msym = lookup_minimal_symbol_by_pc (pc);
460 if (msym == NULL || SYMBOL_VALUE_ADDRESS (msym) != pc)
461 return 0;
462
463 name = SYMBOL_LINKAGE_NAME (msym);
464 if (name == NULL)
465 return 0;
466
467 /* The GNU linker's Thumb call stub to foo is named
468 __foo_from_thumb. */
469 if (strstr (name, "_from_thumb") != NULL)
470 name += 2;
471
472 /* On soft-float targets, __truncdfsf2 is called to convert promoted
473 arguments to their argument types in non-prototyped
474 functions. */
475 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
476 return 1;
477 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
478 return 1;
479
480 /* Internal functions related to thread-local storage. */
481 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
482 return 1;
483 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
484 return 1;
485
486 return 0;
487 }
488
489 /* Support routines for instruction parsing. */
490 #define submask(x) ((1L << ((x) + 1)) - 1)
491 #define bit(obj,st) (((obj) >> (st)) & 1)
492 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
493 #define sbits(obj,st,fn) \
494 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
495 #define BranchDest(addr,instr) \
496 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
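
/* A short worked example, not part of the upstream file, of the
   helpers above: decode the ARM instruction word 0xeb000004, i.e. a
   "bl" with a signed 24-bit word offset of 4, so the target is
   pc + 8 + (4 << 2).  The function name is hypothetical.  */

static void
example_decode_branch (void)
{
  unsigned long insn = 0xeb000004;

  gdb_assert (bits (insn, 24, 27) == 0xb);    /* Branch-with-link opcode.  */
  gdb_assert (sbits (insn, 0, 23) == 4);      /* Sign-extended offset.  */
  gdb_assert (BranchDest (0x1000, insn) == 0x1018);
}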
497
498 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
499
500 static unsigned int
501 thumb_expand_immediate (unsigned int imm)
502 {
503 unsigned int count = imm >> 7;
504
505 if (count < 8)
506 switch (count / 2)
507 {
508 case 0:
509 return imm & 0xff;
510 case 1:
511 return (imm & 0xff) | ((imm & 0xff) << 16);
512 case 2:
513 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
514 case 3:
515 return (imm & 0xff) | ((imm & 0xff) << 8)
516 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
517 }
518
519 return (0x80 | (imm & 0x7f)) << (32 - count);
520 }
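
/* Worked examples, for illustration only, of the decoder above.  The
   12-bit field 0x1ab selects the "00XY00XY" replication of byte 0xab;
   0x4ab selects the rotated form, (0x80 | 0x2b) rotated right by 9,
   which is 0xab << 23 = 0x55800000.  The function name is
   hypothetical.  */

static void
example_thumb_expand_immediate (void)
{
  gdb_assert (thumb_expand_immediate (0x0ab) == 0x000000ab);
  gdb_assert (thumb_expand_immediate (0x1ab) == 0x00ab00ab);
  gdb_assert (thumb_expand_immediate (0x4ab) == 0x55800000);
}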
521
522 /* Return 1 if the 16-bit Thumb instruction INST might change
523 control flow, 0 otherwise. */
524
525 static int
526 thumb_instruction_changes_pc (unsigned short inst)
527 {
528 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
529 return 1;
530
531 if ((inst & 0xf000) == 0xd000) /* conditional branch */
532 return 1;
533
534 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
535 return 1;
536
537 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
538 return 1;
539
540 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
541 return 1;
542
543 return 0;
544 }
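
/* Quick illustration, not upstream: classify two 16-bit Thumb
   instructions with the predicate above.  0x4770 is "bx lr" (matches
   the bx/blx pattern); 0x2000 is "movs r0, #0" (matches nothing).
   The function name is hypothetical.  */

static void
example_thumb_changes_pc (void)
{
  gdb_assert (thumb_instruction_changes_pc (0x4770));
  gdb_assert (!thumb_instruction_changes_pc (0x2000));
}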
545
546 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
547 might change control flow, 0 otherwise. */
548
549 static int
550 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
551 {
552 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
553 {
554 /* Branches and miscellaneous control instructions. */
555
556 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
557 {
558 /* B, BL, BLX. */
559 return 1;
560 }
561 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
562 {
563 /* SUBS PC, LR, #imm8. */
564 return 1;
565 }
566 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
567 {
568 /* Conditional branch. */
569 return 1;
570 }
571
572 return 0;
573 }
574
575 if ((inst1 & 0xfe50) == 0xe810)
576 {
577 /* Load multiple or RFE. */
578
579 if (bit (inst1, 7) && !bit (inst1, 8))
580 {
581 /* LDMIA or POP */
582 if (bit (inst2, 15))
583 return 1;
584 }
585 else if (!bit (inst1, 7) && bit (inst1, 8))
586 {
587 /* LDMDB */
588 if (bit (inst2, 15))
589 return 1;
590 }
591 else if (bit (inst1, 7) && bit (inst1, 8))
592 {
593 /* RFEIA */
594 return 1;
595 }
596 else if (!bit (inst1, 7) && !bit (inst1, 8))
597 {
598 /* RFEDB */
599 return 1;
600 }
601
602 return 0;
603 }
604
605 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
606 {
607 /* MOV PC or MOVS PC. */
608 return 1;
609 }
610
611 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
612 {
613 /* LDR PC. */
614 if (bits (inst1, 0, 3) == 15)
615 return 1;
616 if (bit (inst1, 7))
617 return 1;
618 if (bit (inst2, 11))
619 return 1;
620 if ((inst2 & 0x0fc0) == 0x0000)
621 return 1;
622
623 return 0;
624 }
625
626 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
627 {
628 /* TBB. */
629 return 1;
630 }
631
632 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
633 {
634 /* TBH. */
635 return 1;
636 }
637
638 return 0;
639 }
640
641 /* Analyze a Thumb prologue, looking for a recognizable stack frame
642 and frame pointer. Scan until we encounter a store that could
643 clobber the stack frame unexpectedly, or an unknown instruction.
644 Return the last address which is definitely safe to skip for an
645 initial breakpoint. */
646
647 static CORE_ADDR
648 thumb_analyze_prologue (struct gdbarch *gdbarch,
649 CORE_ADDR start, CORE_ADDR limit,
650 struct arm_prologue_cache *cache)
651 {
652 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
653 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
654 int i;
655 pv_t regs[16];
656 struct pv_area *stack;
657 struct cleanup *back_to;
658 CORE_ADDR offset;
659 CORE_ADDR unrecognized_pc = 0;
660
661 for (i = 0; i < 16; i++)
662 regs[i] = pv_register (i, 0);
663 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
664 back_to = make_cleanup_free_pv_area (stack);
665
666 while (start < limit)
667 {
668 unsigned short insn;
669
670 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
671
672 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
673 {
674 int regno;
675 int mask;
676
677 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
678 break;
679
680 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
681 whether to save LR (R14). */
682 mask = (insn & 0xff) | ((insn & 0x100) << 6);
683
684 /* Calculate offsets of saved R0-R7 and LR. */
685 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
686 if (mask & (1 << regno))
687 {
688 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
689 -4);
690 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
691 }
692 }
693 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
694 sub sp, #simm */
695 {
696 offset = (insn & 0x7f) << 2; /* get scaled offset */
697 if (insn & 0x80) /* Check for SUB. */
698 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
699 -offset);
700 else
701 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
702 offset);
703 }
704 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
705 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
706 (insn & 0xff) << 2);
707 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
708 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
709 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
710 bits (insn, 6, 8));
711 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
712 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
713 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
714 bits (insn, 0, 7));
715 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
716 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
717 && pv_is_constant (regs[bits (insn, 3, 5)]))
718 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
719 regs[bits (insn, 6, 8)]);
720 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
721 && pv_is_constant (regs[bits (insn, 3, 6)]))
722 {
723 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
724 int rm = bits (insn, 3, 6);
725 regs[rd] = pv_add (regs[rd], regs[rm]);
726 }
727 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
728 {
729 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
730 int src_reg = (insn & 0x78) >> 3;
731 regs[dst_reg] = regs[src_reg];
732 }
733 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
734 {
735 /* Handle stores to the stack. Normally pushes are used,
736 but with GCC -mtpcs-frame, there may be other stores
737 in the prologue to create the frame. */
738 int regno = (insn >> 8) & 0x7;
739 pv_t addr;
740
741 offset = (insn & 0xff) << 2;
742 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
743
744 if (pv_area_store_would_trash (stack, addr))
745 break;
746
747 pv_area_store (stack, addr, 4, regs[regno]);
748 }
749 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
750 {
751 int rd = bits (insn, 0, 2);
752 int rn = bits (insn, 3, 5);
753 pv_t addr;
754
755 offset = bits (insn, 6, 10) << 2;
756 addr = pv_add_constant (regs[rn], offset);
757
758 if (pv_area_store_would_trash (stack, addr))
759 break;
760
761 pv_area_store (stack, addr, 4, regs[rd]);
762 }
763 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
764 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
765 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
766 /* Ignore stores of argument registers to the stack. */
767 ;
768 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
769 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
770 /* Ignore block loads from the stack, potentially copying
771 parameters from memory. */
772 ;
773 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
774 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
775 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
776 /* Similarly ignore single loads from the stack. */
777 ;
778 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
779 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
780 /* Skip register copies, i.e. saves to another register
781 instead of the stack. */
782 ;
783 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
784 /* Recognize constant loads; even with small stacks these are necessary
785 on Thumb. */
786 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
787 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
788 {
789 /* Constant pool loads, for the same reason. */
790 unsigned int constant;
791 CORE_ADDR loc;
792
793 loc = start + 4 + bits (insn, 0, 7) * 4;
794 constant = read_memory_unsigned_integer (loc, 4, byte_order);
795 regs[bits (insn, 8, 10)] = pv_constant (constant);
796 }
797 else if ((insn & 0xe000) == 0xe000)
798 {
799 unsigned short inst2;
800
801 inst2 = read_memory_unsigned_integer (start + 2, 2,
802 byte_order_for_code);
803
804 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
805 {
806 /* BL, BLX. Allow some special function calls when
807 skipping the prologue; GCC generates these before
808 storing arguments to the stack. */
809 CORE_ADDR nextpc;
810 int j1, j2, imm1, imm2;
811
812 imm1 = sbits (insn, 0, 10);
813 imm2 = bits (inst2, 0, 10);
814 j1 = bit (inst2, 13);
815 j2 = bit (inst2, 11);
816
817 offset = ((imm1 << 12) + (imm2 << 1));
818 offset ^= ((!j2) << 22) | ((!j1) << 23);
819
820 nextpc = start + 4 + offset;
821 /* For BLX make sure to clear the low bits. */
822 if (bit (inst2, 12) == 0)
823 nextpc = nextpc & 0xfffffffc;
824
825 if (!skip_prologue_function (nextpc))
826 break;
827 }
828
829 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!}, { registers } */
830 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
831 {
832 pv_t addr = regs[bits (insn, 0, 3)];
833 int regno;
834
835 if (pv_area_store_would_trash (stack, addr))
836 break;
837
838 /* Calculate offsets of saved registers. */
839 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
840 if (inst2 & (1 << regno))
841 {
842 addr = pv_add_constant (addr, -4);
843 pv_area_store (stack, addr, 4, regs[regno]);
844 }
845
846 if (insn & 0x0020)
847 regs[bits (insn, 0, 3)] = addr;
848 }
849
850 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2, [Rn, #+/-imm]{!} */
851 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
852 {
853 int regno1 = bits (inst2, 12, 15);
854 int regno2 = bits (inst2, 8, 11);
855 pv_t addr = regs[bits (insn, 0, 3)];
856
857 offset = inst2 & 0xff;
858 if (insn & 0x0080)
859 addr = pv_add_constant (addr, offset);
860 else
861 addr = pv_add_constant (addr, -offset);
862
863 if (pv_area_store_would_trash (stack, addr))
864 break;
865
866 pv_area_store (stack, addr, 4, regs[regno1]);
867 pv_area_store (stack, pv_add_constant (addr, 4),
868 4, regs[regno2]);
869
870 if (insn & 0x0020)
871 regs[bits (insn, 0, 3)] = addr;
872 }
873
874 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
875 && (inst2 & 0x0c00) == 0x0c00
876 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
877 {
878 int regno = bits (inst2, 12, 15);
879 pv_t addr = regs[bits (insn, 0, 3)];
880
881 offset = inst2 & 0xff;
882 if (inst2 & 0x0200)
883 addr = pv_add_constant (addr, offset);
884 else
885 addr = pv_add_constant (addr, -offset);
886
887 if (pv_area_store_would_trash (stack, addr))
888 break;
889
890 pv_area_store (stack, addr, 4, regs[regno]);
891
892 if (inst2 & 0x0100)
893 regs[bits (insn, 0, 3)] = addr;
894 }
895
896 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
897 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
898 {
899 int regno = bits (inst2, 12, 15);
900 pv_t addr;
901
902 offset = inst2 & 0xfff;
903 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
904
905 if (pv_area_store_would_trash (stack, addr))
906 break;
907
908 pv_area_store (stack, addr, 4, regs[regno]);
909 }
910
911 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
912 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
913 /* Ignore stores of argument registers to the stack. */
914 ;
915
916 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
917 && (inst2 & 0x0d00) == 0x0c00
918 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
919 /* Ignore stores of argument registers to the stack. */
920 ;
921
922 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!], { registers } */
923 && (inst2 & 0x8000) == 0x0000
924 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
925 /* Ignore block loads from the stack, potentially copying
926 parameters from memory. */
927 ;
928
929 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2, [Rn, #+/-imm] */
930 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
931 /* Similarly ignore dual loads from the stack. */
932 ;
933
934 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
935 && (inst2 & 0x0d00) == 0x0c00
936 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
937 /* Similarly ignore single loads from the stack. */
938 ;
939
940 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
941 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
942 /* Similarly ignore single loads from the stack. */
943 ;
944
945 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
946 && (inst2 & 0x8000) == 0x0000)
947 {
948 unsigned int imm = ((bits (insn, 10, 10) << 11)
949 | (bits (inst2, 12, 14) << 8)
950 | bits (inst2, 0, 7));
951
952 regs[bits (inst2, 8, 11)]
953 = pv_add_constant (regs[bits (insn, 0, 3)],
954 thumb_expand_immediate (imm));
955 }
956
957 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
958 && (inst2 & 0x8000) == 0x0000)
959 {
960 unsigned int imm = ((bits (insn, 10, 10) << 11)
961 | (bits (inst2, 12, 14) << 8)
962 | bits (inst2, 0, 7));
963
964 regs[bits (inst2, 8, 11)]
965 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
966 }
967
968 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
969 && (inst2 & 0x8000) == 0x0000)
970 {
971 unsigned int imm = ((bits (insn, 10, 10) << 11)
972 | (bits (inst2, 12, 14) << 8)
973 | bits (inst2, 0, 7));
974
975 regs[bits (inst2, 8, 11)]
976 = pv_add_constant (regs[bits (insn, 0, 3)],
977 - (CORE_ADDR) thumb_expand_immediate (imm));
978 }
979
980 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
981 && (inst2 & 0x8000) == 0x0000)
982 {
983 unsigned int imm = ((bits (insn, 10, 10) << 11)
984 | (bits (inst2, 12, 14) << 8)
985 | bits (inst2, 0, 7));
986
987 regs[bits (inst2, 8, 11)]
988 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
989 }
990
991 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
992 {
993 unsigned int imm = ((bits (insn, 10, 10) << 11)
994 | (bits (inst2, 12, 14) << 8)
995 | bits (inst2, 0, 7));
996
997 regs[bits (inst2, 8, 11)]
998 = pv_constant (thumb_expand_immediate (imm));
999 }
1000
1001 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1002 {
1003 unsigned int imm = ((bits (insn, 0, 3) << 12)
1004 | (bits (insn, 10, 10) << 11)
1005 | (bits (inst2, 12, 14) << 8)
1006 | bits (inst2, 0, 7));
1007
1008 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1009 }
1010
1011 else if (insn == 0xea5f /* mov.w Rd,Rm */
1012 && (inst2 & 0xf0f0) == 0)
1013 {
1014 int dst_reg = (inst2 & 0x0f00) >> 8;
1015 int src_reg = inst2 & 0xf;
1016 regs[dst_reg] = regs[src_reg];
1017 }
1018
1019 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1020 {
1021 /* Constant pool loads. */
1022 unsigned int constant;
1023 CORE_ADDR loc;
1024
1025 offset = bits (insn, 0, 11);
1026 if (insn & 0x0080)
1027 loc = start + 4 + offset;
1028 else
1029 loc = start + 4 - offset;
1030
1031 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1032 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1033 }
1034
1035 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1036 {
1037 /* Constant pool loads. */
1038 unsigned int constant;
1039 CORE_ADDR loc;
1040
1041 offset = bits (insn, 0, 7) << 2;
1042 if (insn & 0x0080)
1043 loc = start + 4 + offset;
1044 else
1045 loc = start + 4 - offset;
1046
1047 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1048 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1049
1050 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1051 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1052 }
1053
1054 else if (thumb2_instruction_changes_pc (insn, inst2))
1055 {
1056 /* Don't scan past anything that might change control flow. */
1057 break;
1058 }
1059 else
1060 {
1061 /* The optimizer might shove anything into the prologue,
1062 so we just skip what we don't recognize. */
1063 unrecognized_pc = start;
1064 }
1065
1066 start += 2;
1067 }
1068 else if (thumb_instruction_changes_pc (insn))
1069 {
1070 /* Don't scan past anything that might change control flow. */
1071 break;
1072 }
1073 else
1074 {
1075 /* The optimizer might shove anything into the prologue,
1076 so we just skip what we don't recognize. */
1077 unrecognized_pc = start;
1078 }
1079
1080 start += 2;
1081 }
1082
1083 if (arm_debug)
1084 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1085 paddress (gdbarch, start));
1086
1087 if (unrecognized_pc == 0)
1088 unrecognized_pc = start;
1089
1090 if (cache == NULL)
1091 {
1092 do_cleanups (back_to);
1093 return unrecognized_pc;
1094 }
1095
1096 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1097 {
1098 /* Frame pointer is fp. Frame size is constant. */
1099 cache->framereg = ARM_FP_REGNUM;
1100 cache->framesize = -regs[ARM_FP_REGNUM].k;
1101 }
1102 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1103 {
1104 /* Frame pointer is r7. Frame size is constant. */
1105 cache->framereg = THUMB_FP_REGNUM;
1106 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1107 }
1108 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1109 {
1110 /* Try the stack pointer... this is a bit desperate. */
1111 cache->framereg = ARM_SP_REGNUM;
1112 cache->framesize = -regs[ARM_SP_REGNUM].k;
1113 }
1114 else
1115 {
1116 /* We're just out of luck. We don't know where the frame is. */
1117 cache->framereg = -1;
1118 cache->framesize = 0;
1119 }
1120
1121 for (i = 0; i < 16; i++)
1122 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1123 cache->saved_regs[i].addr = offset;
1124
1125 do_cleanups (back_to);
1126 return unrecognized_pc;
1127 }
1128
1129 /* Advance the PC across any function entry prologue instructions to
1130 reach some "real" code.
1131
1132 The APCS (ARM Procedure Call Standard) defines the following
1133 prologue:
1134
1135 mov ip, sp
1136 [stmfd sp!, {a1,a2,a3,a4}]
1137 stmfd sp!, {...,fp,ip,lr,pc}
1138 [stfe f7, [sp, #-12]!]
1139 [stfe f6, [sp, #-12]!]
1140 [stfe f5, [sp, #-12]!]
1141 [stfe f4, [sp, #-12]!]
1142 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn */
1143
1144 static CORE_ADDR
1145 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1146 {
1147 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1148 unsigned long inst;
1149 CORE_ADDR skip_pc;
1150 CORE_ADDR func_addr, limit_pc;
1151 struct symtab_and_line sal;
1152
1153 /* See if we can determine the end of the prologue via the symbol table.
1154 If so, then return either PC, or the PC after the prologue, whichever
1155 is greater. */
1156 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1157 {
1158 CORE_ADDR post_prologue_pc
1159 = skip_prologue_using_sal (gdbarch, func_addr);
1160 struct symtab *s = find_pc_symtab (func_addr);
1161
1162 /* GCC always emits a line note before the prologue and another
1163 one after, even if the two are at the same address or on the
1164 same line. Take advantage of this so that we do not need to
1165 know every instruction that might appear in the prologue. We
1166 will have producer information for most binaries; if it is
1167 missing (e.g. for -gstabs), assume the GNU tools. */
1168 if (post_prologue_pc
1169 && (s == NULL
1170 || s->producer == NULL
1171 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1172 return post_prologue_pc;
1173
1174 if (post_prologue_pc != 0)
1175 {
1176 CORE_ADDR analyzed_limit;
1177
1178 /* For non-GCC compilers, make sure the entire line is an
1179 acceptable prologue; GDB will round this function's
1180 return value up to the end of the following line so we
1181 can not skip just part of a line (and we do not want to).
1182
1183 RealView does not treat the prologue specially, but does
1184 associate prologue code with the opening brace; so this
1185 lets us skip the first line if we think it is the opening
1186 brace. */
1187 if (arm_pc_is_thumb (gdbarch, func_addr))
1188 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1189 post_prologue_pc, NULL);
1190 else
1191 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1192 post_prologue_pc, NULL);
1193
1194 if (analyzed_limit != post_prologue_pc)
1195 return func_addr;
1196
1197 return post_prologue_pc;
1198 }
1199 }
1200
1201 /* Can't determine prologue from the symbol table, need to examine
1202 instructions. */
1203
1204 /* Find an upper limit on the function prologue using the debug
1205 information. If the debug information could not be used to provide
1206 that bound, then use an arbitrary large number as the upper bound. */
1207 /* Like arm_scan_prologue, stop no later than pc + 64. */
1208 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1209 if (limit_pc == 0)
1210 limit_pc = pc + 64; /* Magic. */
1211
1212
1213 /* Check if this is Thumb code. */
1214 if (arm_pc_is_thumb (gdbarch, pc))
1215 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1216
1217 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1218 {
1219 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1220
1221 /* "mov ip, sp" is no longer a required part of the prologue. */
1222 if (inst == 0xe1a0c00d) /* mov ip, sp */
1223 continue;
1224
1225 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp, #n */
1226 continue;
1227
1228 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp, #n */
1229 continue;
1230
1231 /* Some prologues begin with "str lr, [sp, #-4]!". */
1232 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1233 continue;
1234
1235 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1236 continue;
1237
1238 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1239 continue;
1240
1241 /* Any insns after this point may float into the code, if it makes
1242 for better instruction scheduling, so we skip them only if we
1243 find them, but still consider the function to be frame-ful. */
1244
1245 /* We may have either one sfmfd instruction here, or several stfe
1246 insns, depending on the version of floating point code we
1247 support. */
1248 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1249 continue;
1250
1251 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1252 continue;
1253
1254 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1255 continue;
1256
1257 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1258 continue;
1259
1260 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1261 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1262 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1263 continue;
1264
1265 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1266 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1267 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1268 continue;
1269
1270 /* Unrecognized instruction; stop scanning. */
1271 break;
1272 }
1273
1274 return skip_pc; /* End of prologue */
1275 }
1276
1277 /* *INDENT-OFF* */
1278 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1279 This function decodes a Thumb function prologue to determine:
1280 1) the size of the stack frame
1281 2) which registers are saved on it
1282 3) the offsets of saved regs
1283 4) the offset from the stack pointer to the frame pointer
1284
1285 A typical Thumb function prologue would create this stack frame
1286 (offsets relative to FP)
1287 old SP -> 24 stack parameters
1288 20 LR
1289 16 R7
1290 R7 -> 0 local variables (16 bytes)
1291 SP -> -12 additional stack space (12 bytes)
1292 The frame size would thus be 36 bytes, and the frame offset would be
1293 12 bytes. The frame register is R7.
1294
1295 The comments for thumb_analyze_prologue() describe the algorithm we use
1296 to detect the end of the prologue. */
1297 /* *INDENT-ON* */
1298
1299 static void
1300 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1301 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1302 {
1303 CORE_ADDR prologue_start;
1304 CORE_ADDR prologue_end;
1305 CORE_ADDR current_pc;
1306
1307 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1308 &prologue_end))
1309 {
1310 /* See comment in arm_scan_prologue for an explanation of
1311 this heuristic. */
1312 if (prologue_end > prologue_start + 64)
1313 {
1314 prologue_end = prologue_start + 64;
1315 }
1316 }
1317 else
1318 /* We're in the boondocks: we have no idea where the start of the
1319 function is. */
1320 return;
1321
1322 prologue_end = min (prologue_end, prev_pc);
1323
1324 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1325 }
1326
1327 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1328
1329 static int
1330 arm_instruction_changes_pc (uint32_t this_instr)
1331 {
1332 if (bits (this_instr, 28, 31) == INST_NV)
1333 /* Unconditional instructions. */
1334 switch (bits (this_instr, 24, 27))
1335 {
1336 case 0xa:
1337 case 0xb:
1338 /* Branch with Link and change to Thumb. */
1339 return 1;
1340 case 0xc:
1341 case 0xd:
1342 case 0xe:
1343 /* Coprocessor register transfer. */
1344 if (bits (this_instr, 12, 15) == 15)
1345 error (_("Invalid update to pc in instruction"));
1346 return 0;
1347 default:
1348 return 0;
1349 }
1350 else
1351 switch (bits (this_instr, 25, 27))
1352 {
1353 case 0x0:
1354 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1355 {
1356 /* Multiplies and extra load/stores. */
1357 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1358 /* Neither multiplies nor extension load/stores are allowed
1359 to modify PC. */
1360 return 0;
1361
1362 /* Otherwise, miscellaneous instructions. */
1363
1364 /* BX <reg>, BXJ <reg>, BLX <reg> */
1365 if (bits (this_instr, 4, 27) == 0x12fff1
1366 || bits (this_instr, 4, 27) == 0x12fff2
1367 || bits (this_instr, 4, 27) == 0x12fff3)
1368 return 1;
1369
1370 /* Other miscellaneous instructions are unpredictable if they
1371 modify PC. */
1372 return 0;
1373 }
1374 /* Data processing instruction. Fall through. */
1375
1376 case 0x1:
1377 if (bits (this_instr, 12, 15) == 15)
1378 return 1;
1379 else
1380 return 0;
1381
1382 case 0x2:
1383 case 0x3:
1384 /* Media instructions and architecturally undefined instructions. */
1385 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1386 return 0;
1387
1388 /* Stores. */
1389 if (bit (this_instr, 20) == 0)
1390 return 0;
1391
1392 /* Loads. */
1393 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1394 return 1;
1395 else
1396 return 0;
1397
1398 case 0x4:
1399 /* Load/store multiple. */
1400 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1401 return 1;
1402 else
1403 return 0;
1404
1405 case 0x5:
1406 /* Branch and branch with link. */
1407 return 1;
1408
1409 case 0x6:
1410 case 0x7:
1411 /* Coprocessor transfers or SWIs can not affect PC. */
1412 return 0;
1413
1414 default:
1415 internal_error (__FILE__, __LINE__, "bad value in switch");
1416 }
1417 }
1418
1419 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1420 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1421 fill it in. Return the first address not recognized as a prologue
1422 instruction.
1423
1424 We recognize all the instructions typically found in ARM prologues,
1425 plus harmless instructions which can be skipped (either for analysis
1426 purposes, or a more restrictive set that can be skipped when finding
1427 the end of the prologue). */
1428
1429 static CORE_ADDR
1430 arm_analyze_prologue (struct gdbarch *gdbarch,
1431 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1432 struct arm_prologue_cache *cache)
1433 {
1434 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1435 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1436 int regno;
1437 CORE_ADDR offset, current_pc;
1438 pv_t regs[ARM_FPS_REGNUM];
1439 struct pv_area *stack;
1440 struct cleanup *back_to;
1441 int framereg, framesize;
1442 CORE_ADDR unrecognized_pc = 0;
1443
1444 /* Search the prologue looking for instructions that set up the
1445 frame pointer, adjust the stack pointer, and save registers.
1446
1447 Be careful, however, and if it doesn't look like a prologue,
1448 don't try to scan it. If, for instance, a frameless function
1449 begins with stmfd sp!, then we will tell ourselves there is
1450 a frame, which will confuse stack traceback, as well as "finish"
1451 and other operations that rely on a knowledge of the stack
1452 traceback. */
1453
1454 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1455 regs[regno] = pv_register (regno, 0);
1456 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1457 back_to = make_cleanup_free_pv_area (stack);
1458
1459 for (current_pc = prologue_start;
1460 current_pc < prologue_end;
1461 current_pc += 4)
1462 {
1463 unsigned int insn
1464 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1465
1466 if (insn == 0xe1a0c00d) /* mov ip, sp */
1467 {
1468 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1469 continue;
1470 }
1471 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1472 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1473 {
1474 unsigned imm = insn & 0xff; /* immediate value */
1475 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1476 int rd = bits (insn, 12, 15);
1477 imm = (imm >> rot) | (imm << (32 - rot));
1478 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1479 continue;
1480 }
1481 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1482 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1483 {
1484 unsigned imm = insn & 0xff; /* immediate value */
1485 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1486 int rd = bits (insn, 12, 15);
1487 imm = (imm >> rot) | (imm << (32 - rot));
1488 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1489 continue;
1490 }
1491 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd, [sp, #-4]! */
1492 {
1493 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1494 break;
1495 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1496 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1497 regs[bits (insn, 12, 15)]);
1498 continue;
1499 }
1500 else if ((insn & 0xffff0000) == 0xe92d0000)
1501 /* stmfd sp!, {..., fp, ip, lr, pc}
1502 or
1503 stmfd sp!, {a1, a2, a3, a4} */
1504 {
1505 int mask = insn & 0xffff;
1506
1507 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1508 break;
1509
1510 /* Calculate offsets of saved registers. */
1511 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1512 if (mask & (1 << regno))
1513 {
1514 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1515 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1516 }
1517 }
1518 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1519 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1520 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1521 {
1522 /* No need to add this to saved_regs -- it's just an arg reg. */
1523 continue;
1524 }
1525 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1526 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1527 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1528 {
1529 /* No need to add this to saved_regs -- it's just an arg reg. */
1530 continue;
1531 }
1532 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn, { registers } */
1533 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1534 {
1535 /* No need to add this to saved_regs -- it's just arg regs. */
1536 continue;
1537 }
1538 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #n */
1539 {
1540 unsigned imm = insn & 0xff; /* immediate value */
1541 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1542 imm = (imm >> rot) | (imm << (32 - rot));
1543 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1544 }
1545 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #n */
1546 {
1547 unsigned imm = insn & 0xff; /* immediate value */
1548 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1549 imm = (imm >> rot) | (imm << (32 - rot));
1550 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1551 }
1552 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?, [sp, -#c]! */
1553 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1554 {
1555 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1556 break;
1557
1558 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1559 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1560 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1561 }
1562 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4, [sp!] */
1563 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1564 {
1565 int n_saved_fp_regs;
1566 unsigned int fp_start_reg, fp_bound_reg;
1567
1568 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1569 break;
1570
1571 if ((insn & 0x800) == 0x800) /* N0 is set */
1572 {
1573 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1574 n_saved_fp_regs = 3;
1575 else
1576 n_saved_fp_regs = 1;
1577 }
1578 else
1579 {
1580 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1581 n_saved_fp_regs = 2;
1582 else
1583 n_saved_fp_regs = 4;
1584 }
1585
1586 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1587 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1588 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1589 {
1590 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1591 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1592 regs[fp_start_reg++]);
1593 }
1594 }
1595 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1596 {
1597 /* Allow some special function calls when skipping the
1598 prologue; GCC generates these before storing arguments to
1599 the stack. */
1600 CORE_ADDR dest = BranchDest (current_pc, insn);
1601
1602 if (skip_prologue_function (dest))
1603 continue;
1604 else
1605 break;
1606 }
1607 else if ((insn & 0xf0000000) != 0xe0000000)
1608 break; /* Condition not true, exit early */
1609 else if (arm_instruction_changes_pc (insn))
1610 /* Don't scan past anything that might change control flow. */
1611 break;
1612 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1613 {
1614 /* Ignore block loads from the stack, potentially copying
1615 parameters from memory. */
1616 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1617 continue;
1618 else
1619 break;
1620 }
1621 else if ((insn & 0xfc500000) == 0xe4100000)
1622 {
1623 /* Similarly ignore single loads from the stack. */
1624 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1625 continue;
1626 else
1627 break;
1628 }
1629 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1630 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1631 register instead of the stack. */
1632 continue;
1633 else
1634 {
1635 /* The optimizer might shove anything into the prologue,
1636 so we just skip what we don't recognize. */
1637 unrecognized_pc = current_pc;
1638 continue;
1639 }
1640 }
1641
1642 if (unrecognized_pc == 0)
1643 unrecognized_pc = current_pc;
1644
1645 /* The frame size is just the distance from the frame register
1646 to the original stack pointer. */
1647 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1648 {
1649 /* Frame pointer is fp. */
1650 framereg = ARM_FP_REGNUM;
1651 framesize = -regs[ARM_FP_REGNUM].k;
1652 }
1653 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1654 {
1655 /* Try the stack pointer... this is a bit desperate. */
1656 framereg = ARM_SP_REGNUM;
1657 framesize = -regs[ARM_SP_REGNUM].k;
1658 }
1659 else
1660 {
1661 /* We're just out of luck. We don't know where the frame is. */
1662 framereg = -1;
1663 framesize = 0;
1664 }
1665
1666 if (cache)
1667 {
1668 cache->framereg = framereg;
1669 cache->framesize = framesize;
1670
1671 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1672 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1673 cache->saved_regs[regno].addr = offset;
1674 }
1675
1676 if (arm_debug)
1677 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1678 paddress (gdbarch, unrecognized_pc));
1679
1680 do_cleanups (back_to);
1681 return unrecognized_pc;
1682 }
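
/* A minimal worked example, not part of the upstream file, of the ARM
   modified-immediate decode used several times in the scan above: an
   8-bit value rotated right by twice the 4-bit rotate field.
   0xe24ddb01 is one encoding of "sub sp, sp, #1024" (imm8 = 0x01,
   rotate = 2 * 11 = 22).  The function name is hypothetical.  */

static void
example_arm_immediate_decode (void)
{
  unsigned int insn = 0xe24ddb01;
  unsigned imm = insn & 0xff;                 /* 0x01 */
  unsigned rot = (insn & 0xf00) >> 7;         /* 22 */

  imm = (imm >> rot) | (imm << (32 - rot));   /* Rotate right by ROT.  */
  gdb_assert (imm == 1024);
}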
1683
1684 static void
1685 arm_scan_prologue (struct frame_info *this_frame,
1686 struct arm_prologue_cache *cache)
1687 {
1688 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1689 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1690 int regno;
1691 CORE_ADDR prologue_start, prologue_end, current_pc;
1692 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1693 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1694 pv_t regs[ARM_FPS_REGNUM];
1695 struct pv_area *stack;
1696 struct cleanup *back_to;
1697 CORE_ADDR offset;
1698
1699 /* Assume there is no frame until proven otherwise. */
1700 cache->framereg = ARM_SP_REGNUM;
1701 cache->framesize = 0;
1702
1703 /* Check for Thumb prologue. */
1704 if (arm_frame_is_thumb (this_frame))
1705 {
1706 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1707 return;
1708 }
1709
1710 /* Find the function prologue. If we can't find the function in
1711 the symbol table, peek in the stack frame to find the PC. */
1712 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1713 &prologue_end))
1714 {
1715 /* One way to find the end of the prologue (which works well
1716 for unoptimized code) is to do the following:
1717
1718 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1719
1720 if (sal.line == 0)
1721 prologue_end = prev_pc;
1722 else if (sal.end < prologue_end)
1723 prologue_end = sal.end;
1724
1725 This mechanism is very accurate so long as the optimizer
1726 doesn't move any instructions from the function body into the
1727 prologue. If this happens, sal.end will be the last
1728 instruction in the first hunk of prologue code just before
1729 the first instruction that the scheduler has moved from
1730 the body to the prologue.
1731
1732 In order to make sure that we scan all of the prologue
1733 instructions, we use a slightly less accurate mechanism which
1734 may scan more than necessary. To help compensate for this
1735 lack of accuracy, the prologue scanning loop below contains
1736 several clauses which'll cause the loop to terminate early if
1737 an implausible prologue instruction is encountered.
1738
1739 The expression
1740
1741 prologue_start + 64
1742
1743 is a suitable endpoint since it accounts for the largest
1744 possible prologue plus up to five instructions inserted by
1745 the scheduler. */
1746
1747 if (prologue_end > prologue_start + 64)
1748 {
1749 prologue_end = prologue_start + 64; /* See above. */
1750 }
1751 }
1752 else
1753 {
1754 /* We have no symbol information. Our only option is to assume this
1755 function has a standard stack frame and the normal frame register.
1756 Then, we can find the value of our frame pointer on entrance to
1757 the callee (or at the present moment if this is the innermost frame).
1758 The value stored there should be the address of the stmfd + 8. */
1759 CORE_ADDR frame_loc;
1760 LONGEST return_value;
1761
1762 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1763 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1764 return;
1765 else
1766 {
1767 prologue_start = gdbarch_addr_bits_remove
1768 (gdbarch, return_value) - 8;
1769 prologue_end = prologue_start + 64; /* See above. */
1770 }
1771 }
1772
1773 if (prev_pc < prologue_end)
1774 prologue_end = prev_pc;
1775
1776 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1777 }
1778
1779 static struct arm_prologue_cache *
1780 arm_make_prologue_cache (struct frame_info *this_frame)
1781 {
1782 int reg;
1783 struct arm_prologue_cache *cache;
1784 CORE_ADDR unwound_fp;
1785
1786 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1787 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1788
1789 arm_scan_prologue (this_frame, cache);
1790
1791 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1792 if (unwound_fp == 0)
1793 return cache;
1794
1795 cache->prev_sp = unwound_fp + cache->framesize;
1796
1797 /* Calculate actual addresses of saved registers using offsets
1798 determined by arm_scan_prologue. */
1799 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1800 if (trad_frame_addr_p (cache->saved_regs, reg))
1801 cache->saved_regs[reg].addr += cache->prev_sp;
1802
1803 return cache;
1804 }
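
/* Worked numbers, for illustration only, of the arithmetic above: if
   the prologue scan chose framereg = r11 with framesize 16, and r11
   unwinds to 0x7fff0, then the caller's SP is 0x7fff0 + 16 = 0x80000,
   and a register recorded at scan offset -4 was saved at 0x7fffc.
   The function name is hypothetical.  */

static void
example_prev_sp_arithmetic (void)
{
  CORE_ADDR unwound_fp = 0x7fff0;
  int framesize = 16;
  long saved_offset = -4;       /* Offset recorded by the prologue scan.  */
  CORE_ADDR prev_sp = unwound_fp + framesize;

  gdb_assert (prev_sp == 0x80000);
  gdb_assert (prev_sp + saved_offset == 0x7fffc);
}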
1805
1806 /* Our frame ID for a normal frame is the current function's starting PC
1807 and the caller's SP when we were called. */
1808
1809 static void
1810 arm_prologue_this_id (struct frame_info *this_frame,
1811 void **this_cache,
1812 struct frame_id *this_id)
1813 {
1814 struct arm_prologue_cache *cache;
1815 struct frame_id id;
1816 CORE_ADDR pc, func;
1817
1818 if (*this_cache == NULL)
1819 *this_cache = arm_make_prologue_cache (this_frame);
1820 cache = *this_cache;
1821
1822 /* This is meant to halt the backtrace at "_start". */
1823 pc = get_frame_pc (this_frame);
1824 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1825 return;
1826
1827 /* If we've hit a wall, stop. */
1828 if (cache->prev_sp == 0)
1829 return;
1830
1831 func = get_frame_func (this_frame);
1832 id = frame_id_build (cache->prev_sp, func);
1833 *this_id = id;
1834 }
1835
1836 static struct value *
1837 arm_prologue_prev_register (struct frame_info *this_frame,
1838 void **this_cache,
1839 int prev_regnum)
1840 {
1841 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1842 struct arm_prologue_cache *cache;
1843
1844 if (*this_cache == NULL)
1845 *this_cache = arm_make_prologue_cache (this_frame);
1846 cache = *this_cache;
1847
1848 /* If we are asked to unwind the PC, then we need to return the LR
1849 instead. The prologue may save PC, but it will point into this
1850 frame's prologue, not the next frame's resume location. Also
1851 strip the saved T bit. A valid LR may have the low bit set, but
1852 a valid PC never does. */
1853 if (prev_regnum == ARM_PC_REGNUM)
1854 {
1855 CORE_ADDR lr;
1856
1857 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1858 return frame_unwind_got_constant (this_frame, prev_regnum,
1859 arm_addr_bits_remove (gdbarch, lr));
1860 }
1861
1862 /* SP is generally not saved to the stack, but this frame is
1863 identified by the next frame's stack pointer at the time of the call.
1864 The value was already reconstructed into PREV_SP. */
1865 if (prev_regnum == ARM_SP_REGNUM)
1866 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1867
1868 /* The CPSR may have been changed by the call instruction and by the
1869 called function. The only bit we can reconstruct is the T bit,
1870 by checking the low bit of LR as of the call. This is a reliable
1871 indicator of Thumb-ness except for some ARM v4T pre-interworking
1872 Thumb code, which could get away with a clear low bit as long as
1873 the called function did not use bx. Guess that all other
1874 bits are unchanged; the condition flags are presumably lost,
1875 but the processor status is likely valid. */
1876 if (prev_regnum == ARM_PS_REGNUM)
1877 {
1878 CORE_ADDR lr, cpsr;
1879 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
1880
1881 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1882 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1883 if (IS_THUMB_ADDR (lr))
1884 cpsr |= t_bit;
1885 else
1886 cpsr &= ~t_bit;
1887 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1888 }
1889
1890 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1891 prev_regnum);
1892 }
1893
1894 struct frame_unwind arm_prologue_unwind = {
1895 NORMAL_FRAME,
1896 arm_prologue_this_id,
1897 arm_prologue_prev_register,
1898 NULL,
1899 default_frame_sniffer
1900 };
1901
1902 static struct arm_prologue_cache *
1903 arm_make_stub_cache (struct frame_info *this_frame)
1904 {
1905 struct arm_prologue_cache *cache;
1906
1907 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1908 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1909
1910 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
1911
1912 return cache;
1913 }
1914
1915 /* Our frame ID for a stub frame is the current SP and LR. */
1916
1917 static void
1918 arm_stub_this_id (struct frame_info *this_frame,
1919 void **this_cache,
1920 struct frame_id *this_id)
1921 {
1922 struct arm_prologue_cache *cache;
1923
1924 if (*this_cache == NULL)
1925 *this_cache = arm_make_stub_cache (this_frame);
1926 cache = *this_cache;
1927
1928 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1929 }
1930
1931 static int
1932 arm_stub_unwind_sniffer (const struct frame_unwind *self,
1933 struct frame_info *this_frame,
1934 void **this_prologue_cache)
1935 {
1936 CORE_ADDR addr_in_block;
1937 char dummy[4];
1938
1939 addr_in_block = get_frame_address_in_block (this_frame);
1940 if (in_plt_section (addr_in_block, NULL)
1941 /* We also use the stub unwinder if the target memory is unreadable
1942 to avoid having the prologue unwinder try to read it. */
1943 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1944 return 1;
1945
1946 return 0;
1947 }
1948
1949 struct frame_unwind arm_stub_unwind = {
1950 NORMAL_FRAME,
1951 arm_stub_this_id,
1952 arm_prologue_prev_register,
1953 NULL,
1954 arm_stub_unwind_sniffer
1955 };
1956
1957 static CORE_ADDR
1958 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1959 {
1960 struct arm_prologue_cache *cache;
1961
1962 if (*this_cache == NULL)
1963 *this_cache = arm_make_prologue_cache (this_frame);
1964 cache = *this_cache;
1965
1966 return cache->prev_sp - cache->framesize;
1967 }
1968
1969 struct frame_base arm_normal_base = {
1970 &arm_prologue_unwind,
1971 arm_normal_frame_base,
1972 arm_normal_frame_base,
1973 arm_normal_frame_base
1974 };
1975
1976 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1977 dummy frame. The frame ID's base needs to match the TOS value
1978 saved by save_dummy_frame_tos() and returned from
1979 arm_push_dummy_call, and the PC needs to match the dummy frame's
1980 breakpoint. */
1981
1982 static struct frame_id
1983 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1984 {
1985 return frame_id_build (get_frame_register_unsigned (this_frame, ARM_SP_REGNUM),
1986 get_frame_pc (this_frame));
1987 }
1988
1989 /* Given THIS_FRAME, find the previous frame's resume PC (which will
1990 be used to construct the previous frame's ID, after looking up the
1991 containing function). */
1992
1993 static CORE_ADDR
1994 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1995 {
1996 CORE_ADDR pc;
1997 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
1998 return arm_addr_bits_remove (gdbarch, pc);
1999 }
2000
2001 static CORE_ADDR
2002 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2003 {
2004 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2005 }
2006
2007 static struct value *
2008 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2009 int regnum)
2010 {
2011 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2012 CORE_ADDR lr, cpsr;
2013 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2014
2015 switch (regnum)
2016 {
2017 case ARM_PC_REGNUM:
2018 /* The PC is normally copied from the return column, which
2019 describes saves of LR. However, that version may have an
2020 extra bit set to indicate Thumb state. The bit is not
2021 part of the PC. */
2022 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2023 return frame_unwind_got_constant (this_frame, regnum,
2024 arm_addr_bits_remove (gdbarch, lr));
2025
2026 case ARM_PS_REGNUM:
2027 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2028 cpsr = get_frame_register_unsigned (this_frame, regnum);
2029 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2030 if (IS_THUMB_ADDR (lr))
2031 cpsr |= t_bit;
2032 else
2033 cpsr &= ~t_bit;
2034 return frame_unwind_got_constant (this_frame, regnum, cpsr);
2035
2036 default:
2037 internal_error (__FILE__, __LINE__,
2038 _("Unexpected register %d"), regnum);
2039 }
2040 }
2041
2042 static void
2043 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
2044 struct dwarf2_frame_state_reg *reg,
2045 struct frame_info *this_frame)
2046 {
2047 switch (regnum)
2048 {
2049 case ARM_PC_REGNUM:
2050 case ARM_PS_REGNUM:
2051 reg->how = DWARF2_FRAME_REG_FN;
2052 reg->loc.fn = arm_dwarf2_prev_register;
2053 break;
2054 case ARM_SP_REGNUM:
2055 reg->how = DWARF2_FRAME_REG_CFA;
2056 break;
2057 }
2058 }
2059
2060 /* Return true if we are in the function's epilogue, i.e. after the
2061 instruction that destroyed the function's stack frame. */
2062
2063 static int
2064 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2065 {
2066 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2067 unsigned int insn, insn2;
2068 int found_return = 0, found_stack_adjust = 0;
2069 CORE_ADDR func_start, func_end;
2070 CORE_ADDR scan_pc;
2071 gdb_byte buf[4];
2072
2073 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2074 return 0;
2075
2076 /* The epilogue is a sequence of instructions along the following lines:
2077
2078 - add stack frame size to SP or FP
2079 - [if frame pointer used] restore SP from FP
2080 - restore registers from SP [may include PC]
2081 - a return-type instruction [if PC wasn't already restored]
2082
2083 In a first pass, we scan forward from the current PC and verify the
2084 instructions we find as compatible with this sequence, ending in a
2085 return instruction.
2086
2087 However, this is not sufficient to distinguish indirect function calls
2088 within a function from indirect tail calls in the epilogue in some cases.
2089 Therefore, if we didn't already find any SP-changing instruction during
2090 forward scan, we add a backward scanning heuristic to ensure we actually
2091 are in the epilogue. */
2092
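  /* For instance (an illustrative example, not from the original sources):
     a typical Thumb epilogue looks like

	 add  sp, #16        (encoding 0xb004)
	 pop  {r4-r7, pc}    (encoding 0xbdf0)

     while an indirect tail call such as "bx r3" (encoding 0x4718) matches
     only the return pattern, so the backward-scanning heuristic described
     above is what keeps a lone "bx" in the middle of a function from being
     mistaken for an epilogue.  */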
2093 scan_pc = pc;
2094 while (scan_pc < func_end && !found_return)
2095 {
2096 if (target_read_memory (scan_pc, buf, 2))
2097 break;
2098
2099 scan_pc += 2;
2100 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2101
2102 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2103 found_return = 1;
2104 else if (insn == 0x46f7) /* mov pc, lr */
2105 found_return = 1;
2106 else if (insn == 0x46bd) /* mov sp, r7 */
2107 found_stack_adjust = 1;
2108 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2109 found_stack_adjust = 1;
2110 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
2111 {
2112 found_stack_adjust = 1;
2113 if (insn & 0x0100) /* <registers> include PC. */
2114 found_return = 1;
2115 }
2116 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
2117 {
2118 if (target_read_memory (scan_pc, buf, 2))
2119 break;
2120
2121 scan_pc += 2;
2122 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2123
2124 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2125 {
2126 found_stack_adjust = 1;
2127 if (insn2 & 0x8000) /* <registers> include PC. */
2128 found_return = 1;
2129 }
2130 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2131 && (insn2 & 0x0fff) == 0x0b04)
2132 {
2133 found_stack_adjust = 1;
2134 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
2135 found_return = 1;
2136 }
2137 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2138 && (insn2 & 0x0e00) == 0x0a00)
2139 found_stack_adjust = 1;
2140 else
2141 break;
2142 }
2143 else
2144 break;
2145 }
2146
2147 if (!found_return)
2148 return 0;
2149
2150 /* Since any instruction in the epilogue sequence, with the possible
2151 exception of return itself, updates the stack pointer, we need to
2152 scan backwards for at most one instruction. Try either a 16-bit or
2153 a 32-bit instruction. This is just a heuristic, so we do not worry
2154 too much about false positives. */
2155
2156 if (!found_stack_adjust)
2157 {
2158 if (pc - 4 < func_start)
2159 return 0;
2160 if (target_read_memory (pc - 4, buf, 4))
2161 return 0;
2162
2163 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2164 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
2165
2166 if (insn2 == 0x46bd) /* mov sp, r7 */
2167 found_stack_adjust = 1;
2168 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2169 found_stack_adjust = 1;
2170 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
2171 found_stack_adjust = 1;
2172 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2173 found_stack_adjust = 1;
2174 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2175 && (insn2 & 0x0fff) == 0x0b04)
2176 found_stack_adjust = 1;
2177 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2178 && (insn2 & 0x0e00) == 0x0a00)
2179 found_stack_adjust = 1;
2180 }
2181
2182 return found_stack_adjust;
2183 }
2184
2185 /* Return true if we are in the function's epilogue, i.e. after the
2186 instruction that destroyed the function's stack frame. */
2187
2188 static int
2189 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2190 {
2191 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2192 unsigned int insn;
2193 int found_return = 0, found_stack_adjust = 0;
2194 CORE_ADDR func_start, func_end;
2195
2196 if (arm_pc_is_thumb (gdbarch, pc))
2197 return thumb_in_function_epilogue_p (gdbarch, pc);
2198
2199 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2200 return 0;
2201
2202 /* We are in the epilogue if the previous instruction was a stack
2203 adjustment and the next instruction is a possible return (bx, mov
2204 pc, or pop). We could have to scan backwards to find the stack
2205 adjustment, or forwards to find the return, but this is a decent
2206 approximation. First scan forwards. */
2207
2208 found_return = 0;
2209 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
2210 if (bits (insn, 28, 31) != INST_NV)
2211 {
2212 if ((insn & 0x0ffffff0) == 0x012fff10)
2213 /* BX. */
2214 found_return = 1;
2215 else if ((insn & 0x0ffffff0) == 0x01a0f000)
2216 /* MOV PC. */
2217 found_return = 1;
2218 else if ((insn & 0x0fff0000) == 0x08bd0000
2219 && (insn & 0x0000c000) != 0)
2220 /* POP (LDMIA), including PC or LR. */
2221 found_return = 1;
2222 }
2223
2224 if (!found_return)
2225 return 0;
2226
2227 /* Scan backwards. This is just a heuristic, so do not worry about
2228 false positives from mode changes. */
2229
2230 if (pc < func_start + 4)
2231 return 0;
2232
2233 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
2234 if (bits (insn, 28, 31) != INST_NV)
2235 {
2236 if ((insn & 0x0df0f000) == 0x0080d000)
2237 /* ADD SP (register or immediate). */
2238 found_stack_adjust = 1;
2239 else if ((insn & 0x0df0f000) == 0x0040d000)
2240 /* SUB SP (register or immediate). */
2241 found_stack_adjust = 1;
2242 else if ((insn & 0x0ffffff0) == 0x01a0d000)
2243 /* MOV SP. */
2244 found_stack_adjust = 1;
2245 else if ((insn & 0x0fff0000) == 0x08bd0000)
2246 /* POP (LDMIA). */
2247 found_stack_adjust = 1;
2248 }
2249
2250 if (found_stack_adjust)
2251 return 1;
2252
2253 return 0;
2254 }
2255
2256
2257 /* When arguments must be pushed onto the stack, they go on in reverse
2258 order. The code below implements a FILO (stack) to do this. */
2259
2260 struct stack_item
2261 {
2262 int len;
2263 struct stack_item *prev;
2264 void *data;
2265 };
2266
2267 static struct stack_item *
2268 push_stack_item (struct stack_item *prev, const void *contents, int len)
2269 {
2270 struct stack_item *si;
2271 si = xmalloc (sizeof (struct stack_item));
2272 si->data = xmalloc (len);
2273 si->len = len;
2274 si->prev = prev;
2275 memcpy (si->data, contents, len);
2276 return si;
2277 }
2278
2279 static struct stack_item *
2280 pop_stack_item (struct stack_item *si)
2281 {
2282 struct stack_item *dead = si;
2283 si = si->prev;
2284 xfree (dead->data);
2285 xfree (dead);
2286 return si;
2287 }
2288
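#if 0
/* Illustrative sketch only, not part of GDB; the function name is invented
   for this example.  It shows how the FILO above reverses the order in
   which items reach memory: pushing A then B and draining the list while
   decrementing SP (exactly as arm_push_dummy_call does below) writes B at
   the higher address and A at the final, lowest SP, which is the layout
   the calling convention expects for stack arguments.  */
static CORE_ADDR
example_stack_item_order (CORE_ADDR sp)
{
  static const gdb_byte a[4] = { 0xaa, 0xaa, 0xaa, 0xaa };
  static const gdb_byte b[4] = { 0xbb, 0xbb, 0xbb, 0xbb };
  struct stack_item *si = NULL;

  si = push_stack_item (si, a, 4);	/* First argument.  */
  si = push_stack_item (si, b, 4);	/* Second argument.  */

  while (si)
    {
      sp -= si->len;
      write_memory (sp, si->data, si->len);
      si = pop_stack_item (si);
    }

  return sp;	/* A now sits at SP, B at SP + 4.  */
}
#endif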
2289
2290 /* Return the alignment (in bytes) of the given type. */
2291
2292 static int
2293 arm_type_align (struct type *t)
2294 {
2295 int n;
2296 int align;
2297 int falign;
2298
2299 t = check_typedef (t);
2300 switch (TYPE_CODE (t))
2301 {
2302 default:
2303 /* Should never happen. */
2304 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
2305 return 4;
2306
2307 case TYPE_CODE_PTR:
2308 case TYPE_CODE_ENUM:
2309 case TYPE_CODE_INT:
2310 case TYPE_CODE_FLT:
2311 case TYPE_CODE_SET:
2312 case TYPE_CODE_RANGE:
2313 case TYPE_CODE_BITSTRING:
2314 case TYPE_CODE_REF:
2315 case TYPE_CODE_CHAR:
2316 case TYPE_CODE_BOOL:
2317 return TYPE_LENGTH (t);
2318
2319 case TYPE_CODE_ARRAY:
2320 case TYPE_CODE_COMPLEX:
2321 /* TODO: What about vector types? */
2322 return arm_type_align (TYPE_TARGET_TYPE (t));
2323
2324 case TYPE_CODE_STRUCT:
2325 case TYPE_CODE_UNION:
2326 align = 1;
2327 for (n = 0; n < TYPE_NFIELDS (t); n++)
2328 {
2329 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
2330 if (falign > align)
2331 align = falign;
2332 }
2333 return align;
2334 }
2335 }
2336
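/* For example (illustrative): under this definition
   "struct { char c; double d; }" has alignment 8 (the double member
   dominates), "float[3]" has alignment 4, and a union of an int and a
   pointer has alignment 4.  Callers such as arm_push_dummy_call further
   round the result up to a whole number of words and cap it at one word
   for APCS or two words for AAPCS.  */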
2337 /* Possible base types for a candidate for passing and returning in
2338 VFP registers. */
2339
2340 enum arm_vfp_cprc_base_type
2341 {
2342 VFP_CPRC_UNKNOWN,
2343 VFP_CPRC_SINGLE,
2344 VFP_CPRC_DOUBLE,
2345 VFP_CPRC_VEC64,
2346 VFP_CPRC_VEC128
2347 };
2348
2349 /* The length of one element of base type B. */
2350
2351 static unsigned
2352 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
2353 {
2354 switch (b)
2355 {
2356 case VFP_CPRC_SINGLE:
2357 return 4;
2358 case VFP_CPRC_DOUBLE:
2359 return 8;
2360 case VFP_CPRC_VEC64:
2361 return 8;
2362 case VFP_CPRC_VEC128:
2363 return 16;
2364 default:
2365 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2366 (int) b);
2367 }
2368 }
2369
2370 /* The character ('s', 'd' or 'q') for the type of VFP register used
2371 for passing base type B. */
2372
2373 static int
2374 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
2375 {
2376 switch (b)
2377 {
2378 case VFP_CPRC_SINGLE:
2379 return 's';
2380 case VFP_CPRC_DOUBLE:
2381 return 'd';
2382 case VFP_CPRC_VEC64:
2383 return 'd';
2384 case VFP_CPRC_VEC128:
2385 return 'q';
2386 default:
2387 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2388 (int) b);
2389 }
2390 }
2391
2392 /* Determine whether T may be part of a candidate for passing and
2393 returning in VFP registers, ignoring the limit on the total number
2394 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
2395 classification of the first valid component found; if it is not
2396 VFP_CPRC_UNKNOWN, all components must have the same classification
2397 as *BASE_TYPE. If it is found that T contains a type not permitted
2398 for passing and returning in VFP registers, a type differently
2399 classified from *BASE_TYPE, or two types differently classified
2400 from each other, return -1, otherwise return the total number of
2401 base-type elements found (possibly 0 in an empty structure or
2402 array). Vectors and complex types are not currently supported,
2403 matching the generic AAPCS support. */
2404
2405 static int
2406 arm_vfp_cprc_sub_candidate (struct type *t,
2407 enum arm_vfp_cprc_base_type *base_type)
2408 {
2409 t = check_typedef (t);
2410 switch (TYPE_CODE (t))
2411 {
2412 case TYPE_CODE_FLT:
2413 switch (TYPE_LENGTH (t))
2414 {
2415 case 4:
2416 if (*base_type == VFP_CPRC_UNKNOWN)
2417 *base_type = VFP_CPRC_SINGLE;
2418 else if (*base_type != VFP_CPRC_SINGLE)
2419 return -1;
2420 return 1;
2421
2422 case 8:
2423 if (*base_type == VFP_CPRC_UNKNOWN)
2424 *base_type = VFP_CPRC_DOUBLE;
2425 else if (*base_type != VFP_CPRC_DOUBLE)
2426 return -1;
2427 return 1;
2428
2429 default:
2430 return -1;
2431 }
2432 break;
2433
2434 case TYPE_CODE_ARRAY:
2435 {
2436 int count;
2437 unsigned unitlen;
2438 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
2439 if (count == -1)
2440 return -1;
2441 if (TYPE_LENGTH (t) == 0)
2442 {
2443 gdb_assert (count == 0);
2444 return 0;
2445 }
2446 else if (count == 0)
2447 return -1;
2448 unitlen = arm_vfp_cprc_unit_length (*base_type);
2449 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
2450 return TYPE_LENGTH (t) / unitlen;
2451 }
2452 break;
2453
2454 case TYPE_CODE_STRUCT:
2455 {
2456 int count = 0;
2457 unsigned unitlen;
2458 int i;
2459 for (i = 0; i < TYPE_NFIELDS (t); i++)
2460 {
2461 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2462 base_type);
2463 if (sub_count == -1)
2464 return -1;
2465 count += sub_count;
2466 }
2467 if (TYPE_LENGTH (t) == 0)
2468 {
2469 gdb_assert (count == 0);
2470 return 0;
2471 }
2472 else if (count == 0)
2473 return -1;
2474 unitlen = arm_vfp_cprc_unit_length (*base_type);
2475 if (TYPE_LENGTH (t) != unitlen * count)
2476 return -1;
2477 return count;
2478 }
2479
2480 case TYPE_CODE_UNION:
2481 {
2482 int count = 0;
2483 unsigned unitlen;
2484 int i;
2485 for (i = 0; i < TYPE_NFIELDS (t); i++)
2486 {
2487 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2488 base_type);
2489 if (sub_count == -1)
2490 return -1;
2491 count = (count > sub_count ? count : sub_count);
2492 }
2493 if (TYPE_LENGTH (t) == 0)
2494 {
2495 gdb_assert (count == 0);
2496 return 0;
2497 }
2498 else if (count == 0)
2499 return -1;
2500 unitlen = arm_vfp_cprc_unit_length (*base_type);
2501 if (TYPE_LENGTH (t) != unitlen * count)
2502 return -1;
2503 return count;
2504 }
2505
2506 default:
2507 break;
2508 }
2509
2510 return -1;
2511 }
2512
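/* Worked examples (illustrative) of the classification above:

   - "struct { float x, y, z; }" yields three VFP_CPRC_SINGLE elements;
   - "struct { double m[2][2]; }" yields four VFP_CPRC_DOUBLE elements,
     still within the four-element limit checked below;
   - "struct { float f; double d; }" mixes base types and is rejected (-1);
   - a struct containing an int anywhere is rejected, since only
     floating-point components are permitted.  */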
2513 /* Determine whether T is a VFP co-processor register candidate (CPRC)
2514 if passed to or returned from a non-variadic function with the VFP
2515 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
2516 *BASE_TYPE to the base type for T and *COUNT to the number of
2517 elements of that base type before returning. */
2518
2519 static int
2520 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
2521 int *count)
2522 {
2523 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
2524 int c = arm_vfp_cprc_sub_candidate (t, &b);
2525 if (c <= 0 || c > 4)
2526 return 0;
2527 *base_type = b;
2528 *count = c;
2529 return 1;
2530 }
2531
2532 /* Return 1 if the VFP ABI should be used for passing arguments to and
2533 returning values from a function of type FUNC_TYPE, 0
2534 otherwise. */
2535
2536 static int
2537 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
2538 {
2539 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2540 /* Variadic functions always use the base ABI. Assume that functions
2541 without debug info are not variadic. */
2542 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
2543 return 0;
2544 /* The VFP ABI is only supported as a variant of AAPCS. */
2545 if (tdep->arm_abi != ARM_ABI_AAPCS)
2546 return 0;
2547 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
2548 }
2549
2550 /* We currently only support passing parameters in integer registers, which
2551 conforms with GCC's default model, and VFP argument passing following
2552 the VFP variant of AAPCS. Several other variants exist and
2553 we should probably support some of them based on the selected ABI. */
2554
2555 static CORE_ADDR
2556 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
2557 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
2558 struct value **args, CORE_ADDR sp, int struct_return,
2559 CORE_ADDR struct_addr)
2560 {
2561 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2562 int argnum;
2563 int argreg;
2564 int nstack;
2565 struct stack_item *si = NULL;
2566 int use_vfp_abi;
2567 struct type *ftype;
2568 unsigned vfp_regs_free = (1 << 16) - 1;
2569
2570 /* Determine the type of this function and whether the VFP ABI
2571 applies. */
2572 ftype = check_typedef (value_type (function));
2573 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
2574 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
2575 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
2576
2577 /* Set the return address. For the ARM, the return breakpoint is
2578 always at BP_ADDR. */
2579 if (arm_pc_is_thumb (gdbarch, bp_addr))
2580 bp_addr |= 1;
2581 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
2582
2583 /* Walk through the list of args and determine how large a temporary
2584 stack is required. Need to take care here as structs may be
2585 passed on the stack, and we have to push them. */
2586 nstack = 0;
2587
2588 argreg = ARM_A1_REGNUM;
2589 nstack = 0;
2590
2591 /* The struct_return pointer occupies the first parameter
2592 passing register. */
2593 if (struct_return)
2594 {
2595 if (arm_debug)
2596 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
2597 gdbarch_register_name (gdbarch, argreg),
2598 paddress (gdbarch, struct_addr));
2599 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
2600 argreg++;
2601 }
2602
2603 for (argnum = 0; argnum < nargs; argnum++)
2604 {
2605 int len;
2606 struct type *arg_type;
2607 struct type *target_type;
2608 enum type_code typecode;
2609 const bfd_byte *val;
2610 int align;
2611 enum arm_vfp_cprc_base_type vfp_base_type;
2612 int vfp_base_count;
2613 int may_use_core_reg = 1;
2614
2615 arg_type = check_typedef (value_type (args[argnum]));
2616 len = TYPE_LENGTH (arg_type);
2617 target_type = TYPE_TARGET_TYPE (arg_type);
2618 typecode = TYPE_CODE (arg_type);
2619 val = value_contents (args[argnum]);
2620
2621 align = arm_type_align (arg_type);
2622 /* Round alignment up to a whole number of words. */
2623 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
2624 /* Different ABIs have different maximum alignments. */
2625 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
2626 {
2627 /* The APCS ABI only requires word alignment. */
2628 align = INT_REGISTER_SIZE;
2629 }
2630 else
2631 {
2632 /* The AAPCS requires at most doubleword alignment. */
2633 if (align > INT_REGISTER_SIZE * 2)
2634 align = INT_REGISTER_SIZE * 2;
2635 }
2636
2637 if (use_vfp_abi
2638 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
2639 &vfp_base_count))
2640 {
2641 int regno;
2642 int unit_length;
2643 int shift;
2644 unsigned mask;
2645
2646 /* Because this is a CPRC it cannot go in a core register or
2647 cause a core register to be skipped for alignment.
2648 Either it goes in VFP registers and the rest of this loop
2649 iteration is skipped for this argument, or it goes on the
2650 stack (and the stack alignment code is correct for this
2651 case). */
2652 may_use_core_reg = 0;
2653
2654 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
2655 shift = unit_length / 4;
2656 mask = (1 << (shift * vfp_base_count)) - 1;
2657 for (regno = 0; regno < 16; regno += shift)
2658 if (((vfp_regs_free >> regno) & mask) == mask)
2659 break;
2660
2661 if (regno < 16)
2662 {
2663 int reg_char;
2664 int reg_scaled;
2665 int i;
2666
2667 vfp_regs_free &= ~(mask << regno);
2668 reg_scaled = regno / shift;
2669 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
2670 for (i = 0; i < vfp_base_count; i++)
2671 {
2672 char name_buf[4];
2673 int regnum;
2674 if (reg_char == 'q')
2675 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
2676 val + i * unit_length);
2677 else
2678 {
2679 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
2680 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
2681 strlen (name_buf));
2682 regcache_cooked_write (regcache, regnum,
2683 val + i * unit_length);
2684 }
2685 }
2686 continue;
2687 }
2688 else
2689 {
2690 /* This CPRC could not go in VFP registers, so all VFP
2691 registers are now marked as used. */
2692 vfp_regs_free = 0;
2693 }
2694 }
2695
2696 /* Push stack padding for doubleword alignment. */
2697 if (nstack & (align - 1))
2698 {
2699 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2700 nstack += INT_REGISTER_SIZE;
2701 }
2702
2703 /* Doubleword aligned quantities must go in even register pairs. */
2704 if (may_use_core_reg
2705 && argreg <= ARM_LAST_ARG_REGNUM
2706 && align > INT_REGISTER_SIZE
2707 && argreg & 1)
2708 argreg++;
2709
2710 /* If the argument is a pointer to a function, and it is a
2711 Thumb function, create a LOCAL copy of the value and set
2712 the THUMB bit in it. */
2713 if (TYPE_CODE_PTR == typecode
2714 && target_type != NULL
2715 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
2716 {
2717 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
2718 if (arm_pc_is_thumb (gdbarch, regval))
2719 {
2720 bfd_byte *copy = alloca (len);
2721 store_unsigned_integer (copy, len, byte_order,
2722 MAKE_THUMB_ADDR (regval));
2723 val = copy;
2724 }
2725 }
2726
2727 /* Copy the argument to general registers or the stack in
2728 register-sized pieces. Large arguments are split between
2729 registers and stack. */
2730 while (len > 0)
2731 {
2732 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
2733
2734 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
2735 {
2736 /* The argument is being passed in a general purpose
2737 register. */
2738 CORE_ADDR regval
2739 = extract_unsigned_integer (val, partial_len, byte_order);
2740 if (byte_order == BFD_ENDIAN_BIG)
2741 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
2742 if (arm_debug)
2743 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
2744 argnum,
2745 gdbarch_register_name
2746 (gdbarch, argreg),
2747 phex (regval, INT_REGISTER_SIZE));
2748 regcache_cooked_write_unsigned (regcache, argreg, regval);
2749 argreg++;
2750 }
2751 else
2752 {
2753 /* Push the arguments onto the stack. */
2754 if (arm_debug)
2755 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
2756 argnum, nstack);
2757 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2758 nstack += INT_REGISTER_SIZE;
2759 }
2760
2761 len -= partial_len;
2762 val += partial_len;
2763 }
2764 }
2765 /* If we have an odd number of words to push, then decrement the stack
2766 by one word now, so the first stack argument will be dword aligned. */
2767 if (nstack & 4)
2768 sp -= 4;
2769
2770 while (si)
2771 {
2772 sp -= si->len;
2773 write_memory (sp, si->data, si->len);
2774 si = pop_stack_item (si);
2775 }
2776
2777 /* Finally, update the SP register. */
2778 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
2779
2780 return sp;
2781 }
2782
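/* A worked example (illustrative, hypothetical signature) of the VFP
   register allocation in arm_push_dummy_call above: for a call such as
   "f (double a, struct { float v[4]; } b, double c)" under the VFP ABI,
   A takes s0-s1 (d0), B then back-fills s2-s5, and C must start on an
   even single-precision register, so the bitmask search places it in
   s6-s7 (d3).  Once no suitable run of registers remains, vfp_regs_free
   is cleared and later CPRC arguments go on the stack instead.  */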
2783
2784 /* Always align the frame to an 8-byte boundary. This is required on
2785 some platforms and harmless on the rest. */
2786
2787 static CORE_ADDR
2788 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2789 {
2790 /* Align the stack to eight bytes. */
2791 return sp & ~ (CORE_ADDR) 7;
2792 }
2793
2794 static void
2795 print_fpu_flags (int flags)
2796 {
2797 if (flags & (1 << 0))
2798 fputs ("IVO ", stdout);
2799 if (flags & (1 << 1))
2800 fputs ("DVZ ", stdout);
2801 if (flags & (1 << 2))
2802 fputs ("OFL ", stdout);
2803 if (flags & (1 << 3))
2804 fputs ("UFL ", stdout);
2805 if (flags & (1 << 4))
2806 fputs ("INX ", stdout);
2807 putchar ('\n');
2808 }
2809
2810 /* Print interesting information about the floating point processor
2811 (if present) or emulator. */
2812 static void
2813 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
2814 struct frame_info *frame, const char *args)
2815 {
2816 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
2817 int type;
2818
2819 type = (status >> 24) & 127;
2820 if (status & (1 << 31))
2821 printf (_("Hardware FPU type %d\n"), type);
2822 else
2823 printf (_("Software FPU type %d\n"), type);
2824 /* i18n: [floating point unit] mask */
2825 fputs (_("mask: "), stdout);
2826 print_fpu_flags (status >> 16);
2827 /* i18n: [floating point unit] flags */
2828 fputs (_("flags: "), stdout);
2829 print_fpu_flags (status);
2830 }
2831
2832 /* Construct the ARM extended floating point type. */
2833 static struct type *
2834 arm_ext_type (struct gdbarch *gdbarch)
2835 {
2836 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2837
2838 if (!tdep->arm_ext_type)
2839 tdep->arm_ext_type
2840 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
2841 floatformats_arm_ext);
2842
2843 return tdep->arm_ext_type;
2844 }
2845
2846 static struct type *
2847 arm_neon_double_type (struct gdbarch *gdbarch)
2848 {
2849 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2850
2851 if (tdep->neon_double_type == NULL)
2852 {
2853 struct type *t, *elem;
2854
2855 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
2856 TYPE_CODE_UNION);
2857 elem = builtin_type (gdbarch)->builtin_uint8;
2858 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
2859 elem = builtin_type (gdbarch)->builtin_uint16;
2860 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
2861 elem = builtin_type (gdbarch)->builtin_uint32;
2862 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
2863 elem = builtin_type (gdbarch)->builtin_uint64;
2864 append_composite_type_field (t, "u64", elem);
2865 elem = builtin_type (gdbarch)->builtin_float;
2866 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
2867 elem = builtin_type (gdbarch)->builtin_double;
2868 append_composite_type_field (t, "f64", elem);
2869
2870 TYPE_VECTOR (t) = 1;
2871 TYPE_NAME (t) = "neon_d";
2872 tdep->neon_double_type = t;
2873 }
2874
2875 return tdep->neon_double_type;
2876 }
2877
2878 /* FIXME: The vector types are not correctly ordered on big-endian
2879 targets. Just as s0 is the low bits of d0, d0[0] is also the low
2880 bits of d0 - regardless of what unit size is being held in d0. So
2881 the offset of the first uint8 in d0 is 7, but the offset of the
2882 first float is 4. This code works as-is for little-endian
2883 targets. */
2884
2885 static struct type *
2886 arm_neon_quad_type (struct gdbarch *gdbarch)
2887 {
2888 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2889
2890 if (tdep->neon_quad_type == NULL)
2891 {
2892 struct type *t, *elem;
2893
2894 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
2895 TYPE_CODE_UNION);
2896 elem = builtin_type (gdbarch)->builtin_uint8;
2897 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
2898 elem = builtin_type (gdbarch)->builtin_uint16;
2899 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
2900 elem = builtin_type (gdbarch)->builtin_uint32;
2901 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
2902 elem = builtin_type (gdbarch)->builtin_uint64;
2903 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
2904 elem = builtin_type (gdbarch)->builtin_float;
2905 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
2906 elem = builtin_type (gdbarch)->builtin_double;
2907 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
2908
2909 TYPE_VECTOR (t) = 1;
2910 TYPE_NAME (t) = "neon_q";
2911 tdep->neon_quad_type = t;
2912 }
2913
2914 return tdep->neon_quad_type;
2915 }
2916
2917 /* Return the GDB type object for the "standard" data type of data in
2918 register N. */
2919
2920 static struct type *
2921 arm_register_type (struct gdbarch *gdbarch, int regnum)
2922 {
2923 int num_regs = gdbarch_num_regs (gdbarch);
2924
2925 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
2926 && regnum >= num_regs && regnum < num_regs + 32)
2927 return builtin_type (gdbarch)->builtin_float;
2928
2929 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
2930 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
2931 return arm_neon_quad_type (gdbarch);
2932
2933 /* If the target description has register information, we are only
2934 in this function so that we can override the types of
2935 double-precision registers for NEON. */
2936 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
2937 {
2938 struct type *t = tdesc_register_type (gdbarch, regnum);
2939
2940 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
2941 && TYPE_CODE (t) == TYPE_CODE_FLT
2942 && gdbarch_tdep (gdbarch)->have_neon)
2943 return arm_neon_double_type (gdbarch);
2944 else
2945 return t;
2946 }
2947
2948 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
2949 {
2950 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
2951 return builtin_type (gdbarch)->builtin_void;
2952
2953 return arm_ext_type (gdbarch);
2954 }
2955 else if (regnum == ARM_SP_REGNUM)
2956 return builtin_type (gdbarch)->builtin_data_ptr;
2957 else if (regnum == ARM_PC_REGNUM)
2958 return builtin_type (gdbarch)->builtin_func_ptr;
2959 else if (regnum >= ARRAY_SIZE (arm_register_names))
2960 /* These registers are only supported on targets which supply
2961 an XML description. */
2962 return builtin_type (gdbarch)->builtin_int0;
2963 else
2964 return builtin_type (gdbarch)->builtin_uint32;
2965 }
2966
2967 /* Map a DWARF register REGNUM onto the appropriate GDB register
2968 number. */
2969
2970 static int
2971 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2972 {
2973 /* Core integer regs. */
2974 if (reg >= 0 && reg <= 15)
2975 return reg;
2976
2977 /* Legacy FPA encoding. These were once used in a way which
2978 overlapped with VFP register numbering, so their use is
2979 discouraged, but GDB doesn't support the ARM toolchain
2980 which used them for VFP. */
2981 if (reg >= 16 && reg <= 23)
2982 return ARM_F0_REGNUM + reg - 16;
2983
2984 /* New assignments for the FPA registers. */
2985 if (reg >= 96 && reg <= 103)
2986 return ARM_F0_REGNUM + reg - 96;
2987
2988 /* WMMX register assignments. */
2989 if (reg >= 104 && reg <= 111)
2990 return ARM_WCGR0_REGNUM + reg - 104;
2991
2992 if (reg >= 112 && reg <= 127)
2993 return ARM_WR0_REGNUM + reg - 112;
2994
2995 if (reg >= 192 && reg <= 199)
2996 return ARM_WC0_REGNUM + reg - 192;
2997
2998 /* VFP v2 registers. A double precision value is actually
2999 in d1 rather than s2, but the ABI only defines numbering
3000 for the single precision registers. This will "just work"
3001 in GDB for little endian targets (we'll read eight bytes,
3002 starting in s0 and then progressing to s1), but will be
3003 reversed on big endian targets with VFP. This won't
3004 be a problem for the new Neon quad registers; you're supposed
3005 to use DW_OP_piece for those. */
3006 if (reg >= 64 && reg <= 95)
3007 {
3008 char name_buf[4];
3009
3010 sprintf (name_buf, "s%d", reg - 64);
3011 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3012 strlen (name_buf));
3013 }
3014
3015 /* VFP v3 / Neon registers. This range is also used for VFP v2
3016 registers, except that it now describes d0 instead of s0. */
3017 if (reg >= 256 && reg <= 287)
3018 {
3019 char name_buf[4];
3020
3021 sprintf (name_buf, "d%d", reg - 256);
3022 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3023 strlen (name_buf));
3024 }
3025
3026 return -1;
3027 }
3028
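/* For example (illustrative): DWARF register 13 maps straight through to
   GDB's r13/SP, 64 resolves to the user register "s0", 256 to "d0", and
   an unassigned number such as 300 returns -1.  */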
3029 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
3030 static int
3031 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
3032 {
3033 int reg = regnum;
3034 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
3035
3036 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
3037 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
3038
3039 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
3040 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
3041
3042 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
3043 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
3044
3045 if (reg < NUM_GREGS)
3046 return SIM_ARM_R0_REGNUM + reg;
3047 reg -= NUM_GREGS;
3048
3049 if (reg < NUM_FREGS)
3050 return SIM_ARM_FP0_REGNUM + reg;
3051 reg -= NUM_FREGS;
3052
3053 if (reg < NUM_SREGS)
3054 return SIM_ARM_FPS_REGNUM + reg;
3055 reg -= NUM_SREGS;
3056
3057 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
3058 }
3059
3060 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
3061 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
3062 It is thought that this is the floating-point register format on
3063 little-endian systems. */
3064
3065 static void
3066 convert_from_extended (const struct floatformat *fmt, const void *ptr,
3067 void *dbl, int endianess)
3068 {
3069 DOUBLEST d;
3070
3071 if (endianess == BFD_ENDIAN_BIG)
3072 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
3073 else
3074 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
3075 ptr, &d);
3076 floatformat_from_doublest (fmt, &d, dbl);
3077 }
3078
3079 static void
3080 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
3081 int endianess)
3082 {
3083 DOUBLEST d;
3084
3085 floatformat_to_doublest (fmt, ptr, &d);
3086 if (endianess == BFD_ENDIAN_BIG)
3087 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
3088 else
3089 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
3090 &d, dbl);
3091 }
3092
3093 static int
3094 condition_true (unsigned long cond, unsigned long status_reg)
3095 {
3096 if (cond == INST_AL || cond == INST_NV)
3097 return 1;
3098
3099 switch (cond)
3100 {
3101 case INST_EQ:
3102 return ((status_reg & FLAG_Z) != 0);
3103 case INST_NE:
3104 return ((status_reg & FLAG_Z) == 0);
3105 case INST_CS:
3106 return ((status_reg & FLAG_C) != 0);
3107 case INST_CC:
3108 return ((status_reg & FLAG_C) == 0);
3109 case INST_MI:
3110 return ((status_reg & FLAG_N) != 0);
3111 case INST_PL:
3112 return ((status_reg & FLAG_N) == 0);
3113 case INST_VS:
3114 return ((status_reg & FLAG_V) != 0);
3115 case INST_VC:
3116 return ((status_reg & FLAG_V) == 0);
3117 case INST_HI:
3118 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
3119 case INST_LS:
3120 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
3121 case INST_GE:
3122 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
3123 case INST_LT:
3124 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
3125 case INST_GT:
3126 return (((status_reg & FLAG_Z) == 0)
3127 && (((status_reg & FLAG_N) == 0)
3128 == ((status_reg & FLAG_V) == 0)));
3129 case INST_LE:
3130 return (((status_reg & FLAG_Z) != 0)
3131 || (((status_reg & FLAG_N) == 0)
3132 != ((status_reg & FLAG_V) == 0)));
3133 }
3134 return 1;
3135 }
3136
3137 static unsigned long
3138 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
3139 unsigned long pc_val, unsigned long status_reg)
3140 {
3141 unsigned long res, shift;
3142 int rm = bits (inst, 0, 3);
3143 unsigned long shifttype = bits (inst, 5, 6);
3144
3145 if (bit (inst, 4))
3146 {
3147 int rs = bits (inst, 8, 11);
3148 shift = (rs == 15 ? pc_val + 8
3149 : get_frame_register_unsigned (frame, rs)) & 0xFF;
3150 }
3151 else
3152 shift = bits (inst, 7, 11);
3153
3154 res = (rm == 15
3155 ? (pc_val + (bit (inst, 4) ? 12 : 8))
3156 : get_frame_register_unsigned (frame, rm));
3157
3158 switch (shifttype)
3159 {
3160 case 0: /* LSL */
3161 res = shift >= 32 ? 0 : res << shift;
3162 break;
3163
3164 case 1: /* LSR */
3165 res = shift >= 32 ? 0 : res >> shift;
3166 break;
3167
3168 case 2: /* ASR */
3169 if (shift >= 32)
3170 shift = 31;
3171 res = ((res & 0x80000000L)
3172 ? ~((~res) >> shift) : res >> shift);
3173 break;
3174
3175 case 3: /* ROR/RRX */
3176 shift &= 31;
3177 if (shift == 0)
3178 res = (res >> 1) | (carry ? 0x80000000L : 0);
3179 else
3180 res = (res >> shift) | (res << (32 - shift));
3181 break;
3182 }
3183
3184 return res & 0xffffffff;
3185 }
3186
3187 /* Return number of 1-bits in VAL. */
3188
3189 static int
3190 bitcount (unsigned long val)
3191 {
3192 int nbits;
3193 for (nbits = 0; val != 0; nbits++)
3194 val &= val - 1; /* delete rightmost 1-bit in val */
3195 return nbits;
3196 }
3197
3198 /* Return the size in bytes of the complete Thumb instruction whose
3199 first halfword is INST1. */
3200
3201 static int
3202 thumb_insn_size (unsigned short inst1)
3203 {
3204 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3205 return 4;
3206 else
3207 return 2;
3208 }
3209
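/* For instance (illustrative): 0xb580 ("push {r7, lr}") is a complete
   2-byte instruction, while 0xf000 can only be the first half of a 4-byte
   BL/BLX and so reports 4; the 16-bit unconditional branch encodings
   0xe000-0xe7ff still report 2 because bits 11-12 are clear.  */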
3210 static int
3211 thumb_advance_itstate (unsigned int itstate)
3212 {
3213 /* Preserve IT[7:5], the first three bits of the condition. Shift
3214 the upcoming condition flags left by one bit. */
3215 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
3216
3217 /* If we have finished the IT block, clear the state. */
3218 if ((itstate & 0x0f) == 0)
3219 itstate = 0;
3220
3221 return itstate;
3222 }
3223
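/* A worked example (illustrative): "ITTE NE" (encoding 0xbf1a) leaves an
   IT state of 0x1a, so the next two instructions execute if NE
   (state >> 4 == 1) and the third if EQ; successive calls to
   thumb_advance_itstate then yield 0x14, 0x08, and finally 0 once the
   block is finished.  */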
3224 /* Find the next PC after the current instruction executes. In some
3225 cases we can not statically determine the answer (see the IT state
3226 handling in this function); in that case, a breakpoint may be
3227 inserted in addition to the returned PC, which will be used to set
3228 another breakpoint by our caller. */
3229
3230 static CORE_ADDR
3231 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3232 {
3233 struct gdbarch *gdbarch = get_frame_arch (frame);
3234 struct address_space *aspace = get_frame_address_space (frame);
3235 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3236 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3237 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
3238 unsigned short inst1;
3239 CORE_ADDR nextpc = pc + 2; /* default is next instruction */
3240 unsigned long offset;
3241 ULONGEST status, itstate;
3242
3243 nextpc = MAKE_THUMB_ADDR (nextpc);
3244 pc_val = MAKE_THUMB_ADDR (pc_val);
3245
3246 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3247
3248 /* Thumb-2 conditional execution support. There are eight bits in
3249 the CPSR which describe conditional execution state. Once
3250 reconstructed (they're in a funny order), the low five bits
3251 describe the low bit of the condition for each instruction and
3252 how many instructions remain. The high three bits describe the
3253 base condition. One of the low four bits will be set if an IT
3254 block is active. These bits read as zero on earlier
3255 processors. */
3256 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3257 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
3258
3259 /* If-Then handling. On GNU/Linux, where this routine is used, we
3260 use an undefined instruction as a breakpoint. Unlike BKPT, IT
3261 can disable execution of the undefined instruction. So we might
3262 miss the breakpoint if we set it on a skipped conditional
3263 instruction. Because conditional instructions can change the
3264 flags, affecting the execution of further instructions, we may
3265 need to set two breakpoints. */
3266
3267 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
3268 {
3269 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3270 {
3271 /* An IT instruction. Because this instruction does not
3272 modify the flags, we can accurately predict the next
3273 executed instruction. */
3274 itstate = inst1 & 0x00ff;
3275 pc += thumb_insn_size (inst1);
3276
3277 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3278 {
3279 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3280 pc += thumb_insn_size (inst1);
3281 itstate = thumb_advance_itstate (itstate);
3282 }
3283
3284 return MAKE_THUMB_ADDR (pc);
3285 }
3286 else if (itstate != 0)
3287 {
3288 /* We are in a conditional block. Check the condition. */
3289 if (! condition_true (itstate >> 4, status))
3290 {
3291 /* Advance to the next executed instruction. */
3292 pc += thumb_insn_size (inst1);
3293 itstate = thumb_advance_itstate (itstate);
3294
3295 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3296 {
3297 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3298 pc += thumb_insn_size (inst1);
3299 itstate = thumb_advance_itstate (itstate);
3300 }
3301
3302 return MAKE_THUMB_ADDR (pc);
3303 }
3304 else if ((itstate & 0x0f) == 0x08)
3305 {
3306 /* This is the last instruction of the conditional
3307 block, and it is executed. We can handle it normally
3308 because the following instruction is not conditional,
3309 and we must handle it normally because it is
3310 permitted to branch. Fall through. */
3311 }
3312 else
3313 {
3314 int cond_negated;
3315
3316 /* There are conditional instructions after this one.
3317 If this instruction modifies the flags, then we can
3318 not predict what the next executed instruction will
3319 be. Fortunately, this instruction is architecturally
3320 forbidden to branch; we know it will fall through.
3321 Start by skipping past it. */
3322 pc += thumb_insn_size (inst1);
3323 itstate = thumb_advance_itstate (itstate);
3324
3325 /* Set a breakpoint on the following instruction. */
3326 gdb_assert ((itstate & 0x0f) != 0);
3327 if (insert_bkpt)
3328 insert_single_step_breakpoint (gdbarch, aspace, pc);
3329 cond_negated = (itstate >> 4) & 1;
3330
3331 /* Skip all following instructions with the same
3332 condition. If there is a later instruction in the IT
3333 block with the opposite condition, set the other
3334 breakpoint there. If not, then set a breakpoint on
3335 the instruction after the IT block. */
3336 do
3337 {
3338 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3339 pc += thumb_insn_size (inst1);
3340 itstate = thumb_advance_itstate (itstate);
3341 }
3342 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
3343
3344 return MAKE_THUMB_ADDR (pc);
3345 }
3346 }
3347 }
3348 else if (itstate & 0x0f)
3349 {
3350 /* We are in a conditional block. Check the condition. */
3351 int cond = itstate >> 4;
3352
3353 if (! condition_true (cond, status))
3354 {
3355 /* Advance to the next instruction. All the 32-bit
3356 instructions share a common prefix. */
3357 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3358 return MAKE_THUMB_ADDR (pc + 4);
3359 else
3360 return MAKE_THUMB_ADDR (pc + 2);
3361 }
3362
3363 /* Otherwise, handle the instruction normally. */
3364 }
3365
3366 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
3367 {
3368 CORE_ADDR sp;
3369
3370 /* Fetch the saved PC from the stack. It's stored above
3371 all of the other registers. */
3372 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
3373 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
3374 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
3375 }
3376 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
3377 {
3378 unsigned long cond = bits (inst1, 8, 11);
3379 if (cond == 0x0f) /* 0x0f = SWI */
3380 {
3381 struct gdbarch_tdep *tdep;
3382 tdep = gdbarch_tdep (gdbarch);
3383
3384 if (tdep->syscall_next_pc != NULL)
3385 nextpc = tdep->syscall_next_pc (frame);
3386
3387 }
3388 else if (cond != 0x0f && condition_true (cond, status))
3389 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
3390 }
3391 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
3392 {
3393 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
3394 }
3395 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
3396 {
3397 unsigned short inst2;
3398 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
3399
3400 /* Default to the next instruction. */
3401 nextpc = pc + 4;
3402 nextpc = MAKE_THUMB_ADDR (nextpc);
3403
3404 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
3405 {
3406 /* Branches and miscellaneous control instructions. */
3407
3408 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
3409 {
3410 /* B, BL, BLX. */
3411 int j1, j2, imm1, imm2;
3412
3413 imm1 = sbits (inst1, 0, 10);
3414 imm2 = bits (inst2, 0, 10);
3415 j1 = bit (inst2, 13);
3416 j2 = bit (inst2, 11);
3417
3418 offset = ((imm1 << 12) + (imm2 << 1));
3419 offset ^= ((!j2) << 22) | ((!j1) << 23);
3420
3421 nextpc = pc_val + offset;
3422 /* For BLX make sure to clear the low bits. */
3423 if (bit (inst2, 12) == 0)
3424 nextpc = nextpc & 0xfffffffc;
3425 }
3426 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
3427 {
3428 /* SUBS PC, LR, #imm8. */
3429 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
3430 nextpc -= inst2 & 0x00ff;
3431 }
3432 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
3433 {
3434 /* Conditional branch. */
3435 if (condition_true (bits (inst1, 6, 9), status))
3436 {
3437 int sign, j1, j2, imm1, imm2;
3438
3439 sign = sbits (inst1, 10, 10);
3440 imm1 = bits (inst1, 0, 5);
3441 imm2 = bits (inst2, 0, 10);
3442 j1 = bit (inst2, 13);
3443 j2 = bit (inst2, 11);
3444
3445 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
3446 offset += (imm1 << 12) + (imm2 << 1);
3447
3448 nextpc = pc_val + offset;
3449 }
3450 }
3451 }
3452 else if ((inst1 & 0xfe50) == 0xe810)
3453 {
3454 /* Load multiple or RFE. */
3455 int rn, offset, load_pc = 1;
3456
3457 rn = bits (inst1, 0, 3);
3458 if (bit (inst1, 7) && !bit (inst1, 8))
3459 {
3460 /* LDMIA or POP */
3461 if (!bit (inst2, 15))
3462 load_pc = 0;
3463 offset = bitcount (inst2) * 4 - 4;
3464 }
3465 else if (!bit (inst1, 7) && bit (inst1, 8))
3466 {
3467 /* LDMDB */
3468 if (!bit (inst2, 15))
3469 load_pc = 0;
3470 offset = -4;
3471 }
3472 else if (bit (inst1, 7) && bit (inst1, 8))
3473 {
3474 /* RFEIA */
3475 offset = 0;
3476 }
3477 else if (!bit (inst1, 7) && !bit (inst1, 8))
3478 {
3479 /* RFEDB */
3480 offset = -8;
3481 }
3482 else
3483 load_pc = 0;
3484
3485 if (load_pc)
3486 {
3487 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
3488 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
3489 }
3490 }
3491 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
3492 {
3493 /* MOV PC or MOVS PC. */
3494 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3495 nextpc = MAKE_THUMB_ADDR (nextpc);
3496 }
3497 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
3498 {
3499 /* LDR PC. */
3500 CORE_ADDR base;
3501 int rn, load_pc = 1;
3502
3503 rn = bits (inst1, 0, 3);
3504 base = get_frame_register_unsigned (frame, rn);
3505 if (rn == 15)
3506 {
3507 base = (base + 4) & ~(CORE_ADDR) 0x3;
3508 if (bit (inst1, 7))
3509 base += bits (inst2, 0, 11);
3510 else
3511 base -= bits (inst2, 0, 11);
3512 }
3513 else if (bit (inst1, 7))
3514 base += bits (inst2, 0, 11);
3515 else if (bit (inst2, 11))
3516 {
3517 if (bit (inst2, 10))
3518 {
3519 if (bit (inst2, 9))
3520 base += bits (inst2, 0, 7);
3521 else
3522 base -= bits (inst2, 0, 7);
3523 }
3524 }
3525 else if ((inst2 & 0x0fc0) == 0x0000)
3526 {
3527 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
3528 base += get_frame_register_unsigned (frame, rm) << shift;
3529 }
3530 else
3531 /* Reserved. */
3532 load_pc = 0;
3533
3534 if (load_pc)
3535 nextpc = get_frame_memory_unsigned (frame, base, 4);
3536 }
3537 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
3538 {
3539 /* TBB. */
3540 CORE_ADDR tbl_reg, table, offset, length;
3541
3542 tbl_reg = bits (inst1, 0, 3);
3543 if (tbl_reg == 0x0f)
3544 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3545 else
3546 table = get_frame_register_unsigned (frame, tbl_reg);
3547
3548 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3549 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
3550 nextpc = pc_val + length;
3551 }
3552 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
3553 {
3554 /* TBH. */
3555 CORE_ADDR tbl_reg, table, offset, length;
3556
3557 tbl_reg = bits (inst1, 0, 3);
3558 if (tbl_reg == 0x0f)
3559 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3560 else
3561 table = get_frame_register_unsigned (frame, tbl_reg);
3562
3563 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3564 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
3565 nextpc = pc_val + length;
3566 }
3567 }
3568 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
3569 {
3570 if (bits (inst1, 3, 6) == 0x0f)
3571 nextpc = pc_val;
3572 else
3573 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
3574 }
3575 else if ((inst1 & 0xf500) == 0xb100)
3576 {
3577 /* CBNZ or CBZ. */
3578 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
3579 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
3580
3581 if (bit (inst1, 11) && reg != 0)
3582 nextpc = pc_val + imm;
3583 else if (!bit (inst1, 11) && reg == 0)
3584 nextpc = pc_val + imm;
3585 }
3586 return nextpc;
3587 }
3588
3589 /* Get the raw next address. PC is the current program counter, in
3590 FRAME. INSERT_BKPT should be TRUE if we want a breakpoint set on
3591 the alternative next instruction if there are two options.
3592
3593 The value returned has the execution state of the next instruction
3594 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
3595 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
3596 address.
3597 */
3598 static CORE_ADDR
3599 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3600 {
3601 struct gdbarch *gdbarch = get_frame_arch (frame);
3602 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3603 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3604 unsigned long pc_val;
3605 unsigned long this_instr;
3606 unsigned long status;
3607 CORE_ADDR nextpc;
3608
3609 if (arm_frame_is_thumb (frame))
3610 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
3611
3612 pc_val = (unsigned long) pc;
3613 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3614
3615 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3616 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
3617
3618 if (bits (this_instr, 28, 31) == INST_NV)
3619 switch (bits (this_instr, 24, 27))
3620 {
3621 case 0xa:
3622 case 0xb:
3623 {
3624 /* Branch with Link and change to Thumb. */
3625 nextpc = BranchDest (pc, this_instr);
3626 nextpc |= bit (this_instr, 24) << 1;
3627 nextpc = MAKE_THUMB_ADDR (nextpc);
3628 break;
3629 }
3630 case 0xc:
3631 case 0xd:
3632 case 0xe:
3633 /* Coprocessor register transfer. */
3634 if (bits (this_instr, 12, 15) == 15)
3635 error (_("Invalid update to pc in instruction"));
3636 break;
3637 }
3638 else if (condition_true (bits (this_instr, 28, 31), status))
3639 {
3640 switch (bits (this_instr, 24, 27))
3641 {
3642 case 0x0:
3643 case 0x1: /* data processing */
3644 case 0x2:
3645 case 0x3:
3646 {
3647 unsigned long operand1, operand2, result = 0;
3648 unsigned long rn;
3649 int c;
3650
3651 if (bits (this_instr, 12, 15) != 15)
3652 break;
3653
3654 if (bits (this_instr, 22, 25) == 0
3655 && bits (this_instr, 4, 7) == 9) /* multiply */
3656 error (_("Invalid update to pc in instruction"));
3657
3658 /* BX <reg>, BLX <reg> */
3659 if (bits (this_instr, 4, 27) == 0x12fff1
3660 || bits (this_instr, 4, 27) == 0x12fff3)
3661 {
3662 rn = bits (this_instr, 0, 3);
3663 nextpc = (rn == 15) ? pc_val + 8
3664 : get_frame_register_unsigned (frame, rn);
3665 return nextpc;
3666 }
3667
3668 /* Data processing instruction with the PC as destination: compute the operands.  */
3669 c = (status & FLAG_C) ? 1 : 0;
3670 rn = bits (this_instr, 16, 19);
3671 operand1 = (rn == 15) ? pc_val + 8
3672 : get_frame_register_unsigned (frame, rn);
3673
3674 if (bit (this_instr, 25))
3675 {
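		  /* Immediate operand: an 8-bit value rotated right by
		     twice the 4-bit rotate field.  */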
3676 unsigned long immval = bits (this_instr, 0, 7);
3677 unsigned long rotate = 2 * bits (this_instr, 8, 11);
3678 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
3679 & 0xffffffff;
3680 }
3681 else /* operand 2 is a shifted register */
3682 operand2 = shifted_reg_val (frame, this_instr, c, pc_val, status);
3683
3684 switch (bits (this_instr, 21, 24))
3685 {
3686 case 0x0: /*and */
3687 result = operand1 & operand2;
3688 break;
3689
3690 case 0x1: /*eor */
3691 result = operand1 ^ operand2;
3692 break;
3693
3694 case 0x2: /*sub */
3695 result = operand1 - operand2;
3696 break;
3697
3698 case 0x3: /*rsb */
3699 result = operand2 - operand1;
3700 break;
3701
3702 case 0x4: /*add */
3703 result = operand1 + operand2;
3704 break;
3705
3706 case 0x5: /*adc */
3707 result = operand1 + operand2 + c;
3708 break;
3709
3710 case 0x6: /*sbc */
3711 result = operand1 - operand2 + c;
3712 break;
3713
3714 case 0x7: /*rsc */
3715 result = operand2 - operand1 + c;
3716 break;
3717
3718 case 0x8:
3719 case 0x9:
3720 case 0xa:
3721 case 0xb: /* tst, teq, cmp, cmn */
3722 result = (unsigned long) nextpc;
3723 break;
3724
3725 case 0xc: /*orr */
3726 result = operand1 | operand2;
3727 break;
3728
3729 case 0xd: /*mov */
3730 /* Always step into a function. */
3731 result = operand2;
3732 break;
3733
3734 case 0xe: /*bic */
3735 result = operand1 & ~operand2;
3736 break;
3737
3738 case 0xf: /*mvn */
3739 result = ~operand2;
3740 break;
3741 }
3742
3743 /* In 26-bit APCS the bottom two bits of the result are
3744 ignored, and we always end up in ARM state. */
3745 if (!arm_apcs_32)
3746 nextpc = arm_addr_bits_remove (gdbarch, result);
3747 else
3748 nextpc = result;
3749
3750 break;
3751 }
3752
3753 case 0x4:
3754 case 0x5: /* data transfer */
3755 case 0x6:
3756 case 0x7:
3757 if (bit (this_instr, 20))
3758 {
3759 /* load */
3760 if (bits (this_instr, 12, 15) == 15)
3761 {
3762 /* rd == pc */
3763 unsigned long rn;
3764 unsigned long base;
3765
3766 if (bit (this_instr, 22))
3767 error (_("Invalid update to pc in instruction"));
3768
3769 /* Word load with the PC as destination: work out the transfer address.  */
3770 rn = bits (this_instr, 16, 19);
3771 base = (rn == 15) ? pc_val + 8
3772 : get_frame_register_unsigned (frame, rn);
3773 if (bit (this_instr, 24))
3774 {
3775 /* pre-indexed */
3776 int c = (status & FLAG_C) ? 1 : 0;
3777 unsigned long offset =
3778 (bit (this_instr, 25)
3779 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
3780 : bits (this_instr, 0, 11));
3781
3782 if (bit (this_instr, 23))
3783 base += offset;
3784 else
3785 base -= offset;
3786 }
3787 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
3788 4, byte_order);
3789 }
3790 }
3791 break;
3792
3793 case 0x8:
3794 case 0x9: /* block transfer */
3795 if (bit (this_instr, 20))
3796 {
3797 /* LDM */
3798 if (bit (this_instr, 15))
3799 {
3800 /* loading pc */
3801 int offset = 0;
3802
3803 if (bit (this_instr, 23))
3804 {
3805 /* up */
3806 unsigned long reglist = bits (this_instr, 0, 14);
3807 offset = bitcount (reglist) * 4;
3808 if (bit (this_instr, 24)) /* pre */
3809 offset += 4;
3810 }
3811 else if (bit (this_instr, 24))
3812 offset = -4;
3813
3814 {
3815 unsigned long rn_val =
3816 get_frame_register_unsigned (frame,
3817 bits (this_instr, 16, 19));
3818 nextpc =
3819 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
3820 + offset),
3821 4, byte_order);
3822 }
3823 }
3824 }
3825 break;
3826
3827 case 0xb: /* branch & link */
3828 case 0xa: /* branch */
3829 {
3830 nextpc = BranchDest (pc, this_instr);
3831 break;
3832 }
3833
3834 case 0xc:
3835 case 0xd:
3836 case 0xe: /* coproc ops */
3837 break;
3838 case 0xf: /* SWI */
3839 {
3840 struct gdbarch_tdep *tdep;
3841 tdep = gdbarch_tdep (gdbarch);
3842
3843 if (tdep->syscall_next_pc != NULL)
3844 nextpc = tdep->syscall_next_pc (frame);
3845
3846 }
3847 break;
3848
3849 default:
3850 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
3851 return (pc);
3852 }
3853 }
3854
3855 return nextpc;
3856 }
3857
3858 CORE_ADDR
3859 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
3860 {
3861 struct gdbarch *gdbarch = get_frame_arch (frame);
3862 CORE_ADDR nextpc =
3863 gdbarch_addr_bits_remove (gdbarch,
3864 arm_get_next_pc_raw (frame, pc, TRUE));
3865 if (nextpc == pc)
3866 error (_("Infinite loop detected"));
3867 return nextpc;
3868 }
3869
3870 /* single_step() is called just before we want to resume the inferior,
3871 if we want to single-step it but there is no hardware or kernel
3872 single-step support. We find the target of the coming instruction
3873 and breakpoint it. */
3874
3875 int
3876 arm_software_single_step (struct frame_info *frame)
3877 {
3878 struct gdbarch *gdbarch = get_frame_arch (frame);
3879 struct address_space *aspace = get_frame_address_space (frame);
3880
3881 /* NOTE: This may insert the wrong breakpoint instruction when
3882 single-stepping over a mode-changing instruction, if the
3883 CPSR heuristics are used. */
3884
3885 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
3886 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
3887
3888 return 1;
3889 }
3890
3891 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
3892 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
3893 NULL if an error occurs. BUF is freed. */
3894
3895 static gdb_byte *
3896 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
3897 int old_len, int new_len)
3898 {
3899 gdb_byte *new_buf, *middle;
3900 int bytes_to_read = new_len - old_len;
3901
3902 new_buf = xmalloc (new_len);
3903 memcpy (new_buf + bytes_to_read, buf, old_len);
3904 xfree (buf);
3905 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
3906 {
3907 xfree (new_buf);
3908 return NULL;
3909 }
3910 return new_buf;
3911 }
3912
3913 /* An IT block is at most the 2-byte IT instruction followed by
3914 four 4-byte instructions. The furthest back we must search to
3915 find an IT block that affects the current instruction is thus
3916 2 + 3 * 4 == 14 bytes. */
3917 #define MAX_IT_BLOCK_PREFIX 14
3918
3919 /* Use a quick scan if there are more than this many bytes of
3920 code. */
3921 #define IT_SCAN_THRESHOLD 32
3922
3923 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
3924 A breakpoint in an IT block may not be hit, depending on the
3925 condition flags. */
3926 static CORE_ADDR
3927 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
3928 {
3929 gdb_byte *buf;
3930 char map_type;
3931 CORE_ADDR boundary, func_start;
3932 int buf_len, buf2_len;
3933 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
3934 int i, any, last_it, last_it_count;
3935
3936 /* If we are using BKPT breakpoints, none of this is necessary. */
3937 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
3938 return bpaddr;
3939
3940 /* ARM mode does not have this problem. */
3941 if (!arm_pc_is_thumb (gdbarch, bpaddr))
3942 return bpaddr;
3943
3944 /* We are setting a breakpoint in Thumb code that could potentially
3945 contain an IT block. The first step is to find how much Thumb
3946 code there is; we do not need to read outside of known Thumb
3947 sequences. */
3948 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
3949 if (map_type == 0)
3950 /* Thumb-2 code must have mapping symbols to have a chance. */
3951 return bpaddr;
3952
3953 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
3954
3955 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
3956 && func_start > boundary)
3957 boundary = func_start;
3958
3959 /* Search for a candidate IT instruction. We have to do some fancy
3960 footwork to distinguish a real IT instruction from the second
3961 half of a 32-bit instruction, but there is no need for that if
3962 there's no candidate. */
3963 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
3964 if (buf_len == 0)
3965 /* No room for an IT instruction. */
3966 return bpaddr;
3967
3968 buf = xmalloc (buf_len);
3969 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
3970 {
      xfree (buf);
      return bpaddr;
    }
3971 any = 0;
3972 for (i = 0; i < buf_len; i += 2)
3973 {
3974 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3975 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3976 {
3977 any = 1;
3978 break;
3979 }
3980 }
3981 if (any == 0)
3982 {
3983 xfree (buf);
3984 return bpaddr;
3985 }
3986
3987 /* OK, the code bytes before this instruction contain at least one
3988 halfword which resembles an IT instruction. We know that it's
3989 Thumb code, but there are still two possibilities. Either the
3990 halfword really is an IT instruction, or it is the second half of
3991 a 32-bit Thumb instruction. The only way we can tell is to
3992 scan forwards from a known instruction boundary. */
3993 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
3994 {
3995 int definite;
3996
3997 /* There's a lot of code before this instruction. Start with an
3998 optimistic search; it's easy to recognize halfwords that can
3999 not be the start of a 32-bit instruction, and use that to
4000 lock on to the instruction boundaries. */
4001 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
4002 if (buf == NULL)
4003 return bpaddr;
4004 buf_len = IT_SCAN_THRESHOLD;
4005
4006 definite = 0;
4007 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
4008 {
4009 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4010 if (thumb_insn_size (inst1) == 2)
4011 {
4012 definite = 1;
4013 break;
4014 }
4015 }
4016
4017 /* At this point, if DEFINITE, BUF[I] is the first place we
4018 are sure that we know the instruction boundaries, and it is far
4019 enough from BPADDR that we could not miss an IT instruction
4020 affecting BPADDR. If ! DEFINITE, give up - start from a
4021 known boundary. */
4022 if (! definite)
4023 {
4024 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4025 if (buf == NULL)
4026 return bpaddr;
4027 buf_len = bpaddr - boundary;
4028 i = 0;
4029 }
4030 }
4031 else
4032 {
4033 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4034 if (buf == NULL)
4035 return bpaddr;
4036 buf_len = bpaddr - boundary;
4037 i = 0;
4038 }
4039
4040 /* Scan forwards. Find the last IT instruction before BPADDR. */
4041 last_it = -1;
4042 last_it_count = 0;
4043 while (i < buf_len)
4044 {
4045 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4046 last_it_count--;
4047 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4048 {
4049 last_it = i;
4050 if (inst1 & 0x0001)
4051 last_it_count = 4;
4052 else if (inst1 & 0x0002)
4053 last_it_count = 3;
4054 else if (inst1 & 0x0004)
4055 last_it_count = 2;
4056 else
4057 last_it_count = 1;
4058 }
4059 i += thumb_insn_size (inst1);
4060 }
4061
4062 xfree (buf);
4063
4064 if (last_it == -1)
4065 /* There wasn't really an IT instruction after all. */
4066 return bpaddr;
4067
4068 if (last_it_count < 1)
4069 /* It was too far away. */
4070 return bpaddr;
4071
4072 /* This really is a trouble spot. Move the breakpoint to the IT
4073 instruction. */
4074 return bpaddr - buf_len + last_it;
4075 }
4076
4077 /* ARM displaced stepping support.
4078
4079 Generally ARM displaced stepping works as follows:
4080
4081 1. When an instruction is to be single-stepped, it is first decoded by
4082 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
4083 Depending on the type of instruction, it is then copied to a scratch
4084 location, possibly in a modified form. The copy_* set of functions
4085 performs such modification, as necessary. A breakpoint is placed after
4086 the modified instruction in the scratch space to return control to GDB.
4087 Note in particular that instructions which modify the PC will no longer
4088 do so after modification.
4089
4090 2. The instruction is single-stepped, by setting the PC to the scratch
4091 location address, and resuming. Control returns to GDB when the
4092 breakpoint is hit.
4093
4094 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
4095 function used for the current instruction. This function's job is to
4096 put the CPU/memory state back to what it would have been if the
4097 instruction had been executed unmodified in its original location. */
4098
4099 /* NOP instruction (mov r0, r0). */
4100 #define ARM_NOP 0xe1a00000
4101
4102 /* Helper for register reads for displaced stepping. In particular, this
4103 returns the PC as it would be seen by the instruction at its original
4104 location. */
4105
4106 ULONGEST
4107 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
4108 {
4109 ULONGEST ret;
4110
4111 if (regno == 15)
4112 {
4113 if (debug_displaced)
4114 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
4115 (unsigned long) from + 8);
4116 return (ULONGEST) from + 8; /* Pipeline offset. */
4117 }
4118 else
4119 {
4120 regcache_cooked_read_unsigned (regs, regno, &ret);
4121 if (debug_displaced)
4122 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
4123 regno, (unsigned long) ret);
4124 return ret;
4125 }
4126 }
4127
4128 static int
4129 displaced_in_arm_mode (struct regcache *regs)
4130 {
4131 ULONGEST ps;
4132 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4133
4134 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4135
4136 return (ps & t_bit) == 0;
4137 }
4138
4139 /* Write to the PC as from a branch instruction. */
4140
4141 static void
4142 branch_write_pc (struct regcache *regs, ULONGEST val)
4143 {
4144 if (displaced_in_arm_mode (regs))
4145 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4146 architecture versions < 6. */
4147 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x3);
4148 else
4149 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x1);
4150 }
4151
4152 /* Write to the PC as from a branch-exchange instruction. */
4153
4154 static void
4155 bx_write_pc (struct regcache *regs, ULONGEST val)
4156 {
4157 ULONGEST ps;
4158 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4159
4160 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4161
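  /* Bit 0 set: branch to Thumb state.  Bits 1:0 both clear: branch to
     ARM state.  Anything else is unpredictable and handled below.  */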
4162 if ((val & 1) == 1)
4163 {
4164 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
4165 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
4166 }
4167 else if ((val & 2) == 0)
4168 {
4169 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4170 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
4171 }
4172 else
4173 {
4174 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4175 mode, align dest to 4 bytes). */
4176 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4177 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4178 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
4179 }
4180 }
4181
4182 /* Write to the PC as if from a load instruction. */
4183
4184 static void
4185 load_write_pc (struct regcache *regs, ULONGEST val)
4186 {
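  /* Loads that write the PC interwork on v5T and later, so use BX
     semantics there; older cores treat the write as a plain branch.  */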
4187 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
4188 bx_write_pc (regs, val);
4189 else
4190 branch_write_pc (regs, val);
4191 }
4192
4193 /* Write to the PC as if from an ALU instruction. */
4194
4195 static void
4196 alu_write_pc (struct regcache *regs, ULONGEST val)
4197 {
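  /* In ARM state on v7 and later, ALU writes to the PC interwork;
     otherwise the write behaves like a plain branch.  */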
4198 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
4199 bx_write_pc (regs, val);
4200 else
4201 branch_write_pc (regs, val);
4202 }
4203
4204 /* Helper for writing to registers for displaced stepping. Writing to the PC
4205 has varying effects depending on the instruction which does the write:
4206 this is controlled by the WRITE_PC argument. */
4207
4208 void
4209 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
4210 int regno, ULONGEST val, enum pc_write_style write_pc)
4211 {
4212 if (regno == 15)
4213 {
4214 if (debug_displaced)
4215 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
4216 (unsigned long) val);
4217 switch (write_pc)
4218 {
4219 case BRANCH_WRITE_PC:
4220 branch_write_pc (regs, val);
4221 break;
4222
4223 case BX_WRITE_PC:
4224 bx_write_pc (regs, val);
4225 break;
4226
4227 case LOAD_WRITE_PC:
4228 load_write_pc (regs, val);
4229 break;
4230
4231 case ALU_WRITE_PC:
4232 alu_write_pc (regs, val);
4233 break;
4234
4235 case CANNOT_WRITE_PC:
4236 warning (_("Instruction wrote to PC in an unexpected way when "
4237 "single-stepping"));
4238 break;
4239
4240 default:
4241 internal_error (__FILE__, __LINE__,
4242 _("Invalid argument to displaced_write_reg"));
4243 }
4244
4245 dsc->wrote_to_pc = 1;
4246 }
4247 else
4248 {
4249 if (debug_displaced)
4250 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
4251 regno, (unsigned long) val);
4252 regcache_cooked_write_unsigned (regs, regno, val);
4253 }
4254 }
4255
4256 /* This function is used to concisely determine if an instruction INSN
4257 references PC. Register fields of interest in INSN should have the
4258 corresponding fields of BITMASK set to 0b1111. The function returns 1
4259 if any of these fields in INSN reference the PC (also 0b1111, r15), else it
4260 returns 0. */
4261
4262 static int
4263 insn_references_pc (uint32_t insn, uint32_t bitmask)
4264 {
4265 uint32_t lowbit = 1;
4266
4267 while (bitmask != 0)
4268 {
4269 uint32_t mask;
4270
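      /* Advance LOWBIT to the lowest bit still set in BITMASK; each set
	 nibble of BITMASK marks a four-bit register field of INSN to
	 examine.  */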
4271 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
4272 ;
4273
4274 if (!lowbit)
4275 break;
4276
4277 mask = lowbit * 0xf;
4278
4279 if ((insn & mask) == mask)
4280 return 1;
4281
4282 bitmask &= ~mask;
4283 }
4284
4285 return 0;
4286 }
4287
4288 /* The simplest copy function. Many instructions have the same effect no
4289 matter what address they are executed at: in those cases, use this. */
4290
4291 static int
4292 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
4293 const char *iname, struct displaced_step_closure *dsc)
4294 {
4295 if (debug_displaced)
4296 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
4297 "opcode/class '%s' unmodified\n", (unsigned long) insn,
4298 iname);
4299
4300 dsc->modinsn[0] = insn;
4301
4302 return 0;
4303 }
4304
4305 /* Preload instructions with immediate offset. */
4306
4307 static void
4308 cleanup_preload (struct gdbarch *gdbarch,
4309 struct regcache *regs, struct displaced_step_closure *dsc)
4310 {
4311 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4312 if (!dsc->u.preload.immed)
4313 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4314 }
4315
4316 static int
4317 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4318 struct displaced_step_closure *dsc)
4319 {
4320 unsigned int rn = bits (insn, 16, 19);
4321 ULONGEST rn_val;
4322 CORE_ADDR from = dsc->insn_addr;
4323
4324 if (!insn_references_pc (insn, 0x000f0000ul))
4325 return copy_unmodified (gdbarch, insn, "preload", dsc);
4326
4327 if (debug_displaced)
4328 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4329 (unsigned long) insn);
4330
4331 /* Preload instructions:
4332
4333 {pli/pld} [rn, #+/-imm]
4334 ->
4335 {pli/pld} [r0, #+/-imm]. */
4336
4337 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4338 rn_val = displaced_read_reg (regs, from, rn);
4339 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4340
4341 dsc->u.preload.immed = 1;
4342
4343 dsc->modinsn[0] = insn & 0xfff0ffff;
4344
4345 dsc->cleanup = &cleanup_preload;
4346
4347 return 0;
4348 }
4349
4350 /* Preload instructions with register offset. */
4351
4352 static int
4353 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4354 struct displaced_step_closure *dsc)
4355 {
4356 unsigned int rn = bits (insn, 16, 19);
4357 unsigned int rm = bits (insn, 0, 3);
4358 ULONGEST rn_val, rm_val;
4359 CORE_ADDR from = dsc->insn_addr;
4360
4361 if (!insn_references_pc (insn, 0x000f000ful))
4362 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
4363
4364 if (debug_displaced)
4365 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4366 (unsigned long) insn);
4367
4368 /* Preload register-offset instructions:
4369
4370 {pli/pld} [rn, rm {, shift}]
4371 ->
4372 {pli/pld} [r0, r1 {, shift}]. */
4373
4374 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4375 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4376 rn_val = displaced_read_reg (regs, from, rn);
4377 rm_val = displaced_read_reg (regs, from, rm);
4378 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4379 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
4380
4381 dsc->u.preload.immed = 0;
4382
4383 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
4384
4385 dsc->cleanup = &cleanup_preload;
4386
4387 return 0;
4388 }
4389
4390 /* Copy/cleanup coprocessor load and store instructions. */
4391
4392 static void
4393 cleanup_copro_load_store (struct gdbarch *gdbarch,
4394 struct regcache *regs,
4395 struct displaced_step_closure *dsc)
4396 {
4397 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4398
4399 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4400
4401 if (dsc->u.ldst.writeback)
4402 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
4403 }
4404
4405 static int
4406 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
4407 struct regcache *regs,
4408 struct displaced_step_closure *dsc)
4409 {
4410 unsigned int rn = bits (insn, 16, 19);
4411 ULONGEST rn_val;
4412 CORE_ADDR from = dsc->insn_addr;
4413
4414 if (!insn_references_pc (insn, 0x000f0000ul))
4415 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
4416
4417 if (debug_displaced)
4418 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
4419 "load/store insn %.8lx\n", (unsigned long) insn);
4420
4421 /* Coprocessor load/store instructions:
4422
4423 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4424 ->
4425 {stc/stc2} [r0, #+/-imm].
4426
4427 ldc/ldc2 are handled identically. */
4428
4429 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4430 rn_val = displaced_read_reg (regs, from, rn);
4431 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4432
4433 dsc->u.ldst.writeback = bit (insn, 25);
4434 dsc->u.ldst.rn = rn;
4435
4436 dsc->modinsn[0] = insn & 0xfff0ffff;
4437
4438 dsc->cleanup = &cleanup_copro_load_store;
4439
4440 return 0;
4441 }
4442
4443 /* Clean up branch instructions (actually perform the branch, by setting
4444 PC). */
4445
4446 static void
4447 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
4448 struct displaced_step_closure *dsc)
4449 {
4450 ULONGEST from = dsc->insn_addr;
4451 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4452 int branch_taken = condition_true (dsc->u.branch.cond, status);
4453 enum pc_write_style write_pc = dsc->u.branch.exchange
4454 ? BX_WRITE_PC : BRANCH_WRITE_PC;
4455
4456 if (!branch_taken)
4457 return;
4458
4459 if (dsc->u.branch.link)
4460 {
4461 ULONGEST pc = displaced_read_reg (regs, from, 15);
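      /* The PC reads as FROM + 8 here, so this leaves the address of the
	 instruction following the branch (FROM + 4) in LR.  */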
4462 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
4463 }
4464
4465 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
4466 }
4467
4468 /* Copy B/BL/BLX instructions with immediate destinations. */
4469
4470 static int
4471 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
4472 struct regcache *regs, struct displaced_step_closure *dsc)
4473 {
4474 unsigned int cond = bits (insn, 28, 31);
4475 int exchange = (cond == 0xf);
4476 int link = exchange || bit (insn, 24);
4477 CORE_ADDR from = dsc->insn_addr;
4478 long offset;
4479
4480 if (debug_displaced)
4481 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
4482 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
4483 (unsigned long) insn);
4484
4485 /* Implement "BL<cond> <label>" as:
4486
4487 Preparation: cond <- instruction condition
4488 Insn: mov r0, r0 (nop)
4489 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
4490
4491 B<cond> similar, but don't set r14 in cleanup. */
4492
4493 if (exchange)
4494 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
4495 then arrange the switch into Thumb mode. */
4496 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
4497 else
4498 offset = bits (insn, 0, 23) << 2;
4499
4500 if (bit (offset, 25))
4501 offset = offset | ~0x3ffffff;
4502
4503 dsc->u.branch.cond = cond;
4504 dsc->u.branch.link = link;
4505 dsc->u.branch.exchange = exchange;
4506 dsc->u.branch.dest = from + 8 + offset;
4507
4508 dsc->modinsn[0] = ARM_NOP;
4509
4510 dsc->cleanup = &cleanup_branch;
4511
4512 return 0;
4513 }
4514
4515 /* Copy BX/BLX with register-specified destinations. */
4516
4517 static int
4518 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
4519 struct regcache *regs, struct displaced_step_closure *dsc)
4520 {
4521 unsigned int cond = bits (insn, 28, 31);
4522 /* BX: x12xxx1x
4523 BLX: x12xxx3x. */
4524 int link = bit (insn, 5);
4525 unsigned int rm = bits (insn, 0, 3);
4526 CORE_ADDR from = dsc->insn_addr;
4527
4528 if (debug_displaced)
4529 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
4530 "%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
4531
4532 /* Implement "{BX,BLX}<cond> <reg>" as:
4533
4534 Preparation: cond <- instruction condition
4535 Insn: mov r0, r0 (nop)
4536 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
4537
4538 Don't set r14 in cleanup for BX. */
4539
4540 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
4541
4542 dsc->u.branch.cond = cond;
4543 dsc->u.branch.link = link;
4544 dsc->u.branch.exchange = 1;
4545
4546 dsc->modinsn[0] = ARM_NOP;
4547
4548 dsc->cleanup = &cleanup_branch;
4549
4550 return 0;
4551 }
4552
4553 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
4554
4555 static void
4556 cleanup_alu_imm (struct gdbarch *gdbarch,
4557 struct regcache *regs, struct displaced_step_closure *dsc)
4558 {
4559 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4560 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4561 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4562 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4563 }
4564
4565 static int
4566 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4567 struct displaced_step_closure *dsc)
4568 {
4569 unsigned int rn = bits (insn, 16, 19);
4570 unsigned int rd = bits (insn, 12, 15);
4571 unsigned int op = bits (insn, 21, 24);
4572 int is_mov = (op == 0xd);
4573 ULONGEST rd_val, rn_val;
4574 CORE_ADDR from = dsc->insn_addr;
4575
4576 if (!insn_references_pc (insn, 0x000ff000ul))
4577 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
4578
4579 if (debug_displaced)
4580 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
4581 "%.8lx\n", is_mov ? "move" : "ALU",
4582 (unsigned long) insn);
4583
4584 /* Instruction is of form:
4585
4586 <op><cond> rd, [rn,] #imm
4587
4588 Rewrite as:
4589
4590 Preparation: tmp1, tmp2 <- r0, r1;
4591 r0, r1 <- rd, rn
4592 Insn: <op><cond> r0, r1, #imm
4593 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
4594 */
4595
4596 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4597 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4598 rn_val = displaced_read_reg (regs, from, rn);
4599 rd_val = displaced_read_reg (regs, from, rd);
4600 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4601 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4602 dsc->rd = rd;
4603
4604 if (is_mov)
4605 dsc->modinsn[0] = insn & 0xfff00fff;
4606 else
4607 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
4608
4609 dsc->cleanup = &cleanup_alu_imm;
4610
4611 return 0;
4612 }
4613
4614 /* Copy/cleanup arithmetic/logic insns with register RHS. */
4615
4616 static void
4617 cleanup_alu_reg (struct gdbarch *gdbarch,
4618 struct regcache *regs, struct displaced_step_closure *dsc)
4619 {
4620 ULONGEST rd_val;
4621 int i;
4622
4623 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4624
4625 for (i = 0; i < 3; i++)
4626 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4627
4628 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4629 }
4630
4631 static int
4632 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4633 struct displaced_step_closure *dsc)
4634 {
4635 unsigned int rn = bits (insn, 16, 19);
4636 unsigned int rm = bits (insn, 0, 3);
4637 unsigned int rd = bits (insn, 12, 15);
4638 unsigned int op = bits (insn, 21, 24);
4639 int is_mov = (op == 0xd);
4640 ULONGEST rd_val, rn_val, rm_val;
4641 CORE_ADDR from = dsc->insn_addr;
4642
4643 if (!insn_references_pc (insn, 0x000ff00ful))
4644 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
4645
4646 if (debug_displaced)
4647 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
4648 is_mov ? "move" : "ALU", (unsigned long) insn);
4649
4650 /* Instruction is of form:
4651
4652 <op><cond> rd, [rn,] rm [, <shift>]
4653
4654 Rewrite as:
4655
4656 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
4657 r0, r1, r2 <- rd, rn, rm
4658 Insn: <op><cond> r0, r1, r2 [, <shift>]
4659 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
4660 */
4661
4662 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4663 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4664 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4665 rd_val = displaced_read_reg (regs, from, rd);
4666 rn_val = displaced_read_reg (regs, from, rn);
4667 rm_val = displaced_read_reg (regs, from, rm);
4668 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4669 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4670 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4671 dsc->rd = rd;
4672
4673 if (is_mov)
4674 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
4675 else
4676 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
4677
4678 dsc->cleanup = &cleanup_alu_reg;
4679
4680 return 0;
4681 }
4682
4683 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
4684
4685 static void
4686 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
4687 struct regcache *regs,
4688 struct displaced_step_closure *dsc)
4689 {
4690 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4691 int i;
4692
4693 for (i = 0; i < 4; i++)
4694 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4695
4696 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4697 }
4698
4699 static int
4700 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
4701 struct regcache *regs, struct displaced_step_closure *dsc)
4702 {
4703 unsigned int rn = bits (insn, 16, 19);
4704 unsigned int rm = bits (insn, 0, 3);
4705 unsigned int rd = bits (insn, 12, 15);
4706 unsigned int rs = bits (insn, 8, 11);
4707 unsigned int op = bits (insn, 21, 24);
4708 int is_mov = (op == 0xd), i;
4709 ULONGEST rd_val, rn_val, rm_val, rs_val;
4710 CORE_ADDR from = dsc->insn_addr;
4711
4712 if (!insn_references_pc (insn, 0x000fff0ful))
4713 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
4714
4715 if (debug_displaced)
4716 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
4717 "%.8lx\n", is_mov ? "move" : "ALU",
4718 (unsigned long) insn);
4719
4720 /* Instruction is of form:
4721
4722 <op><cond> rd, [rn,] rm, <shift> rs
4723
4724 Rewrite as:
4725
4726 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
4727 r0, r1, r2, r3 <- rd, rn, rm, rs
4728 Insn: <op><cond> r0, r1, r2, <shift> r3
4729 Cleanup: tmp5 <- r0
4730 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
4731 rd <- tmp5
4732 */
4733
4734 for (i = 0; i < 4; i++)
4735 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4736
4737 rd_val = displaced_read_reg (regs, from, rd);
4738 rn_val = displaced_read_reg (regs, from, rn);
4739 rm_val = displaced_read_reg (regs, from, rm);
4740 rs_val = displaced_read_reg (regs, from, rs);
4741 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4742 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4743 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4744 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
4745 dsc->rd = rd;
4746
4747 if (is_mov)
4748 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
4749 else
4750 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
4751
4752 dsc->cleanup = &cleanup_alu_shifted_reg;
4753
4754 return 0;
4755 }
4756
4757 /* Clean up load instructions. */
4758
4759 static void
4760 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
4761 struct displaced_step_closure *dsc)
4762 {
4763 ULONGEST rt_val, rt_val2 = 0, rn_val;
4764 CORE_ADDR from = dsc->insn_addr;
4765
4766 rt_val = displaced_read_reg (regs, from, 0);
4767 if (dsc->u.ldst.xfersize == 8)
4768 rt_val2 = displaced_read_reg (regs, from, 1);
4769 rn_val = displaced_read_reg (regs, from, 2);
4770
4771 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4772 if (dsc->u.ldst.xfersize > 4)
4773 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4774 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4775 if (!dsc->u.ldst.immed)
4776 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4777
4778 /* Handle register writeback. */
4779 if (dsc->u.ldst.writeback)
4780 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4781 /* Put result in right place. */
4782 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
4783 if (dsc->u.ldst.xfersize == 8)
4784 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
4785 }
4786
4787 /* Clean up store instructions. */
4788
4789 static void
4790 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
4791 struct displaced_step_closure *dsc)
4792 {
4793 CORE_ADDR from = dsc->insn_addr;
4794 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
4795
4796 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4797 if (dsc->u.ldst.xfersize > 4)
4798 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4799 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4800 if (!dsc->u.ldst.immed)
4801 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4802 if (!dsc->u.ldst.restore_r4)
4803 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
4804
4805 /* Writeback. */
4806 if (dsc->u.ldst.writeback)
4807 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4808 }
4809
4810 /* Copy "extra" load/store instructions. These are halfword/doubleword
4811 transfers, which have a different encoding to byte/word transfers. */
4812
4813 static int
4814 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
4815 struct regcache *regs, struct displaced_step_closure *dsc)
4816 {
4817 unsigned int op1 = bits (insn, 20, 24);
4818 unsigned int op2 = bits (insn, 5, 6);
4819 unsigned int rt = bits (insn, 12, 15);
4820 unsigned int rn = bits (insn, 16, 19);
4821 unsigned int rm = bits (insn, 0, 3);
4822 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
4823 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
4824 int immed = (op1 & 0x4) != 0;
4825 int opcode;
4826 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
4827 CORE_ADDR from = dsc->insn_addr;
4828
4829 if (!insn_references_pc (insn, 0x000ff00ful))
4830 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
4831
4832 if (debug_displaced)
4833 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
4834 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
4835 (unsigned long) insn);
4836
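  /* Combine OP2 (bits 5-6), the immediate bit (bit 22) and the load bit
     (bit 20) into an index into the LOAD and BYTESIZE arrays above; a
     negative result means the decode went wrong.  */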
4837 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
4838
4839 if (opcode < 0)
4840 internal_error (__FILE__, __LINE__,
4841 _("copy_extra_ld_st: instruction decode error"));
4842
4843 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4844 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4845 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4846 if (!immed)
4847 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4848
4849 rt_val = displaced_read_reg (regs, from, rt);
4850 if (bytesize[opcode] == 8)
4851 rt_val2 = displaced_read_reg (regs, from, rt + 1);
4852 rn_val = displaced_read_reg (regs, from, rn);
4853 if (!immed)
4854 rm_val = displaced_read_reg (regs, from, rm);
4855
4856 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4857 if (bytesize[opcode] == 8)
4858 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
4859 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4860 if (!immed)
4861 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4862
4863 dsc->rd = rt;
4864 dsc->u.ldst.xfersize = bytesize[opcode];
4865 dsc->u.ldst.rn = rn;
4866 dsc->u.ldst.immed = immed;
4867 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4868 dsc->u.ldst.restore_r4 = 0;
4869
4870 if (immed)
4871 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
4872 ->
4873 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
4874 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4875 else
4876 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
4877 ->
4878 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
4879 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4880
4881 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
4882
4883 return 0;
4884 }
4885
4886 /* Copy byte/word loads and stores. */
4887
4888 static int
4889 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
4890 struct regcache *regs,
4891 struct displaced_step_closure *dsc, int load, int byte,
4892 int usermode)
4893 {
4894 int immed = !bit (insn, 25);
4895 unsigned int rt = bits (insn, 12, 15);
4896 unsigned int rn = bits (insn, 16, 19);
4897 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
4898 ULONGEST rt_val, rn_val, rm_val = 0;
4899 CORE_ADDR from = dsc->insn_addr;
4900
4901 if (!insn_references_pc (insn, 0x000ff00ful))
4902 return copy_unmodified (gdbarch, insn, "load/store", dsc);
4903
4904 if (debug_displaced)
4905 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
4906 load ? (byte ? "ldrb" : "ldr")
4907 : (byte ? "strb" : "str"), usermode ? "t" : "",
4908 (unsigned long) insn);
4909
4910 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4911 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4912 if (!immed)
4913 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4914 if (!load)
4915 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
4916
4917 rt_val = displaced_read_reg (regs, from, rt);
4918 rn_val = displaced_read_reg (regs, from, rn);
4919 if (!immed)
4920 rm_val = displaced_read_reg (regs, from, rm);
4921
4922 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4923 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4924 if (!immed)
4925 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4926
4927 dsc->rd = rt;
4928 dsc->u.ldst.xfersize = byte ? 1 : 4;
4929 dsc->u.ldst.rn = rn;
4930 dsc->u.ldst.immed = immed;
4931 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4932
4933 /* To write PC we can do:
4934
4935 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
4936 scratch+4: ldr r4, temp
4937 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
4938 scratch+12: add r4, r4, #8 (r4 = offset)
4939 scratch+16: add r0, r0, r4
4940 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
4941 scratch+24: <temp>
4942
4943 Otherwise we don't know what value to write for PC, since the offset is
4944 architecture-dependent (sometimes PC+8, sometimes PC+12). */
4945
4946 if (load || rt != 15)
4947 {
4948 dsc->u.ldst.restore_r4 = 0;
4949
4950 if (immed)
4951 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
4952 ->
4953 {ldr,str}[b]<cond> r0, [r2, #imm]. */
4954 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4955 else
4956 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
4957 ->
4958 {ldr,str}[b]<cond> r0, [r2, r3]. */
4959 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4960 }
4961 else
4962 {
4963 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
4964 dsc->u.ldst.restore_r4 = 1;
4965
4966 dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
4967 dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
4968 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
4969 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
4970 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
4971
4972 /* As above. */
4973 if (immed)
4974 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
4975 else
4976 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
4977
4978 dsc->modinsn[6] = 0x0; /* breakpoint location. */
4979 dsc->modinsn[7] = 0x0; /* scratch space. */
4980
4981 dsc->numinsns = 6;
4982 }
4983
4984 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
4985
4986 return 0;
4987 }
4988
4989 /* Clean up LDM instructions with a fully-populated register list. This is an
4990 unfortunate corner case: it's impossible to implement correctly by modifying
4991 the instruction. The issue is as follows: we have an instruction,
4992
4993 ldm rN, {r0-r15}
4994
4995 which we must rewrite to avoid loading PC. A possible solution would be to
4996 do the load in two halves, something like (with suitable cleanup
4997 afterwards):
4998
4999 mov r8, rN
5000 ldm[id][ab] r8!, {r0-r7}
5001 str r7, <temp>
5002 ldm[id][ab] r8, {r7-r14}
5003 <bkpt>
5004
5005 but at present there's no suitable place for <temp>, since the scratch space
5006 is overwritten before the cleanup routine is called. For now, we simply
5007 emulate the instruction. */
5008
5009 static void
5010 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
5011 struct displaced_step_closure *dsc)
5012 {
5013 ULONGEST from = dsc->insn_addr;
5014 int inc = dsc->u.block.increment;
5015 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
5016 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
5017 uint32_t regmask = dsc->u.block.regmask;
5018 int regno = inc ? 0 : 15;
5019 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
5020 int exception_return = dsc->u.block.load && dsc->u.block.user
5021 && (regmask & 0x8000) != 0;
5022 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5023 int do_transfer = condition_true (dsc->u.block.cond, status);
5024 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5025
5026 if (!do_transfer)
5027 return;
5028
5029 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
5030 sensible we can do here. Complain loudly. */
5031 if (exception_return)
5032 error (_("Cannot single-step exception return"));
5033
5034 /* We don't handle any stores here for now. */
5035 gdb_assert (dsc->u.block.load != 0);
5036
5037 if (debug_displaced)
5038 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
5039 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
5040 dsc->u.block.increment ? "inc" : "dec",
5041 dsc->u.block.before ? "before" : "after");
5042
5043 while (regmask)
5044 {
5045 uint32_t memword;
5046
5047 if (inc)
5048 while (regno <= 15 && (regmask & (1 << regno)) == 0)
5049 regno++;
5050 else
5051 while (regno >= 0 && (regmask & (1 << regno)) == 0)
5052 regno--;
5053
5054 xfer_addr += bump_before;
5055
5056 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
5057 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
5058
5059 xfer_addr += bump_after;
5060
5061 regmask &= ~(1 << regno);
5062 }
5063
5064 if (dsc->u.block.writeback)
5065 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
5066 CANNOT_WRITE_PC);
5067 }
5068
5069 /* Clean up an STM which included the PC in the register list. */
5070
5071 static void
5072 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
5073 struct displaced_step_closure *dsc)
5074 {
5075 ULONGEST from = dsc->insn_addr;
5076 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5077 int store_executed = condition_true (dsc->u.block.cond, status);
5078 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
5079 CORE_ADDR stm_insn_addr;
5080 uint32_t pc_val;
5081 long offset;
5082 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5083
5084 /* If condition code fails, there's nothing else to do. */
5085 if (!store_executed)
5086 return;
5087
5088 if (dsc->u.block.increment)
5089 {
5090 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
5091
5092 if (dsc->u.block.before)
5093 pc_stored_at += 4;
5094 }
5095 else
5096 {
5097 pc_stored_at = dsc->u.block.xfer_addr;
5098
5099 if (dsc->u.block.before)
5100 pc_stored_at -= 4;
5101 }
5102
5103 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
5104 stm_insn_addr = dsc->scratch_base;
5105 offset = pc_val - stm_insn_addr;
5106
5107 if (debug_displaced)
5108 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
5109 "STM instruction\n", offset);
5110
5111 /* Rewrite the stored PC to the proper value for the non-displaced original
5112 instruction. */
5113 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
5114 dsc->insn_addr + offset);
5115 }
5116
5117 /* Clean up an LDM which includes the PC in the register list. We clumped all
5118 the registers in the transferred list into a contiguous range r0...rX (to
5119 avoid loading PC directly and losing control of the debugged program), so we
5120 must undo that here. */
5121
5122 static void
5123 cleanup_block_load_pc (struct gdbarch *gdbarch,
5124 struct regcache *regs,
5125 struct displaced_step_closure *dsc)
5126 {
5127 ULONGEST from = dsc->insn_addr;
5128 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5129 int load_executed = condition_true (dsc->u.block.cond, status), i;
5130 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
5131 unsigned int regs_loaded = bitcount (mask);
5132 unsigned int num_to_shuffle = regs_loaded, clobbered;
5133
5134 /* The method employed here will fail if the register list is fully populated
5135 (we need to avoid loading PC directly). */
5136 gdb_assert (num_to_shuffle < 16);
5137
5138 if (!load_executed)
5139 return;
5140
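  /* CLOBBERED marks the low registers r0..r(N-1) that the modified LDM
     overwrote; bits are cleared as loaded values are shuffled into their
     final registers, and anything left is restored from the saved
     copies below.  */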
5141 clobbered = (1 << num_to_shuffle) - 1;
5142
5143 while (num_to_shuffle > 0)
5144 {
5145 if ((mask & (1 << write_reg)) != 0)
5146 {
5147 unsigned int read_reg = num_to_shuffle - 1;
5148
5149 if (read_reg != write_reg)
5150 {
5151 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
5152 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
5153 if (debug_displaced)
5154 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
5155 "loaded register r%d to r%d\n"), read_reg,
5156 write_reg);
5157 }
5158 else if (debug_displaced)
5159 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
5160 "r%d already in the right place\n"),
5161 write_reg);
5162
5163 clobbered &= ~(1 << write_reg);
5164
5165 num_to_shuffle--;
5166 }
5167
5168 write_reg--;
5169 }
5170
5171 /* Restore any registers we scribbled over. */
5172 for (write_reg = 0; clobbered != 0; write_reg++)
5173 {
5174 if ((clobbered & (1 << write_reg)) != 0)
5175 {
5176 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
5177 CANNOT_WRITE_PC);
5178 if (debug_displaced)
5179 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
5180 "clobbered register r%d\n"), write_reg);
5181 clobbered &= ~(1 << write_reg);
5182 }
5183 }
5184
5185 /* Perform register writeback manually. */
5186 if (dsc->u.block.writeback)
5187 {
5188 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
5189
5190 if (dsc->u.block.increment)
5191 new_rn_val += regs_loaded * 4;
5192 else
5193 new_rn_val -= regs_loaded * 4;
5194
5195 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
5196 CANNOT_WRITE_PC);
5197 }
5198 }
5199
5200 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
5201 in user-level code (in particular exception return, ldm rn, {...pc}^). */
5202
5203 static int
5204 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5205 struct displaced_step_closure *dsc)
5206 {
5207 int load = bit (insn, 20);
5208 int user = bit (insn, 22);
5209 int increment = bit (insn, 23);
5210 int before = bit (insn, 24);
5211 int writeback = bit (insn, 21);
5212 int rn = bits (insn, 16, 19);
5213 CORE_ADDR from = dsc->insn_addr;
5214
5215 /* Block transfers which don't mention PC can be run directly out-of-line. */
5216 if (rn != 15 && (insn & 0x8000) == 0)
5217 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
5218
5219 if (rn == 15)
5220 {
5221 warning (_("displaced: Unpredictable LDM or STM with base register r15"));
5222 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
5223 }
5224
5225 if (debug_displaced)
5226 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
5227 "%.8lx\n", (unsigned long) insn);
5228
5229 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
5230 dsc->u.block.rn = rn;
5231
5232 dsc->u.block.load = load;
5233 dsc->u.block.user = user;
5234 dsc->u.block.increment = increment;
5235 dsc->u.block.before = before;
5236 dsc->u.block.writeback = writeback;
5237 dsc->u.block.cond = bits (insn, 28, 31);
5238
5239 dsc->u.block.regmask = insn & 0xffff;
5240
5241 if (load)
5242 {
5243 if ((insn & 0xffff) == 0xffff)
5244 {
5245 /* LDM with a fully-populated register list. This case is
5246 particularly tricky. Implement for now by fully emulating the
5247 instruction (which might not behave perfectly in all cases, but
5248 these instructions should be rare enough for that not to matter
5249 too much). */
5250 dsc->modinsn[0] = ARM_NOP;
5251
5252 dsc->cleanup = &cleanup_block_load_all;
5253 }
5254 else
5255 {
5256 /* LDM of a list of registers which includes PC. Implement by
5257 rewriting the list of registers to be transferred into a
5258 contiguous chunk r0...rX before doing the transfer, then shuffling
5259 registers into the correct places in the cleanup routine. */
5260 unsigned int regmask = insn & 0xffff;
5261 unsigned int num_in_list = bitcount (regmask), new_regmask;
5262 unsigned int i;
5263
5264 for (i = 0; i < num_in_list; i++)
5265 dsc->tmp[i] = displaced_read_reg (regs, from, i);
5266
5267 /* Writeback makes things complicated. We need to avoid clobbering
5268 the base register with one of the registers in our modified
5269 register list, but just using a different register can't work in
5270 all cases, e.g.:
5271
5272 ldm r14!, {r0-r13,pc}
5273
5274 which would need to be rewritten as:
5275
5276 ldm rN!, {r0-r14}
5277
5278 but that can't work, because there's no free register for N.
5279
5280 Solve this by turning off the writeback bit, and emulating
5281 writeback manually in the cleanup routine. */
5282
5283 if (writeback)
5284 insn &= ~(1 << 21);
5285
5286 new_regmask = (1 << num_in_list) - 1;
5287
5288 if (debug_displaced)
5289 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
5290 "{..., pc}: original reg list %.4x, modified "
5291 "list %.4x\n"), rn, writeback ? "!" : "",
5292 (int) insn & 0xffff, new_regmask);
5293
5294 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
5295
5296 dsc->cleanup = &cleanup_block_load_pc;
5297 }
5298 }
5299 else
5300 {
5301 /* STM of a list of registers which includes PC. Run the instruction
5302 as-is, but out of line: this will store the wrong value for the PC,
5303 so we must manually fix up the memory in the cleanup routine.
5304 Doing things this way has the advantage that we can auto-detect
5305 the offset of the PC write (which is architecture-dependent) in
5306 the cleanup routine. */
5307 dsc->modinsn[0] = insn;
5308
5309 dsc->cleanup = &cleanup_block_store_pc;
5310 }
5311
5312 return 0;
5313 }
5314
5315 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
5316 for Linux, where some SVC instructions must be treated specially. */
5317
5318 static void
5319 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
5320 struct displaced_step_closure *dsc)
5321 {
5322 CORE_ADDR from = dsc->insn_addr;
5323 CORE_ADDR resume_addr = from + 4;
5324
5325 if (debug_displaced)
5326 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
5327 "%.8lx\n", (unsigned long) resume_addr);
5328
5329 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
5330 }
5331
5332 static int
5333 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5334 struct regcache *regs, struct displaced_step_closure *dsc)
5335 {
5336 CORE_ADDR from = dsc->insn_addr;
5337
5338 /* Allow OS-specific code to override SVC handling. */
5339 if (dsc->u.svc.copy_svc_os)
5340 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
5341
5342 if (debug_displaced)
5343 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
5344 (unsigned long) insn);
5345
5346 /* Preparation: none.
5347 Insn: unmodified svc.
5348 Cleanup: pc <- insn_addr + 4. */
5349
5350 dsc->modinsn[0] = insn;
5351
5352 dsc->cleanup = &cleanup_svc;
5353 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
5354 instruction. */
5355 dsc->wrote_to_pc = 1;
5356
5357 return 0;
5358 }
5359
5360 /* Copy undefined instructions. */
5361
5362 static int
5363 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
5364 struct displaced_step_closure *dsc)
5365 {
5366 if (debug_displaced)
5367 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
5368 (unsigned long) insn);
5369
5370 dsc->modinsn[0] = insn;
5371
5372 return 0;
5373 }
5374
5375 /* Copy unpredictable instructions. */
5376
5377 static int
5378 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
5379 struct displaced_step_closure *dsc)
5380 {
5381 if (debug_displaced)
5382 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
5383 "%.8lx\n", (unsigned long) insn);
5384
5385 dsc->modinsn[0] = insn;
5386
5387 return 0;
5388 }
5389
5390 /* The decode_* functions are instruction decoding helpers. They mostly follow
5391 the presentation in the ARM ARM. */
5392
5393 static int
5394 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
5395 struct regcache *regs,
5396 struct displaced_step_closure *dsc)
5397 {
5398 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
5399 unsigned int rn = bits (insn, 16, 19);
5400
5401 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
5402 return copy_unmodified (gdbarch, insn, "cps", dsc);
5403 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
5404 return copy_unmodified (gdbarch, insn, "setend", dsc);
5405 else if ((op1 & 0x60) == 0x20)
5406 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
5407 else if ((op1 & 0x71) == 0x40)
5408 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
5409 else if ((op1 & 0x77) == 0x41)
5410 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5411 else if ((op1 & 0x77) == 0x45)
5412 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
5413 else if ((op1 & 0x77) == 0x51)
5414 {
5415 if (rn != 0xf)
5416 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5417 else
5418 return copy_unpred (gdbarch, insn, dsc);
5419 }
5420 else if ((op1 & 0x77) == 0x55)
5421 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5422 else if (op1 == 0x57)
5423 switch (op2)
5424 {
5425 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
5426 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
5427 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
5428 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
5429 default: return copy_unpred (gdbarch, insn, dsc);
5430 }
5431 else if ((op1 & 0x63) == 0x43)
5432 return copy_unpred (gdbarch, insn, dsc);
5433 else if ((op2 & 0x1) == 0x0)
5434 switch (op1 & ~0x80)
5435 {
5436 case 0x61:
5437 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5438 case 0x65:
5439 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
5440 case 0x71: case 0x75:
5441 /* pld/pldw reg. */
5442 return copy_preload_reg (gdbarch, insn, regs, dsc);
5443 case 0x63: case 0x67: case 0x73: case 0x77:
5444 return copy_unpred (gdbarch, insn, dsc);
5445 default:
5446 return copy_undef (gdbarch, insn, dsc);
5447 }
5448 else
5449 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
5450 }
5451
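/* Decode instructions in the unconditional (condition field 0xf) encoding
space. */
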
5452 static int
5453 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
5454 struct regcache *regs, struct displaced_step_closure *dsc)
5455 {
5456 if (bit (insn, 27) == 0)
5457 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
5458 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
5459 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
5460 {
5461 case 0x0: case 0x2:
5462 return copy_unmodified (gdbarch, insn, "srs", dsc);
5463
5464 case 0x1: case 0x3:
5465 return copy_unmodified (gdbarch, insn, "rfe", dsc);
5466
5467 case 0x4: case 0x5: case 0x6: case 0x7:
5468 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5469
5470 case 0x8:
5471 switch ((insn & 0xe00000) >> 21)
5472 {
5473 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
5474 /* stc/stc2. */
5475 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5476
5477 case 0x2:
5478 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5479
5480 default:
5481 return copy_undef (gdbarch, insn, dsc);
5482 }
5483
5484 case 0x9:
5485 {
5486 int rn_f = (bits (insn, 16, 19) == 0xf);
5487 switch ((insn & 0xe00000) >> 21)
5488 {
5489 case 0x1: case 0x3:
5490 /* ldc/ldc2 imm (undefined for rn == pc). */
5491 return rn_f ? copy_undef (gdbarch, insn, dsc)
5492 : copy_copro_load_store (gdbarch, insn, regs, dsc);
5493
5494 case 0x2:
5495 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5496
5497 case 0x4: case 0x5: case 0x6: case 0x7:
5498 /* ldc/ldc2 lit (undefined for rn != pc). */
5499 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
5500 : copy_undef (gdbarch, insn, dsc);
5501
5502 default:
5503 return copy_undef (gdbarch, insn, dsc);
5504 }
5505 }
5506
5507 case 0xa:
5508 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
5509
5510 case 0xb:
5511 if (bits (insn, 16, 19) == 0xf)
5512 /* ldc/ldc2 lit. */
5513 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5514 else
5515 return copy_undef (gdbarch, insn, dsc);
5516
5517 case 0xc:
5518 if (bit (insn, 4))
5519 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5520 else
5521 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5522
5523 case 0xd:
5524 if (bit (insn, 4))
5525 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5526 else
5527 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5528
5529 default:
5530 return copy_undef (gdbarch, insn, dsc);
5531 }
5532 }
5533
5534 /* Decode miscellaneous instructions in dp/misc encoding space. */
5535
5536 static int
5537 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
5538 struct regcache *regs, struct displaced_step_closure *dsc)
5539 {
5540 unsigned int op2 = bits (insn, 4, 6);
5541 unsigned int op = bits (insn, 21, 22);
5542 unsigned int op1 = bits (insn, 16, 19);
5543
5544 switch (op2)
5545 {
5546 case 0x0:
5547 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
5548
5549 case 0x1:
5550 if (op == 0x1) /* bx. */
5551 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
5552 else if (op == 0x3)
5553 return copy_unmodified (gdbarch, insn, "clz", dsc);
5554 else
5555 return copy_undef (gdbarch, insn, dsc);
5556
5557 case 0x2:
5558 if (op == 0x1)
5559 /* Not really supported. */
5560 return copy_unmodified (gdbarch, insn, "bxj", dsc);
5561 else
5562 return copy_undef (gdbarch, insn, dsc);
5563
5564 case 0x3:
5565 if (op == 0x1)
5566 return copy_bx_blx_reg (gdbarch, insn, regs, dsc); /* blx register. */
5567 else
5568 return copy_undef (gdbarch, insn, dsc);
5569
5570 case 0x5:
5571 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
5572
5573 case 0x7:
5574 if (op == 0x1)
5575 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
5576 else if (op == 0x3)
5577 /* Not really supported. */
5578 return copy_unmodified (gdbarch, insn, "smc", dsc);
5579
5580 default:
5581 return copy_undef (gdbarch, insn, dsc);
5582 }
5583 }
5584
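/* Decode data-processing (immediate and register) and miscellaneous
instructions. */
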
5585 static int
5586 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5587 struct displaced_step_closure *dsc)
5588 {
5589 if (bit (insn, 25))
5590 switch (bits (insn, 20, 24))
5591 {
5592 case 0x10:
5593 return copy_unmodified (gdbarch, insn, "movw", dsc);
5594
5595 case 0x14:
5596 return copy_unmodified (gdbarch, insn, "movt", dsc);
5597
5598 case 0x12: case 0x16:
5599 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
5600
5601 default:
5602 return copy_alu_imm (gdbarch, insn, regs, dsc);
5603 }
5604 else
5605 {
5606 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
5607
5608 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
5609 return copy_alu_reg (gdbarch, insn, regs, dsc);
5610 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
5611 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
5612 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
5613 return decode_miscellaneous (gdbarch, insn, regs, dsc);
5614 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
5615 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
5616 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
5617 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
5618 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
5619 return copy_unmodified (gdbarch, insn, "synch", dsc);
5620 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
5621 /* 2nd arg means "unprivileged". */
5622 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
5623 dsc);
5624 }
5625
5626 /* Should be unreachable. */
5627 return 1;
5628 }
5629
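/* Decode load/store word and unsigned byte instructions. */
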
5630 static int
5631 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
5632 struct regcache *regs,
5633 struct displaced_step_closure *dsc)
5634 {
5635 int a = bit (insn, 25), b = bit (insn, 4);
5636 uint32_t op1 = bits (insn, 20, 24);
5637 int rn_f = bits (insn, 16, 19) == 0xf;
5638
5639 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
5640 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
5641 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
5642 else if ((!a && (op1 & 0x17) == 0x02)
5643 || (a && (op1 & 0x17) == 0x02 && !b))
5644 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
5645 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
5646 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
5647 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
5648 else if ((!a && (op1 & 0x17) == 0x03)
5649 || (a && (op1 & 0x17) == 0x03 && !b))
5650 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
5651 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
5652 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
5653 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
5654 else if ((!a && (op1 & 0x17) == 0x06)
5655 || (a && (op1 & 0x17) == 0x06 && !b))
5656 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
5657 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
5658 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
5659 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
5660 else if ((!a && (op1 & 0x17) == 0x07)
5661 || (a && (op1 & 0x17) == 0x07 && !b))
5662 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
5663
5664 /* Should be unreachable. */
5665 return 1;
5666 }
5667
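/* Decode media instructions: parallel add/sub, pack/unpack, saturate,
reverse, usad8/usada8 and bitfield operations. */
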
5668 static int
5669 decode_media (struct gdbarch *gdbarch, uint32_t insn,
5670 struct displaced_step_closure *dsc)
5671 {
5672 switch (bits (insn, 20, 24))
5673 {
5674 case 0x00: case 0x01: case 0x02: case 0x03:
5675 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
5676
5677 case 0x04: case 0x05: case 0x06: case 0x07:
5678 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
5679
5680 case 0x08: case 0x09: case 0x0a: case 0x0b:
5681 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
5682 return copy_unmodified (gdbarch, insn,
5683 "decode/pack/unpack/saturate/reverse", dsc);
5684
5685 case 0x18:
5686 if (bits (insn, 5, 7) == 0) /* op2. */
5687 {
5688 if (bits (insn, 12, 15) == 0xf)
5689 return copy_unmodified (gdbarch, insn, "usad8", dsc);
5690 else
5691 return copy_unmodified (gdbarch, insn, "usada8", dsc);
5692 }
5693 else
5694 return copy_undef (gdbarch, insn, dsc);
5695
5696 case 0x1a: case 0x1b:
5697 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5698 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
5699 else
5700 return copy_undef (gdbarch, insn, dsc);
5701
5702 case 0x1c: case 0x1d:
5703 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
5704 {
5705 if (bits (insn, 0, 3) == 0xf)
5706 return copy_unmodified (gdbarch, insn, "bfc", dsc);
5707 else
5708 return copy_unmodified (gdbarch, insn, "bfi", dsc);
5709 }
5710 else
5711 return copy_undef (gdbarch, insn, dsc);
5712
5713 case 0x1e: case 0x1f:
5714 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5715 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
5716 else
5717 return copy_undef (gdbarch, insn, dsc);
5718 }
5719
5720 /* Should be unreachable. */
5721 return 1;
5722 }
5723
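/* Decode branch, branch with link, and block data transfer (ldm/stm)
instructions. */
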
5724 static int
5725 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
5726 struct regcache *regs, struct displaced_step_closure *dsc)
5727 {
5728 if (bit (insn, 25))
5729 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5730 else
5731 return copy_block_xfer (gdbarch, insn, regs, dsc);
5732 }
5733
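/* Decode VFP/Neon extension register load/store and 64-bit transfer
instructions. */
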
5734 static int
5735 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
5736 struct regcache *regs, struct displaced_step_closure *dsc)
5737 {
5738 unsigned int opcode = bits (insn, 20, 24);
5739
5740 switch (opcode)
5741 {
5742 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
5743 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
5744
5745 case 0x08: case 0x0a: case 0x0c: case 0x0e:
5746 case 0x12: case 0x16:
5747 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
5748
5749 case 0x09: case 0x0b: case 0x0d: case 0x0f:
5750 case 0x13: case 0x17:
5751 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
5752
5753 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
5754 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
5755 /* Note: no writeback for these instructions. Bit 25 will always be
5756 zero though (via caller), so the following works OK. */
5757 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5758 }
5759
5760 /* Should be unreachable. */
5761 return 1;
5762 }
5763
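/* Decode supervisor call (svc) and coprocessor instructions, including
VFP/Neon register transfers. */
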
5764 static int
5765 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5766 struct regcache *regs, struct displaced_step_closure *dsc)
5767 {
5768 unsigned int op1 = bits (insn, 20, 25);
5769 int op = bit (insn, 4);
5770 unsigned int coproc = bits (insn, 8, 11);
5771 unsigned int rn = bits (insn, 16, 19);
5772
5773 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
5774 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
5775 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
5776 && (coproc & 0xe) != 0xa)
5777 /* stc/stc2. */
5778 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5779 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
5780 && (coproc & 0xe) != 0xa)
5781 /* ldc/ldc2 imm/lit. */
5782 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5783 else if ((op1 & 0x3e) == 0x00)
5784 return copy_undef (gdbarch, insn, dsc);
5785 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
5786 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
5787 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
5788 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5789 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
5790 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5791 else if ((op1 & 0x30) == 0x20 && !op)
5792 {
5793 if ((coproc & 0xe) == 0xa)
5794 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
5795 else
5796 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5797 }
5798 else if ((op1 & 0x30) == 0x20 && op)
5799 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
5800 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
5801 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5802 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
5803 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5804 else if ((op1 & 0x30) == 0x30)
5805 return copy_svc (gdbarch, insn, to, regs, dsc);
5806 else
5807 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
5808 }
5809
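/* Decode the ARM instruction INSN, located at FROM and to be copied to the
scratch area at TO, and fill in DSC with the modified instruction(s) and
cleanup routine needed to displaced-step it. */
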
5810 void
5811 arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
5812 CORE_ADDR from, CORE_ADDR to, struct regcache *regs,
5813 struct displaced_step_closure *dsc)
5814 {
5815 int err = 0;
5816
5817 if (!displaced_in_arm_mode (regs))
5818 error (_("Displaced stepping is only supported in ARM mode"));
5819
5820 /* Most displaced instructions use a 1-instruction scratch space, so set this
5821 here and override below if/when necessary. */
5822 dsc->numinsns = 1;
5823 dsc->insn_addr = from;
5824 dsc->scratch_base = to;
5825 dsc->cleanup = NULL;
5826 dsc->wrote_to_pc = 0;
5827
5828 if ((insn & 0xf0000000) == 0xf0000000)
5829 err = decode_unconditional (gdbarch, insn, regs, dsc);
5830 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
5831 {
5832 case 0x0: case 0x1: case 0x2: case 0x3:
5833 err = decode_dp_misc (gdbarch, insn, regs, dsc);
5834 break;
5835
5836 case 0x4: case 0x5: case 0x6:
5837 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
5838 break;
5839
5840 case 0x7:
5841 err = decode_media (gdbarch, insn, dsc);
5842 break;
5843
5844 case 0x8: case 0x9: case 0xa: case 0xb:
5845 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
5846 break;
5847
5848 case 0xc: case 0xd: case 0xe: case 0xf:
5849 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
5850 break;
5851 }
5852
5853 if (err)
5854 internal_error (__FILE__, __LINE__,
5855 _("arm_process_displaced_insn: Instruction decode error"));
5856 }
5857
5858 /* Actually set up the scratch space for a displaced instruction. */
5859
5860 void
5861 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
5862 CORE_ADDR to, struct displaced_step_closure *dsc)
5863 {
5864 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5865 unsigned int i;
5866 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5867
5868 /* Poke modified instruction(s). */
5869 for (i = 0; i < dsc->numinsns; i++)
5870 {
5871 if (debug_displaced)
5872 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
5873 "%.8lx\n", (unsigned long) dsc->modinsn[i],
5874 (unsigned long) to + i * 4);
5875 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
5876 dsc->modinsn[i]);
5877 }
5878
5879 /* Put breakpoint afterwards. */
5880 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
5881 tdep->arm_breakpoint_size);
5882
5883 if (debug_displaced)
5884 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
5885 paddress (gdbarch, from), paddress (gdbarch, to));
5886 }
5887
5888 /* Entry point for copying an instruction into scratch space for displaced
5889 stepping. */
5890
5891 struct displaced_step_closure *
5892 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
5893 CORE_ADDR from, CORE_ADDR to,
5894 struct regcache *regs)
5895 {
5896 struct displaced_step_closure *dsc
5897 = xmalloc (sizeof (struct displaced_step_closure));
5898 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5899 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
5900
5901 if (debug_displaced)
5902 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
5903 "at %.8lx\n", (unsigned long) insn,
5904 (unsigned long) from);
5905
5906 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
5907 arm_displaced_init_closure (gdbarch, from, to, dsc);
5908
5909 return dsc;
5910 }
5911
5912 /* Entry point for cleaning things up after a displaced instruction has been
5913 single-stepped. */
5914
5915 void
5916 arm_displaced_step_fixup (struct gdbarch *gdbarch,
5917 struct displaced_step_closure *dsc,
5918 CORE_ADDR from, CORE_ADDR to,
5919 struct regcache *regs)
5920 {
5921 if (dsc->cleanup)
5922 dsc->cleanup (gdbarch, regs, dsc);
5923
5924 if (!dsc->wrote_to_pc)
5925 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
5926 }
5927
5928 #include "bfd-in2.h"
5929 #include "libcoff.h"
5930
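/* Disassemble one instruction at MEMADDR, arranging for the opcodes
disassembler to decode it as Thumb or ARM as appropriate. */
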
5931 static int
5932 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
5933 {
5934 struct gdbarch *gdbarch = info->application_data;
5935
5936 if (arm_pc_is_thumb (gdbarch, memaddr))
5937 {
5938 static asymbol *asym;
5939 static combined_entry_type ce;
5940 static struct coff_symbol_struct csym;
5941 static struct bfd fake_bfd;
5942 static bfd_target fake_target;
5943
5944 if (csym.native == NULL)
5945 {
5946 /* Create a fake symbol vector containing a Thumb symbol.
5947 This is solely so that the code in print_insn_little_arm()
5948 and print_insn_big_arm() in opcodes/arm-dis.c will detect
5949 the presence of a Thumb symbol and switch to decoding
5950 Thumb instructions. */
5951
5952 fake_target.flavour = bfd_target_coff_flavour;
5953 fake_bfd.xvec = &fake_target;
5954 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
5955 csym.native = &ce;
5956 csym.symbol.the_bfd = &fake_bfd;
5957 csym.symbol.name = "fake";
5958 asym = (asymbol *) & csym;
5959 }
5960
5961 memaddr = UNMAKE_THUMB_ADDR (memaddr);
5962 info->symbols = &asym;
5963 }
5964 else
5965 info->symbols = NULL;
5966
5967 if (info->endian == BFD_ENDIAN_BIG)
5968 return print_insn_big_arm (memaddr, info);
5969 else
5970 return print_insn_little_arm (memaddr, info);
5971 }
5972
5973 /* The following define instruction sequences that will cause ARM
5974 CPUs to take an undefined instruction trap. These are used to
5975 signal a breakpoint to GDB.
5976
5977 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
5978 modes. A different instruction is required for each mode. The ARM
5979 CPUs can also be big or little endian. Thus four different
5980 instructions are needed to support all cases.
5981
5982 Note: ARMv4 defines several new instructions that will take the
5983 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
5984 not in fact add the new instructions. The new undefined
5985 instructions in ARMv4 are all instructions that had no defined
5986 behaviour in earlier chips. There is no guarantee that they will
5987 raise an exception; they may be treated as NOPs. In practice, it
5988 may only be safe to rely on instructions matching:
5989
5990 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
5991 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
5992 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
5993
5994 Even this may only be true if the condition predicate is true. The
5995 following use a condition predicate of ALWAYS so it is always TRUE.
5996
5997 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
5998 and NetBSD all use a software interrupt rather than an undefined
5999 instruction to force a trap. This can be handled by the
6000 abi-specific code during establishment of the gdbarch vector. */
6001
6002 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
6003 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
6004 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
6005 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
6006
6007 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
6008 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
6009 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
6010 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
6011
6012 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
6013 the program counter value to determine whether a 16-bit or 32-bit
6014 breakpoint should be used. It returns a pointer to a string of
6015 bytes that encode a breakpoint instruction, stores the length of
6016 the string to *lenptr, and adjusts the program counter (if
6017 necessary) to point to the actual memory location where the
6018 breakpoint should be inserted. */
6019
6020 static const unsigned char *
6021 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
6022 {
6023 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6024 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6025
6026 if (arm_pc_is_thumb (gdbarch, *pcptr))
6027 {
6028 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
6029
6030 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
6031 check whether we are replacing a 32-bit instruction. */
6032 if (tdep->thumb2_breakpoint != NULL)
6033 {
6034 gdb_byte buf[2];
6035 if (target_read_memory (*pcptr, buf, 2) == 0)
6036 {
6037 unsigned short inst1;
6038 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
6039 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
6040 {
6041 *lenptr = tdep->thumb2_breakpoint_size;
6042 return tdep->thumb2_breakpoint;
6043 }
6044 }
6045 }
6046
6047 *lenptr = tdep->thumb_breakpoint_size;
6048 return tdep->thumb_breakpoint;
6049 }
6050 else
6051 {
6052 *lenptr = tdep->arm_breakpoint_size;
6053 return tdep->arm_breakpoint;
6054 }
6055 }
6056
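/* Determine the breakpoint "kind" to report to a remote stub for the
address *PCPTR, distinguishing 32-bit Thumb-2 breakpoints from 32-bit ARM
ones. */
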
6057 static void
6058 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
6059 int *kindptr)
6060 {
6061 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6062
6063 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
6064
6065 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
6066 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
6067 that this is not confused with a 32-bit ARM breakpoint. */
6068 *kindptr = 3;
6069 }
6070
6071 /* Extract from REGS, which holds the (raw) register state, a
6072 function return value of type TYPE, and copy that, in virtual
6073 format, into VALBUF. */
6074
6075 static void
6076 arm_extract_return_value (struct type *type, struct regcache *regs,
6077 gdb_byte *valbuf)
6078 {
6079 struct gdbarch *gdbarch = get_regcache_arch (regs);
6080 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6081
6082 if (TYPE_CODE_FLT == TYPE_CODE (type))
6083 {
6084 switch (gdbarch_tdep (gdbarch)->fp_model)
6085 {
6086 case ARM_FLOAT_FPA:
6087 {
6088 /* The value is in register F0 in internal format. We need to
6089 extract the raw value and then convert it to the desired
6090 internal type. */
6091 bfd_byte tmpbuf[FP_REGISTER_SIZE];
6092
6093 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
6094 convert_from_extended (floatformat_from_type (type), tmpbuf,
6095 valbuf, gdbarch_byte_order (gdbarch));
6096 }
6097 break;
6098
6099 case ARM_FLOAT_SOFT_FPA:
6100 case ARM_FLOAT_SOFT_VFP:
6101 /* ARM_FLOAT_VFP can arise if this is a variadic function, which
6102 therefore does not use the VFP ABI code. */
6103 case ARM_FLOAT_VFP:
6104 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
6105 if (TYPE_LENGTH (type) > 4)
6106 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
6107 valbuf + INT_REGISTER_SIZE);
6108 break;
6109
6110 default:
6111 internal_error
6112 (__FILE__, __LINE__,
6113 _("arm_extract_return_value: Floating point model not supported"));
6114 break;
6115 }
6116 }
6117 else if (TYPE_CODE (type) == TYPE_CODE_INT
6118 || TYPE_CODE (type) == TYPE_CODE_CHAR
6119 || TYPE_CODE (type) == TYPE_CODE_BOOL
6120 || TYPE_CODE (type) == TYPE_CODE_PTR
6121 || TYPE_CODE (type) == TYPE_CODE_REF
6122 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6123 {
6124 /* If the type is a plain integer, then the access is
6125 straightforward. Otherwise we have to play around a bit more. */
6126 int len = TYPE_LENGTH (type);
6127 int regno = ARM_A1_REGNUM;
6128 ULONGEST tmp;
6129
6130 while (len > 0)
6131 {
6132 /* By using store_unsigned_integer we avoid having to do
6133 anything special for small big-endian values. */
6134 regcache_cooked_read_unsigned (regs, regno++, &tmp);
6135 store_unsigned_integer (valbuf,
6136 (len > INT_REGISTER_SIZE
6137 ? INT_REGISTER_SIZE : len),
6138 byte_order, tmp);
6139 len -= INT_REGISTER_SIZE;
6140 valbuf += INT_REGISTER_SIZE;
6141 }
6142 }
6143 else
6144 {
6145 /* For a structure or union the behaviour is as if the value had
6146 been stored to word-aligned memory and then loaded into
6147 registers with 32-bit load instruction(s). */
6148 int len = TYPE_LENGTH (type);
6149 int regno = ARM_A1_REGNUM;
6150 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6151
6152 while (len > 0)
6153 {
6154 regcache_cooked_read (regs, regno++, tmpbuf);
6155 memcpy (valbuf, tmpbuf,
6156 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6157 len -= INT_REGISTER_SIZE;
6158 valbuf += INT_REGISTER_SIZE;
6159 }
6160 }
6161 }
6162
6163
6164 /* Will a function return an aggregate type in memory or in a
6165 register? Return 0 if an aggregate type can be returned in a
6166 register, 1 if it must be returned in memory. */
6167
6168 static int
6169 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
6170 {
6171 int nRc;
6172 enum type_code code;
6173
6174 CHECK_TYPEDEF (type);
6175
6176 /* In the ARM ABI, "integer" like aggregate types are returned in
6177 registers. For an aggregate type to be integer like, its size
6178 must be less than or equal to INT_REGISTER_SIZE and the
6179 offset of each addressable subfield must be zero. Note that bit
6180 fields are not addressable, and all addressable subfields of
6181 unions always start at offset zero.
6182
6183 This function is based on the behaviour of GCC 2.95.1.
6184 See: gcc/arm.c: arm_return_in_memory() for details.
6185
6186 Note: All versions of GCC before GCC 2.95.2 do not set up the
6187 parameters correctly for a function returning the following
6188 structure: struct { float f;}; This should be returned in memory,
6189 not a register. Richard Earnshaw sent me a patch, but I do not
6190 know of any way to detect if a function like the above has been
6191 compiled with the correct calling convention. */
6192
6193 /* All aggregate types that won't fit in a register must be returned
6194 in memory. */
6195 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
6196 {
6197 return 1;
6198 }
6199
6200 /* The AAPCS says all aggregates not larger than a word are returned
6201 in a register. */
6202 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
6203 return 0;
6204
6205 /* The only aggregate types that can be returned in a register are
6206 structs and unions. Arrays must be returned in memory. */
6207 code = TYPE_CODE (type);
6208 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
6209 {
6210 return 1;
6211 }
6212
6213 /* Assume all other aggregate types can be returned in a register.
6214 Run a check for structures, unions and arrays. */
6215 nRc = 0;
6216
6217 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
6218 {
6219 int i;
6220 /* Need to check if this struct/union is "integer" like. For
6221 this to be true, its size must be less than or equal to
6222 INT_REGISTER_SIZE and the offset of each addressable
6223 subfield must be zero. Note that bit fields are not
6224 addressable, and unions always start at offset zero. If any
6225 of the subfields is a floating point type, the struct/union
6226 cannot be an integer type. */
6227
6228 /* For each field in the object, check:
6229 1) Is it FP? --> yes, nRc = 1;
6230 2) Is it addressable (bitpos != 0) and
6231 not packed (bitsize == 0)?
6232 --> yes, nRc = 1
6233 */
6234
6235 for (i = 0; i < TYPE_NFIELDS (type); i++)
6236 {
6237 enum type_code field_type_code;
6238 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type, i)));
6239
6240 /* Is it a floating point type field? */
6241 if (field_type_code == TYPE_CODE_FLT)
6242 {
6243 nRc = 1;
6244 break;
6245 }
6246
6247 /* If bitpos != 0, then we have to care about it. */
6248 if (TYPE_FIELD_BITPOS (type, i) != 0)
6249 {
6250 /* Bitfields are not addressable. If the field bitsize is
6251 zero, then the field is not packed. Hence it cannot be
6252 a bitfield or any other packed type. */
6253 if (TYPE_FIELD_BITSIZE (type, i) == 0)
6254 {
6255 nRc = 1;
6256 break;
6257 }
6258 }
6259 }
6260 }
6261
6262 return nRc;
6263 }
6264
6265 /* Write into appropriate registers a function return value of type
6266 TYPE, given in virtual format. */
6267
6268 static void
6269 arm_store_return_value (struct type *type, struct regcache *regs,
6270 const gdb_byte *valbuf)
6271 {
6272 struct gdbarch *gdbarch = get_regcache_arch (regs);
6273 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6274
6275 if (TYPE_CODE (type) == TYPE_CODE_FLT)
6276 {
6277 char buf[MAX_REGISTER_SIZE];
6278
6279 switch (gdbarch_tdep (gdbarch)->fp_model)
6280 {
6281 case ARM_FLOAT_FPA:
6282
6283 convert_to_extended (floatformat_from_type (type), buf, valbuf,
6284 gdbarch_byte_order (gdbarch));
6285 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
6286 break;
6287
6288 case ARM_FLOAT_SOFT_FPA:
6289 case ARM_FLOAT_SOFT_VFP:
6290 /* ARM_FLOAT_VFP can arise if this is a variadic function, which
6291 therefore does not use the VFP ABI code. */
6292 case ARM_FLOAT_VFP:
6293 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
6294 if (TYPE_LENGTH (type) > 4)
6295 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
6296 valbuf + INT_REGISTER_SIZE);
6297 break;
6298
6299 default:
6300 internal_error
6301 (__FILE__, __LINE__,
6302 _("arm_store_return_value: Floating point model not supported"));
6303 break;
6304 }
6305 }
6306 else if (TYPE_CODE (type) == TYPE_CODE_INT
6307 || TYPE_CODE (type) == TYPE_CODE_CHAR
6308 || TYPE_CODE (type) == TYPE_CODE_BOOL
6309 || TYPE_CODE (type) == TYPE_CODE_PTR
6310 || TYPE_CODE (type) == TYPE_CODE_REF
6311 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6312 {
6313 if (TYPE_LENGTH (type) <= 4)
6314 {
6315 /* Values of one word or less are zero/sign-extended and
6316 returned in r0. */
6317 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6318 LONGEST val = unpack_long (type, valbuf);
6319
6320 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
6321 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
6322 }
6323 else
6324 {
6325 /* Integral values greater than one word are stored in consecutive
6326 registers starting with r0. This will always be a multiple of
6327 the register size. */
6328 int len = TYPE_LENGTH (type);
6329 int regno = ARM_A1_REGNUM;
6330
6331 while (len > 0)
6332 {
6333 regcache_cooked_write (regs, regno++, valbuf);
6334 len -= INT_REGISTER_SIZE;
6335 valbuf += INT_REGISTER_SIZE;
6336 }
6337 }
6338 }
6339 else
6340 {
6341 /* For a structure or union the behaviour is as if the value had
6342 been stored to word-aligned memory and then loaded into
6343 registers with 32-bit load instruction(s). */
6344 int len = TYPE_LENGTH (type);
6345 int regno = ARM_A1_REGNUM;
6346 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6347
6348 while (len > 0)
6349 {
6350 memcpy (tmpbuf, valbuf,
6351 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6352 regcache_cooked_write (regs, regno++, tmpbuf);
6353 len -= INT_REGISTER_SIZE;
6354 valbuf += INT_REGISTER_SIZE;
6355 }
6356 }
6357 }
6358
6359
6360 /* Handle function return values. */
6361
6362 static enum return_value_convention
6363 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
6364 struct type *valtype, struct regcache *regcache,
6365 gdb_byte *readbuf, const gdb_byte *writebuf)
6366 {
6367 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6368 enum arm_vfp_cprc_base_type vfp_base_type;
6369 int vfp_base_count;
6370
6371 if (arm_vfp_abi_for_function (gdbarch, func_type)
6372 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
6373 {
6374 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
6375 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
6376 int i;
6377 for (i = 0; i < vfp_base_count; i++)
6378 {
6379 if (reg_char == 'q')
6380 {
6381 if (writebuf)
6382 arm_neon_quad_write (gdbarch, regcache, i,
6383 writebuf + i * unit_length);
6384
6385 if (readbuf)
6386 arm_neon_quad_read (gdbarch, regcache, i,
6387 readbuf + i * unit_length);
6388 }
6389 else
6390 {
6391 char name_buf[4];
6392 int regnum;
6393
6394 sprintf (name_buf, "%c%d", reg_char, i);
6395 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6396 strlen (name_buf));
6397 if (writebuf)
6398 regcache_cooked_write (regcache, regnum,
6399 writebuf + i * unit_length);
6400 if (readbuf)
6401 regcache_cooked_read (regcache, regnum,
6402 readbuf + i * unit_length);
6403 }
6404 }
6405 return RETURN_VALUE_REGISTER_CONVENTION;
6406 }
6407
6408 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
6409 || TYPE_CODE (valtype) == TYPE_CODE_UNION
6410 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
6411 {
6412 if (tdep->struct_return == pcc_struct_return
6413 || arm_return_in_memory (gdbarch, valtype))
6414 return RETURN_VALUE_STRUCT_CONVENTION;
6415 }
6416
6417 if (writebuf)
6418 arm_store_return_value (valtype, regcache, writebuf);
6419
6420 if (readbuf)
6421 arm_extract_return_value (valtype, regcache, readbuf);
6422
6423 return RETURN_VALUE_REGISTER_CONVENTION;
6424 }
6425
6426
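/* Extract the longjmp target PC from the jmp_buf whose address is in r0.
Return non-zero on success. */
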
6427 static int
6428 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
6429 {
6430 struct gdbarch *gdbarch = get_frame_arch (frame);
6431 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6432 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6433 CORE_ADDR jb_addr;
6434 char buf[INT_REGISTER_SIZE];
6435
6436 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
6437
6438 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
6439 INT_REGISTER_SIZE))
6440 return 0;
6441
6442 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
6443 return 1;
6444 }
6445
6446 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
6447 return the target PC. Otherwise return 0. */
6448
6449 CORE_ADDR
6450 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
6451 {
6452 char *name;
6453 int namelen;
6454 CORE_ADDR start_addr;
6455
6456 /* Find the starting address and name of the function containing the PC. */
6457 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
6458 return 0;
6459
6460 /* If PC is in a Thumb call or return stub, return the address of the
6461 target PC, which is in a register. The thunk functions are called
6462 _call_via_xx, where x is the register name. The possible names
6463 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
6464 functions, named __ARM_call_via_r[0-7]. */
6465 if (strncmp (name, "_call_via_", 10) == 0
6466 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
6467 {
6468 /* Use the name suffix to determine which register contains the
6469 target PC. */
6470 static char *table[15] =
6471 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
6472 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
6473 };
6474 int regno;
6475 int offset = strlen (name) - 2;
6476
6477 for (regno = 0; regno <= 14; regno++)
6478 if (strcmp (&name[offset], table[regno]) == 0)
6479 return get_frame_register_unsigned (frame, regno);
6480 }
6481
6482 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
6483 non-interworking calls to foo. We could decode the stubs
6484 to find the target but it's easier to use the symbol table. */
6485 namelen = strlen (name);
6486 if (name[0] == '_' && name[1] == '_'
6487 && ((namelen > 2 + strlen ("_from_thumb")
6488 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
6489 strlen ("_from_thumb")) == 0)
6490 || (namelen > 2 + strlen ("_from_arm")
6491 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
6492 strlen ("_from_arm")) == 0)))
6493 {
6494 char *target_name;
6495 int target_len = namelen - 2;
6496 struct minimal_symbol *minsym;
6497 struct objfile *objfile;
6498 struct obj_section *sec;
6499
6500 if (name[namelen - 1] == 'b')
6501 target_len -= strlen ("_from_thumb");
6502 else
6503 target_len -= strlen ("_from_arm");
6504
6505 target_name = alloca (target_len + 1);
6506 memcpy (target_name, name + 2, target_len);
6507 target_name[target_len] = '\0';
6508
6509 sec = find_pc_section (pc);
6510 objfile = (sec == NULL) ? NULL : sec->objfile;
6511 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
6512 if (minsym != NULL)
6513 return SYMBOL_VALUE_ADDRESS (minsym);
6514 else
6515 return 0;
6516 }
6517
6518 return 0; /* not a stub */
6519 }
6520
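/* Implement the top-level "set arm" command, which just prints the help
for its subcommands. */
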
6521 static void
6522 set_arm_command (char *args, int from_tty)
6523 {
6524 printf_unfiltered (_("\
6525 \"set arm\" must be followed by an appropriate subcommand.\n"));
6526 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
6527 }
6528
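/* Implement the top-level "show arm" command. */
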
6529 static void
6530 show_arm_command (char *args, int from_tty)
6531 {
6532 cmd_show_list (showarmcmdlist, from_tty, "");
6533 }
6534
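/* Rebuild the current architecture after one of the "set arm ..." settings
has changed. */
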
6535 static void
6536 arm_update_current_architecture (void)
6537 {
6538 struct gdbarch_info info;
6539
6540 /* If the current architecture is not ARM, we have nothing to do. */
6541 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
6542 return;
6543
6544 /* Update the architecture. */
6545 gdbarch_info_init (&info);
6546
6547 if (!gdbarch_update_p (info))
6548 internal_error (__FILE__, __LINE__, "could not update architecture");
6549 }
6550
6551 static void
6552 set_fp_model_sfunc (char *args, int from_tty,
6553 struct cmd_list_element *c)
6554 {
6555 enum arm_float_model fp_model;
6556
6557 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
6558 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
6559 {
6560 arm_fp_model = fp_model;
6561 break;
6562 }
6563
6564 if (fp_model == ARM_FLOAT_LAST)
6565 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
6566 current_fp_model);
6567
6568 arm_update_current_architecture ();
6569 }
6570
6571 static void
6572 show_fp_model (struct ui_file *file, int from_tty,
6573 struct cmd_list_element *c, const char *value)
6574 {
6575 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6576
6577 if (arm_fp_model == ARM_FLOAT_AUTO
6578 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6579 fprintf_filtered (file, _("\
6580 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
6581 fp_model_strings[tdep->fp_model]);
6582 else
6583 fprintf_filtered (file, _("\
6584 The current ARM floating point model is \"%s\".\n"),
6585 fp_model_strings[arm_fp_model]);
6586 }
6587
6588 static void
6589 arm_set_abi (char *args, int from_tty,
6590 struct cmd_list_element *c)
6591 {
6592 enum arm_abi_kind arm_abi;
6593
6594 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
6595 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
6596 {
6597 arm_abi_global = arm_abi;
6598 break;
6599 }
6600
6601 if (arm_abi == ARM_ABI_LAST)
6602 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
6603 arm_abi_string);
6604
6605 arm_update_current_architecture ();
6606 }
6607
6608 static void
6609 arm_show_abi (struct ui_file *file, int from_tty,
6610 struct cmd_list_element *c, const char *value)
6611 {
6612 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6613
6614 if (arm_abi_global == ARM_ABI_AUTO
6615 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6616 fprintf_filtered (file, _("\
6617 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
6618 arm_abi_strings[tdep->arm_abi]);
6619 else
6620 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
6621 arm_abi_string);
6622 }
6623
6624 static void
6625 arm_show_fallback_mode (struct ui_file *file, int from_tty,
6626 struct cmd_list_element *c, const char *value)
6627 {
6628 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6629
6630 fprintf_filtered (file, _("\
6631 The current execution mode assumed (when symbols are unavailable) is \"%s\".\n"),
6632 arm_fallback_mode_string);
6633 }
6634
6635 static void
6636 arm_show_force_mode (struct ui_file *file, int from_tty,
6637 struct cmd_list_element *c, const char *value)
6638 {
6639 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6640
6641 fprintf_filtered (file, _("\
6642 The current execution mode assumed (even when symbols are available) is \"%s\".\n"),
6643 arm_force_mode_string);
6644 }
6645
6646 /* If the user changes the register disassembly style used for info
6647 register and other commands, we have to also switch the style used
6648 in opcodes for disassembly output. This function is run by the "set
6649 arm disassembly" command, and performs that switch. */
6650
6651 static void
6652 set_disassembly_style_sfunc (char *args, int from_tty,
6653 struct cmd_list_element *c)
6654 {
6655 set_disassembly_style ();
6656 }
6657 \f
6658 /* Return the ARM register name corresponding to register I. */
6659 static const char *
6660 arm_register_name (struct gdbarch *gdbarch, int i)
6661 {
6662 const int num_regs = gdbarch_num_regs (gdbarch);
6663
6664 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
6665 && i >= num_regs && i < num_regs + 32)
6666 {
6667 static const char *const vfp_pseudo_names[] = {
6668 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
6669 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
6670 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
6671 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
6672 };
6673
6674 return vfp_pseudo_names[i - num_regs];
6675 }
6676
6677 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
6678 && i >= num_regs + 32 && i < num_regs + 32 + 16)
6679 {
6680 static const char *const neon_pseudo_names[] = {
6681 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
6682 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
6683 };
6684
6685 return neon_pseudo_names[i - num_regs - 32];
6686 }
6687
6688 if (i >= ARRAY_SIZE (arm_register_names))
6689 /* These registers are only supported on targets which supply
6690 an XML description. */
6691 return "";
6692
6693 return arm_register_names[i];
6694 }
6695
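/* Synchronize the opcodes disassembler's register naming with the
currently selected disassembly style. */
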
6696 static void
6697 set_disassembly_style (void)
6698 {
6699 int current;
6700
6701 /* Find the style that the user wants. */
6702 for (current = 0; current < num_disassembly_options; current++)
6703 if (disassembly_style == valid_disassembly_styles[current])
6704 break;
6705 gdb_assert (current < num_disassembly_options);
6706
6707 /* Synchronize the disassembler. */
6708 set_arm_regname_option (current);
6709 }
6710
6711 /* Test whether the coff symbol specific value corresponds to a Thumb
6712 function. */
6713
6714 static int
6715 coff_sym_is_thumb (int val)
6716 {
6717 return (val == C_THUMBEXT
6718 || val == C_THUMBSTAT
6719 || val == C_THUMBEXTFUNC
6720 || val == C_THUMBSTATFUNC
6721 || val == C_THUMBLABEL);
6722 }
6723
6724 /* arm_coff_make_msymbol_special()
6725 arm_elf_make_msymbol_special()
6726
6727 These functions test whether the COFF or ELF symbol corresponds to
6728 an address in thumb code, and set a "special" bit in a minimal
6729 symbol to indicate that it does. */
6730
6731 static void
6732 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
6733 {
6734 /* Thumb symbols are of type STT_LOPROC, (synonymous with
6735 STT_ARM_TFUNC). */
6736 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
6737 == STT_LOPROC)
6738 MSYMBOL_SET_SPECIAL (msym);
6739 }
6740
6741 static void
6742 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
6743 {
6744 if (coff_sym_is_thumb (val))
6745 MSYMBOL_SET_SPECIAL (msym);
6746 }
6747
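/* Free the per-objfile mapping symbol data when OBJFILE is discarded. */
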
6748 static void
6749 arm_objfile_data_free (struct objfile *objfile, void *arg)
6750 {
6751 struct arm_per_objfile *data = arg;
6752 unsigned int i;
6753
6754 for (i = 0; i < objfile->obfd->section_count; i++)
6755 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
6756 }
6757
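/* Record the mapping symbol SYM ($a, $t or $d) for OBJFILE, keeping each
section's vector sorted by address so it can be searched later. */
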
6758 static void
6759 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
6760 asymbol *sym)
6761 {
6762 const char *name = bfd_asymbol_name (sym);
6763 struct arm_per_objfile *data;
6764 VEC(arm_mapping_symbol_s) **map_p;
6765 struct arm_mapping_symbol new_map_sym;
6766
6767 gdb_assert (name[0] == '$');
6768 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
6769 return;
6770
6771 data = objfile_data (objfile, arm_objfile_data_key);
6772 if (data == NULL)
6773 {
6774 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
6775 struct arm_per_objfile);
6776 set_objfile_data (objfile, arm_objfile_data_key, data);
6777 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
6778 objfile->obfd->section_count,
6779 VEC(arm_mapping_symbol_s) *);
6780 }
6781 map_p = &data->section_maps[bfd_get_section (sym)->index];
6782
6783 new_map_sym.value = sym->value;
6784 new_map_sym.type = name[1];
6785
6786 /* Assume that most mapping symbols appear in order of increasing
6787 value. If they were randomly distributed, it would be faster to
6788 always push here and then sort at first use. */
6789 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
6790 {
6791 struct arm_mapping_symbol *prev_map_sym;
6792
6793 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
6794 if (prev_map_sym->value >= sym->value)
6795 {
6796 unsigned int idx;
6797 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
6798 arm_compare_mapping_symbols);
6799 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
6800 return;
6801 }
6802 }
6803
6804 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
6805 }
6806
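/* Write PC to REGCACHE, and keep the CPSR T bit consistent with whether
the new PC is Thumb or ARM code. */
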
6807 static void
6808 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
6809 {
6810 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6811 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
6812
6813 /* If necessary, set the T bit. */
6814 if (arm_apcs_32)
6815 {
6816 ULONGEST val, t_bit;
6817 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
6818 t_bit = arm_psr_thumb_bit (gdbarch);
6819 if (arm_pc_is_thumb (gdbarch, pc))
6820 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6821 val | t_bit);
6822 else
6823 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6824 val & ~t_bit);
6825 }
6826 }
6827
6828 /* Read the contents of a NEON quad register, by reading from two
6829 double registers. This is used to implement the quad pseudo
6830 registers, and for argument passing in case the quad registers are
6831 missing; vectors are passed in quad registers when using the VFP
6832 ABI, even if a NEON unit is not present. REGNUM is the index of
6833 the quad register, in [0, 15]. */
6834
6835 static void
6836 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
6837 int regnum, gdb_byte *buf)
6838 {
6839 char name_buf[4];
6840 gdb_byte reg_buf[8];
6841 int offset, double_regnum;
6842
6843 sprintf (name_buf, "d%d", regnum << 1);
6844 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6845 strlen (name_buf));
6846
6847 /* d0 is always the least significant half of q0. */
6848 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6849 offset = 8;
6850 else
6851 offset = 0;
6852
6853 regcache_raw_read (regcache, double_regnum, reg_buf);
6854 memcpy (buf + offset, reg_buf, 8);
6855
6856 offset = 8 - offset;
6857 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
6858 memcpy (buf + offset, reg_buf, 8);
6859 }
6860
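/* Read a VFP single-precision or Neon quad pseudo register REGNUM by
reading the underlying double register(s). */
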
6861 static void
6862 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
6863 int regnum, gdb_byte *buf)
6864 {
6865 const int num_regs = gdbarch_num_regs (gdbarch);
6866 char name_buf[4];
6867 gdb_byte reg_buf[8];
6868 int offset, double_regnum;
6869
6870 gdb_assert (regnum >= num_regs);
6871 regnum -= num_regs;
6872
6873 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6874 /* Quad-precision register. */
6875 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
6876 else
6877 {
6878 /* Single-precision register. */
6879 gdb_assert (regnum < 32);
6880
6881 /* s0 is always the least significant half of d0. */
6882 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6883 offset = (regnum & 1) ? 0 : 4;
6884 else
6885 offset = (regnum & 1) ? 4 : 0;
6886
6887 sprintf (name_buf, "d%d", regnum >> 1);
6888 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6889 strlen (name_buf));
6890
6891 regcache_raw_read (regcache, double_regnum, reg_buf);
6892 memcpy (buf, reg_buf + offset, 4);
6893 }
6894 }
6895
6896 /* Store the contents of BUF to a NEON quad register, by writing to
6897 two double registers. This is used to implement the quad pseudo
6898 registers, and for argument passing in case the quad registers are
6899 missing; vectors are passed in quad registers when using the VFP
6900 ABI, even if a NEON unit is not present. REGNUM is the index
6901 of the quad register, in [0, 15]. */
6902
6903 static void
6904 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
6905 int regnum, const gdb_byte *buf)
6906 {
6907 char name_buf[4];
6908 gdb_byte reg_buf[8];
6909 int offset, double_regnum;
6910
6911 sprintf (name_buf, "d%d", regnum << 1);
6912 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6913 strlen (name_buf));
6914
6915 /* d0 is always the least significant half of q0. */
6916 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6917 offset = 8;
6918 else
6919 offset = 0;
6920
6921 regcache_raw_write (regcache, double_regnum, buf + offset);
6922 offset = 8 - offset;
6923 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
6924 }
6925
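/* Write a VFP single-precision or Neon quad pseudo register REGNUM by
writing the underlying double register(s). */
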
6926 static void
6927 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
6928 int regnum, const gdb_byte *buf)
6929 {
6930 const int num_regs = gdbarch_num_regs (gdbarch);
6931 char name_buf[4];
6932 gdb_byte reg_buf[8];
6933 int offset, double_regnum;
6934
6935 gdb_assert (regnum >= num_regs);
6936 regnum -= num_regs;
6937
6938 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6939 /* Quad-precision register. */
6940 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
6941 else
6942 {
6943 /* Single-precision register. */
6944 gdb_assert (regnum < 32);
6945
6946 /* s0 is always the least significant half of d0. */
6947 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6948 offset = (regnum & 1) ? 0 : 4;
6949 else
6950 offset = (regnum & 1) ? 4 : 0;
6951
6952 sprintf (name_buf, "d%d", regnum >> 1);
6953 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6954 strlen (name_buf));
6955
6956 regcache_raw_read (regcache, double_regnum, reg_buf);
6957 memcpy (reg_buf + offset, buf, 4);
6958 regcache_raw_write (regcache, double_regnum, reg_buf);
6959 }
6960 }
6961
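/* Return the value, in FRAME, of the register whose number BATON points
to. */
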
6962 static struct value *
6963 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
6964 {
6965 const int *reg_p = baton;
6966 return value_of_register (*reg_p, frame);
6967 }
6968 \f
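/* Determine the OS ABI of the ELF file ABFD, consulting its note sections
when the ELF header only identifies it as ARM. */
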
6969 static enum gdb_osabi
6970 arm_elf_osabi_sniffer (bfd *abfd)
6971 {
6972 unsigned int elfosabi;
6973 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
6974
6975 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
6976
6977 if (elfosabi == ELFOSABI_ARM)
6978 /* GNU tools use this value. Check note sections in this case,
6979 as well. */
6980 bfd_map_over_sections (abfd,
6981 generic_elf_osabi_sniff_abi_tag_sections,
6982 &osabi);
6983
6984 /* Anything else will be handled by the generic ELF sniffer. */
6985 return osabi;
6986 }
6987
6988 \f
6989 /* Initialize the current architecture based on INFO. If possible,
6990 re-use an architecture from ARCHES, which is a list of
6991 architectures already created during this debugging session.
6992
6993 Called e.g. at program startup, when reading a core file, and when
6994 reading a binary file. */
6995
6996 static struct gdbarch *
6997 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
6998 {
6999 struct gdbarch_tdep *tdep;
7000 struct gdbarch *gdbarch;
7001 struct gdbarch_list *best_arch;
7002 enum arm_abi_kind arm_abi = arm_abi_global;
7003 enum arm_float_model fp_model = arm_fp_model;
7004 struct tdesc_arch_data *tdesc_data = NULL;
7005 int i, is_m = 0;
7006 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
7007 int have_neon = 0;
7008 int have_fpa_registers = 1;
7009 const struct target_desc *tdesc = info.target_desc;
7010
7011 /* If we have an object to base this architecture on, try to determine
7012 its ABI. */
7013
7014 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
7015 {
7016 int ei_osabi, e_flags;
7017
7018 switch (bfd_get_flavour (info.abfd))
7019 {
7020 case bfd_target_aout_flavour:
7021 /* Assume it's an old APCS-style ABI. */
7022 arm_abi = ARM_ABI_APCS;
7023 break;
7024
7025 case bfd_target_coff_flavour:
7026 /* Assume it's an old APCS-style ABI. */
7027 /* XXX WinCE? */
7028 arm_abi = ARM_ABI_APCS;
7029 break;
7030
7031 case bfd_target_elf_flavour:
7032 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
7033 e_flags = elf_elfheader (info.abfd)->e_flags;
7034
7035 if (ei_osabi == ELFOSABI_ARM)
7036 {
7037 /* GNU tools used to use this value, but do not for EABI
7038 objects. There's nowhere to tag an EABI version
7039 anyway, so assume APCS. */
7040 arm_abi = ARM_ABI_APCS;
7041 }
7042 else if (ei_osabi == ELFOSABI_NONE)
7043 {
7044 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
7045 int attr_arch, attr_profile;
7046
7047 switch (eabi_ver)
7048 {
7049 case EF_ARM_EABI_UNKNOWN:
7050 /* Assume GNU tools. */
7051 arm_abi = ARM_ABI_APCS;
7052 break;
7053
7054 case EF_ARM_EABI_VER4:
7055 case EF_ARM_EABI_VER5:
7056 arm_abi = ARM_ABI_AAPCS;
7057 /* EABI binaries default to VFP float ordering.
7058 They may also contain build attributes that can
7059 be used to identify if the VFP argument-passing
7060 ABI is in use. */
7061 if (fp_model == ARM_FLOAT_AUTO)
7062 {
7063 #ifdef HAVE_ELF
7064 switch (bfd_elf_get_obj_attr_int (info.abfd,
7065 OBJ_ATTR_PROC,
7066 Tag_ABI_VFP_args))
7067 {
7068 case 0:
7069 /* "The user intended FP parameter/result
7070 passing to conform to AAPCS, base
7071 variant". */
7072 fp_model = ARM_FLOAT_SOFT_VFP;
7073 break;
7074 case 1:
7075 /* "The user intended FP parameter/result
7076 passing to conform to AAPCS, VFP
7077 variant". */
7078 fp_model = ARM_FLOAT_VFP;
7079 break;
7080 case 2:
7081 /* "The user intended FP parameter/result
7082 passing to conform to tool chain-specific
7083 conventions" - we don't know any such
7084 conventions, so leave it as "auto". */
7085 break;
7086 default:
7087 /* Attribute value not mentioned in the
7088 October 2008 ABI, so leave it as
7089 "auto". */
7090 break;
7091 }
7092 #else
7093 fp_model = ARM_FLOAT_SOFT_VFP;
7094 #endif
7095 }
7096 break;
7097
7098 default:
7099 /* Leave it as "auto". */
7100 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
7101 break;
7102 }
7103
7104 #ifdef HAVE_ELF
7105 /* Detect M-profile programs. This only works if the
7106 executable file includes build attributes; GCC does
7107 copy them to the executable, but e.g. RealView does
7108 not. */
7109 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
7110 Tag_CPU_arch);
7111 attr_profile = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
7112 Tag_CPU_arch_profile);
7113 /* GCC specifies the profile for v6-M; RealView only
7114 specifies the profile for architectures starting with
7115 V7 (as opposed to architectures with a tag
7116 numerically greater than TAG_CPU_ARCH_V7). */
7117 if (!tdesc_has_registers (tdesc)
7118 && (attr_arch == TAG_CPU_ARCH_V6_M
7119 || attr_arch == TAG_CPU_ARCH_V6S_M
7120 || attr_profile == 'M'))
7121 tdesc = tdesc_arm_with_m;
7122 #endif
7123 }
7124
7125 if (fp_model == ARM_FLOAT_AUTO)
7126 {
7127 int e_flags = elf_elfheader (info.abfd)->e_flags;
7128
7129 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
7130 {
7131 case 0:
7132 /* Leave it as "auto". Strictly speaking this case
7133 means FPA, but almost nobody uses that now, and
7134 many toolchains fail to set the appropriate bits
7135 for the floating-point model they use. */
7136 break;
7137 case EF_ARM_SOFT_FLOAT:
7138 fp_model = ARM_FLOAT_SOFT_FPA;
7139 break;
7140 case EF_ARM_VFP_FLOAT:
7141 fp_model = ARM_FLOAT_VFP;
7142 break;
7143 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
7144 fp_model = ARM_FLOAT_SOFT_VFP;
7145 break;
7146 }
7147 }
7148
7149 if (e_flags & EF_ARM_BE8)
7150 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
7151
7152 break;
7153
7154 default:
7155 /* Leave it as "auto". */
7156 break;
7157 }
7158 }
7159
7160 /* Check any target description for validity. */
7161 if (tdesc_has_registers (tdesc))
7162 {
7163 /* For most registers we require GDB's default names; but also allow
7164 the numeric names for sp / lr / pc, as a convenience. */
7165 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
7166 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
7167 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
7168
7169 const struct tdesc_feature *feature;
7170 int valid_p;
7171
7172 feature = tdesc_find_feature (tdesc,
7173 "org.gnu.gdb.arm.core");
7174 if (feature == NULL)
7175 {
7176 feature = tdesc_find_feature (tdesc,
7177 "org.gnu.gdb.arm.m-profile");
7178 if (feature == NULL)
7179 return NULL;
7180 else
7181 is_m = 1;
7182 }
7183
7184 tdesc_data = tdesc_data_alloc ();
7185
7186 valid_p = 1;
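/* Registers r0 through r12 must carry GDB's default names.  */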
7187 for (i = 0; i < ARM_SP_REGNUM; i++)
7188 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7189 arm_register_names[i]);
7190 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7191 ARM_SP_REGNUM,
7192 arm_sp_names);
7193 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7194 ARM_LR_REGNUM,
7195 arm_lr_names);
7196 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7197 ARM_PC_REGNUM,
7198 arm_pc_names);
7199 if (is_m)
7200 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7201 ARM_PS_REGNUM, "xpsr");
7202 else
7203 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7204 ARM_PS_REGNUM, "cpsr");
7205
7206 if (!valid_p)
7207 {
7208 tdesc_data_cleanup (tdesc_data);
7209 return NULL;
7210 }
7211
7212 feature = tdesc_find_feature (tdesc,
7213 "org.gnu.gdb.arm.fpa");
7214 if (feature != NULL)
7215 {
7216 valid_p = 1;
7217 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
7218 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7219 arm_register_names[i]);
7220 if (!valid_p)
7221 {
7222 tdesc_data_cleanup (tdesc_data);
7223 return NULL;
7224 }
7225 }
7226 else
7227 have_fpa_registers = 0;
7228
7229 feature = tdesc_find_feature (tdesc,
7230 "org.gnu.gdb.xscale.iwmmxt");
7231 if (feature != NULL)
7232 {
7233 static const char *const iwmmxt_names[] = {
7234 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
7235 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
7236 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
7237 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
7238 };
7239
7240 valid_p = 1;
7241 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
7242 valid_p
7243 &= tdesc_numbered_register (feature, tdesc_data, i,
7244 iwmmxt_names[i - ARM_WR0_REGNUM]);
7245
7246 /* Check for the control registers, but do not fail if they
7247 are missing. */
7248 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
7249 tdesc_numbered_register (feature, tdesc_data, i,
7250 iwmmxt_names[i - ARM_WR0_REGNUM]);
7251
7252 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
7253 valid_p
7254 &= tdesc_numbered_register (feature, tdesc_data, i,
7255 iwmmxt_names[i - ARM_WR0_REGNUM]);
7256
7257 if (!valid_p)
7258 {
7259 tdesc_data_cleanup (tdesc_data);
7260 return NULL;
7261 }
7262 }
7263
7264 /* If we have a VFP unit, check whether the single precision registers
7265 are present. If not, then we will synthesize them as pseudo
7266 registers. */
7267 feature = tdesc_find_feature (tdesc,
7268 "org.gnu.gdb.arm.vfp");
7269 if (feature != NULL)
7270 {
7271 static const char *const vfp_double_names[] = {
7272 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
7273 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
7274 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
7275 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
7276 };
7277
7278 /* Require the double precision registers. There must be either
7279 16 or 32. */
7280 valid_p = 1;
7281 for (i = 0; i < 32; i++)
7282 {
7283 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7284 ARM_D0_REGNUM + i,
7285 vfp_double_names[i]);
7286 if (!valid_p)
7287 break;
7288 }
7289
7290 if (!valid_p && i != 16)
7291 {
7292 tdesc_data_cleanup (tdesc_data);
7293 return NULL;
7294 }
7295
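/* tdesc_unnumbered_register returns nonzero when the register is
   present; if "s0" is absent, the single-precision registers will be
   provided as pseudo registers.  */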
7296 if (tdesc_unnumbered_register (feature, "s0") == 0)
7297 have_vfp_pseudos = 1;
7298
7299 have_vfp_registers = 1;
7300
7301 /* If we have VFP, also check for NEON. The architecture allows
7302 NEON without VFP (integer vector operations only), but GDB
7303 does not support that. */
7304 feature = tdesc_find_feature (tdesc,
7305 "org.gnu.gdb.arm.neon");
7306 if (feature != NULL)
7307 {
7308 /* NEON requires 32 double-precision registers. */
7309 if (i != 32)
7310 {
7311 tdesc_data_cleanup (tdesc_data);
7312 return NULL;
7313 }
7314
7315 /* If there are quad registers defined by the stub, use
7316 their type; otherwise (normally) provide them with
7317 the default type. */
7318 if (tdesc_unnumbered_register (feature, "q0") == 0)
7319 have_neon_pseudos = 1;
7320
7321 have_neon = 1;
7322 }
7323 }
7324 }
7325
7326 /* If there is already a candidate, use it. */
7327 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
7328 best_arch != NULL;
7329 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
7330 {
7331 if (arm_abi != ARM_ABI_AUTO
7332 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
7333 continue;
7334
7335 if (fp_model != ARM_FLOAT_AUTO
7336 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
7337 continue;
7338
7339 /* There are various other properties in tdep that we do not
7340 need to check here: those derived from a target description,
7341 since gdbarches with a different target description are
7342 automatically disqualified. */
7343
7344 /* Do check is_m, though, since it might come from the binary. */
7345 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
7346 continue;
7347
7348 /* Found a match. */
7349 break;
7350 }
7351
7352 if (best_arch != NULL)
7353 {
7354 if (tdesc_data != NULL)
7355 tdesc_data_cleanup (tdesc_data);
7356 return best_arch->gdbarch;
7357 }
7358
7359 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
7360 gdbarch = gdbarch_alloc (&info, tdep);
7361
7362 /* Record additional information about the architecture we are defining.
7363 These are gdbarch discriminators, like the OSABI. */
7364 tdep->arm_abi = arm_abi;
7365 tdep->fp_model = fp_model;
7366 tdep->is_m = is_m;
7367 tdep->have_fpa_registers = have_fpa_registers;
7368 tdep->have_vfp_registers = have_vfp_registers;
7369 tdep->have_vfp_pseudos = have_vfp_pseudos;
7370 tdep->have_neon_pseudos = have_neon_pseudos;
7371 tdep->have_neon = have_neon;
7372
7373 /* Breakpoints. */
7374 switch (info.byte_order_for_code)
7375 {
7376 case BFD_ENDIAN_BIG:
7377 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
7378 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
7379 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
7380 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
7381
7382 break;
7383
7384 case BFD_ENDIAN_LITTLE:
7385 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
7386 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
7387 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
7388 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
7389
7390 break;
7391
7392 default:
7393 internal_error (__FILE__, __LINE__,
7394 _("arm_gdbarch_init: bad byte order for float format"));
7395 }
7396
7397 /* On ARM targets char defaults to unsigned. */
7398 set_gdbarch_char_signed (gdbarch, 0);
7399
7400 /* Note: for displaced stepping, this includes the breakpoint, and one word
7401 of additional scratch space. This setting isn't used for anything beside
7402 displaced stepping at present. */
7403 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
7404
7405 /* This should be low enough for everything. */
7406 tdep->lowest_pc = 0x20;
7407 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
7408
7409 /* The default, for both APCS and AAPCS, is to return small
7410 structures in registers. */
7411 tdep->struct_return = reg_struct_return;
7412
7413 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
7414 set_gdbarch_frame_align (gdbarch, arm_frame_align);
7415
7416 set_gdbarch_write_pc (gdbarch, arm_write_pc);
7417
7418 /* Frame handling. */
7419 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
7420 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
7421 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
7422
7423 frame_base_set_default (gdbarch, &arm_normal_base);
7424
7425 /* Address manipulation. */
7426 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
7427 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
7428
7429 /* Advance PC across function entry code. */
7430 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
7431
7432 /* Detect whether PC is in function epilogue. */
7433 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
7434
7435 /* Skip trampolines. */
7436 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
7437
7438 /* The stack grows downward. */
7439 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
7440
7441 /* Breakpoint manipulation. */
7442 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
7443 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
7444 arm_remote_breakpoint_from_pc);
7445
7446 /* Information about registers, etc. */
7447 set_gdbarch_deprecated_fp_regnum (gdbarch, ARM_FP_REGNUM); /* ??? */
7448 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
7449 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
7450 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
7451 set_gdbarch_register_type (gdbarch, arm_register_type);
7452
7453 /* This "info float" is FPA-specific. Use the generic version if we
7454 do not have FPA. */
7455 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
7456 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
7457
7458 /* Internal <-> external register number maps. */
7459 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
7460 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
7461
7462 set_gdbarch_register_name (gdbarch, arm_register_name);
7463
7464 /* Returning results. */
7465 set_gdbarch_return_value (gdbarch, arm_return_value);
7466
7467 /* Disassembly. */
7468 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
7469
7470 /* Minsymbol frobbing. */
7471 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
7472 set_gdbarch_coff_make_msymbol_special (gdbarch,
7473 arm_coff_make_msymbol_special);
7474 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
7475
7476 /* Thumb-2 IT block support. */
7477 set_gdbarch_adjust_breakpoint_address (gdbarch,
7478 arm_adjust_breakpoint_address);
7479
7480 /* Virtual tables. */
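/* Bit 0 of a code address marks Thumb mode, so the virtual-function
   flag is kept in the vtable delta rather than in the pointer.  */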
7481 set_gdbarch_vbit_in_delta (gdbarch, 1);
7482
7483 /* Hook in the ABI-specific overrides, if they have been registered. */
7484 gdbarch_init_osabi (info, gdbarch);
7485
7486 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
7487
7488 /* Add some default predicates. */
7489 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
7490 dwarf2_append_unwinders (gdbarch);
7491 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
7492
7493 /* Now we have tuned the configuration, set a few final things,
7494 based on what the OS ABI has told us. */
7495
7496 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
7497 binaries are always marked. */
7498 if (tdep->arm_abi == ARM_ABI_AUTO)
7499 tdep->arm_abi = ARM_ABI_APCS;
7500
7501 /* We used to default to FPA for generic ARM, but almost nobody
7502 uses that now, and we now provide a way for the user to force
7503 the model. So default to the most useful variant. */
7504 if (tdep->fp_model == ARM_FLOAT_AUTO)
7505 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
7506
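/* jb_pc is only filled in by OS-specific code; the -1 default leaves
   longjmp support disabled.  */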
7507 if (tdep->jb_pc >= 0)
7508 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
7509
7510 /* Floating point sizes and format. */
7511 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
7512 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
7513 {
7514 set_gdbarch_double_format
7515 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7516 set_gdbarch_long_double_format
7517 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7518 }
7519 else
7520 {
7521 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
7522 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
7523 }
7524
7525 if (have_vfp_pseudos)
7526 {
7527 /* NOTE: These are the only pseudo registers used by
7528 the ARM target at the moment. If more are added, a
7529 little more care in numbering will be needed. */
7530
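/* The 32 single-precision registers s0-s31, plus q0-q15 if NEON is
   present.  */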
7531 int num_pseudos = 32;
7532 if (have_neon_pseudos)
7533 num_pseudos += 16;
7534 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
7535 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
7536 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
7537 }
7538
7539 if (tdesc_data)
7540 {
7541 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
7542
7543 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
7544
7545 /* Override tdesc_register_type to adjust the types of VFP
7546 registers for NEON. */
7547 set_gdbarch_register_type (gdbarch, arm_register_type);
7548 }
7549
7550 /* Add standard register aliases. We add aliases even for those
7551 names which are used by the current architecture - it's simpler,
7552 and does no harm, since nothing ever lists user registers. */
7553 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
7554 user_reg_add (gdbarch, arm_register_aliases[i].name,
7555 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
7556
7557 return gdbarch;
7558 }
7559
7560 static void
7561 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
7562 {
7563 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7564
7565 if (tdep == NULL)
7566 return;
7567
7568 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
7569 (unsigned long) tdep->lowest_pc);
7570 }
7571
7572 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
7573
7574 void
7575 _initialize_arm_tdep (void)
7576 {
7577 struct ui_file *stb;
7578 long length;
7579 struct cmd_list_element *new_set, *new_show;
7580 const char *setname;
7581 const char *setdesc;
7582 const char *const *regnames;
7583 int numregs, i, j;
7584 static char *helptext;
7585 char regdesc[1024], *rdptr = regdesc;
7586 size_t rest = sizeof (regdesc);
7587
7588 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
7589
7590 arm_objfile_data_key
7591 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
7592
7593 /* Register an ELF OS ABI sniffer for ARM binaries. */
7594 gdbarch_register_osabi_sniffer (bfd_arch_arm,
7595 bfd_target_elf_flavour,
7596 arm_elf_osabi_sniffer);
7597
7598 /* Initialize the standard target descriptions. */
7599 initialize_tdesc_arm_with_m ();
7600
7601 /* Get the number of possible sets of register names defined in opcodes. */
7602 num_disassembly_options = get_arm_regname_num_options ();
7603
7604 /* Add root prefix command for all "set arm"/"show arm" commands. */
7605 add_prefix_cmd ("arm", no_class, set_arm_command,
7606 _("Various ARM-specific commands."),
7607 &setarmcmdlist, "set arm ", 0, &setlist);
7608
7609 add_prefix_cmd ("arm", no_class, show_arm_command,
7610 _("Various ARM-specific commands."),
7611 &showarmcmdlist, "show arm ", 0, &showlist);
7612
7613 /* Sync the opcode insn printer with our register viewer. */
7614 parse_arm_disassembler_option ("reg-names-std");
7615
7616 /* Initialize the array that will be passed to
7617 add_setshow_enum_cmd(). */
7618 valid_disassembly_styles
7619 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
7620 for (i = 0; i < num_disassembly_options; i++)
7621 {
7622 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
7623 valid_disassembly_styles[i] = setname;
7624 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
7625 rdptr += length;
7626 rest -= length;
7627 /* When we find the default names, tell the disassembler to use
7628 them. */
7629 if (!strcmp (setname, "std"))
7630 {
7631 disassembly_style = setname;
7632 set_arm_regname_option (i);
7633 }
7634 }
7635 /* Mark the end of valid options. */
7636 valid_disassembly_styles[num_disassembly_options] = NULL;
7637
7638 /* Create the help text. */
7639 stb = mem_fileopen ();
7640 fprintf_unfiltered (stb, "%s%s%s",
7641 _("The valid values are:\n"),
7642 regdesc,
7643 _("The default is \"std\"."));
7644 helptext = ui_file_xstrdup (stb, NULL);
7645 ui_file_delete (stb);
7646
7647 add_setshow_enum_cmd("disassembler", no_class,
7648 valid_disassembly_styles, &disassembly_style,
7649 _("Set the disassembly style."),
7650 _("Show the disassembly style."),
7651 helptext,
7652 set_disassembly_style_sfunc,
7653 NULL, /* FIXME: i18n: The disassembly style is \"%s\". */
7654 &setarmcmdlist, &showarmcmdlist);
7655
7656 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
7657 _("Set usage of ARM 32-bit mode."),
7658 _("Show usage of ARM 32-bit mode."),
7659 _("When off, a 26-bit PC will be used."),
7660 NULL,
7661 NULL, /* FIXME: i18n: Usage of ARM 32-bit mode is %s. */
7662 &setarmcmdlist, &showarmcmdlist);
7663
7664 /* Add a command to allow the user to force the FPU model. */
7665 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
7666 _("Set the floating point type."),
7667 _("Show the floating point type."),
7668 _("auto - Determine the FP typefrom the OS-ABI.\n\
7669 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
7670 fpa - FPA co-processor (GCC compiled).\n\
7671 softvfp - Software FP with pure-endian doubles.\n\
7672 vfp - VFP co-processor."),
7673 set_fp_model_sfunc, show_fp_model,
7674 &setarmcmdlist, &showarmcmdlist);
7675
7676 /* Add a command to allow the user to force the ABI. */
7677 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
7678 _("Set the ABI."),
7679 _("Show the ABI."),
7680 NULL, arm_set_abi, arm_show_abi,
7681 &setarmcmdlist, &showarmcmdlist);
7682
7683 /* Add two commands to allow the user to force the assumed
7684 execution mode. */
7685 add_setshow_enum_cmd ("fallback-mode", class_support,
7686 arm_mode_strings, &arm_fallback_mode_string,
7687 _("Set the mode assumed when symbols are unavailable."),
7688 _("Show the mode assumed when symbols are unavailable."),
7689 NULL, NULL, arm_show_fallback_mode,
7690 &setarmcmdlist, &showarmcmdlist);
7691 add_setshow_enum_cmd ("force-mode", class_support,
7692 arm_mode_strings, &arm_force_mode_string,
7693 _("Set the mode assumed even when symbols are available."),
7694 _("Show the mode assumed even when symbols are available."),
7695 NULL, NULL, arm_show_force_mode,
7696 &setarmcmdlist, &showarmcmdlist);
7697
7698 /* Debugging flag. */
7699 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
7700 _("Set ARM debugging."),
7701 _("Show ARM debugging."),
7702 _("When on, arm-specific debugging is enabled."),
7703 NULL,
7704 NULL, /* FIXME: i18n: "ARM debugging is %s." */
7705 &setdebuglist, &showdebuglist);
7706 }