305201a930fb4bf4973387f2765dc945d119b21d
[deliverable/binutils-gdb.git] / gdb / spu-tdep.c
1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA. */
23
24 #include "defs.h"
25 #include "arch-utils.h"
26 #include "gdbtypes.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "gdb_assert.h"
31 #include "frame.h"
32 #include "frame-unwind.h"
33 #include "frame-base.h"
34 #include "trad-frame.h"
35 #include "symtab.h"
36 #include "symfile.h"
37 #include "value.h"
38 #include "inferior.h"
39 #include "dis-asm.h"
40 #include "objfiles.h"
41 #include "language.h"
42 #include "regcache.h"
43 #include "reggroups.h"
44 #include "floatformat.h"
45 #include "observer.h"
46
47 #include "spu-tdep.h"
48
49 /* SPU-specific vector type. */
50 struct type *spu_builtin_type_vec128;
51
52 /* Registers. */
53
static const char *
spu_register_name (int reg_nr)
{
  /* Names of the 128 general-purpose vector registers, followed by the
     three special registers (ID, PC, SP).  String literals are not
     writable, so the table is fully const.  */
  static const char *const register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp"
    };

  /* Return NULL for register numbers outside the table; the explicit
     cast avoids a signed/unsigned comparison (reg_nr >= 0 here).  */
  if (reg_nr < 0)
    return NULL;
  if (reg_nr >= (int) (sizeof register_names / sizeof register_names[0]))
    return NULL;

  return register_names[reg_nr];
}
85
86 static struct type *
87 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
88 {
89 if (reg_nr < SPU_NUM_GPRS)
90 return spu_builtin_type_vec128;
91
92 switch (reg_nr)
93 {
94 case SPU_ID_REGNUM:
95 return builtin_type_uint32;
96
97 case SPU_PC_REGNUM:
98 return builtin_type_void_func_ptr;
99
100 case SPU_SP_REGNUM:
101 return builtin_type_void_data_ptr;
102
103 default:
104 internal_error (__FILE__, __LINE__, "invalid regnum");
105 }
106 }
107
108 /* Pseudo registers for preferred slots - stack pointer. */
109
110 static void
111 spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
112 int regnum, gdb_byte *buf)
113 {
114 gdb_byte reg[16];
115
116 switch (regnum)
117 {
118 case SPU_SP_REGNUM:
119 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
120 memcpy (buf, reg, 4);
121 break;
122
123 default:
124 internal_error (__FILE__, __LINE__, _("invalid regnum"));
125 }
126 }
127
128 static void
129 spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
130 int regnum, const gdb_byte *buf)
131 {
132 gdb_byte reg[16];
133
134 switch (regnum)
135 {
136 case SPU_SP_REGNUM:
137 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
138 memcpy (reg, buf, 4);
139 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
140 break;
141
142 default:
143 internal_error (__FILE__, __LINE__, _("invalid regnum"));
144 }
145 }
146
147 /* Value conversion -- access scalar values at the preferred slot. */
148
149 static struct value *
150 spu_value_from_register (struct type *type, int regnum,
151 struct frame_info *frame)
152 {
153 struct value *value = default_value_from_register (type, regnum, frame);
154 int len = TYPE_LENGTH (type);
155
156 if (regnum < SPU_NUM_GPRS && len < 16)
157 {
158 int preferred_slot = len < 4 ? 4 - len : 0;
159 set_value_offset (value, preferred_slot);
160 }
161
162 return value;
163 }
164
165 /* Register groups. */
166
167 static int
168 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
169 struct reggroup *group)
170 {
171 /* Registers displayed via 'info regs'. */
172 if (group == general_reggroup)
173 return 1;
174
175 /* Registers displayed via 'info float'. */
176 if (group == float_reggroup)
177 return 0;
178
179 /* Registers that need to be saved/restored in order to
180 push or pop frames. */
181 if (group == save_reggroup || group == restore_reggroup)
182 return 1;
183
184 return default_register_reggroup_p (gdbarch, regnum, group);
185 }
186
187
188 /* Decoding SPU instructions. */
189
/* Major opcodes of the SPU instructions recognized by the prologue,
   epilogue, and single-step analysis below.  Each value is compared
   against the instruction word shifted right by the position of the
   opcode field (see the is_* decoders).  */
enum
{
  /* Quadword loads and stores.  */
  op_lqd = 0x34,
  op_lqx = 0x3c4,
  op_lqa = 0x61,
  op_lqr = 0x67,
  op_stqd = 0x24,
  op_stqx = 0x144,
  op_stqa = 0x41,
  op_stqr = 0x47,

  /* Immediate loads and adds, used to set up stack frames.  */
  op_il = 0x081,
  op_ila = 0x21,
  op_a = 0x0c0,
  op_ai = 0x1c,

  /* Select bits; used by _start to set up the stack pointer.  */
  op_selb = 0x4,

  /* Branches: PC-relative, absolute, and register-indirect forms.  */
  op_br = 0x64,
  op_bra = 0x60,
  op_brsl = 0x66,
  op_brasl = 0x62,
  op_brnz = 0x42,
  op_brz = 0x40,
  op_brhnz = 0x46,
  op_brhz = 0x44,
  op_bi = 0x1a8,
  op_bisl = 0x1a9,
  op_biz = 0x128,
  op_binz = 0x129,
  op_bihz = 0x12a,
  op_bihnz = 0x12b,
};
223
/* Decode INSN as an RR-format instruction with major opcode OP.
   On a match, store the RT, RA, and RB register fields and return 1;
   otherwise return 0 and leave the outputs untouched.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  return 1;
}
237
/* Decode INSN as an RRR-format instruction with major opcode OP.
   On a match, store the RT, RA, RB, and RC register fields and
   return 1; otherwise return 0 and leave the outputs untouched.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) != op)
    return 0;

  *rt = (insn >> 21) & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  *rc = insn & 127;
  return 1;
}
252
/* Decode INSN as an RI7-format instruction with major opcode OP.
   On a match, store RT, RA, and the sign-extended 7-bit immediate
   and return 1; otherwise return 0.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick sign-extends the 7-bit field.  */
  *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
  return 1;
}
266
/* Decode INSN as an RI10-format instruction with major opcode OP.
   On a match, store RT, RA, and the sign-extended 10-bit immediate
   and return 1; otherwise return 0.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick sign-extends the 10-bit field.  */
  *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
  return 1;
}
280
/* Decode INSN as an RI16-format instruction with major opcode OP.
   On a match, store RT and the sign-extended 16-bit immediate and
   return 1; otherwise return 0.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick sign-extends the 16-bit field.  */
  *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
  return 1;
}
293
/* Decode INSN as an RI18-format instruction with major opcode OP.
   On a match, store RT and the sign-extended 18-bit immediate and
   return 1; otherwise return 0.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick sign-extends the 18-bit field.  */
  *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
  return 1;
}
306
307 static int
308 is_branch (unsigned int insn, int *offset, int *reg)
309 {
310 int rt, i7, i16;
311
312 if (is_ri16 (insn, op_br, &rt, &i16)
313 || is_ri16 (insn, op_brsl, &rt, &i16)
314 || is_ri16 (insn, op_brnz, &rt, &i16)
315 || is_ri16 (insn, op_brz, &rt, &i16)
316 || is_ri16 (insn, op_brhnz, &rt, &i16)
317 || is_ri16 (insn, op_brhz, &rt, &i16))
318 {
319 *reg = SPU_PC_REGNUM;
320 *offset = i16 << 2;
321 return 1;
322 }
323
324 if (is_ri16 (insn, op_bra, &rt, &i16)
325 || is_ri16 (insn, op_brasl, &rt, &i16))
326 {
327 *reg = -1;
328 *offset = i16 << 2;
329 return 1;
330 }
331
332 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
333 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
334 || is_ri7 (insn, op_biz, &rt, reg, &i7)
335 || is_ri7 (insn, op_binz, &rt, reg, &i7)
336 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
337 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
338 {
339 *offset = 0;
340 return 1;
341 }
342
343 return 0;
344 }
345
346
347 /* Prolog parsing. */
348
/* Results of prologue analysis (filled in by spu_analyze_prologue).  */
struct spu_prologue_data
{
  /* Stack frame size.  -1 if analysis was unsuccessful.  */
  int size;

  /* How to find the CFA.  The CFA is equal to SP at function entry.
     CFA_REG is -1 if the CFA could not be tracked.  */
  int cfa_reg;
  int cfa_offset;

  /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
  int reg_offset[SPU_NUM_GPRS];
};
361
/* Analyze the prologue of the function starting at START_PC, scanning
   no further than END_PC.  Fill in DATA with the frame size, CFA
   tracking information, and register save slots found, and return the
   address of the first instruction after the prologue proper (or
   START_PC itself if no prologue instruction was identified).  */
static CORE_ADDR
spu_analyze_prologue (CORE_ADDR start_pc, CORE_ADDR end_pc,
		      struct spu_prologue_data *data)
{
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
    reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

	- The first instruction to set up the stack pointer.
	- The first instruction to set up the frame pointer.
	- The first instruction to save the link register.

     We return the instruction after the latest of these three,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

	- Any instruction adding to the current frame pointer.
	- Any instruction loading an immediate constant into a register.
	- Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      /* Stop quietly on an unreadable instruction.  */
      if (target_read_memory (pc, buf, 4))
	break;
      insn = extract_unsigned_integer (buf, 4);

      /* AI is the typical instruction to set up a stack frame.
	 It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    data->cfa_offset -= immed;

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      /* Frames grow downward; the (negative) addend is the size.  */
	      data->size = -immed;
	    }
	  else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
		   && !found_fp)
	    {
	      found_fp = 1;
	      prolog_pc = pc + 4;

	      data->cfa_reg = SPU_FP_REGNUM;
	      data->cfa_offset -= immed;
	    }
	}

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
	 we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    {
	      if (reg_immed[rb] != 0)
		data->cfa_offset -= reg_immed[rb];
	      else
		data->cfa_reg = -1;  /* We don't know the CFA any more.  */
	    }

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      if (reg_immed[rb] != 0)
		data->size = -reg_immed[rb];
	    }
	}

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
	{
	  reg_immed[rt] = immed;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      else if (is_ri18 (insn, op_ila, &rt, &immed))
	{
	  /* ILA's 18-bit immediate is zero-extended.  */
	  reg_immed[rt] = immed & 0x3ffff;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
	{
	  /* The d-form offset is scaled by 16 (quadword units).  */
	  if (ra == data->cfa_reg)
	    data->reg_offset[rt] = data->cfa_offset - (immed << 4);

	  if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
	    {
	      found_lr = 1;
	      prolog_pc = pc + 4;
	    }
	}

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
	{
	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
	break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}
533
/* Return the first instruction after the prologue starting at PC.  */
static CORE_ADDR
spu_skip_prologue (CORE_ADDR pc)
{
  /* The analysis results are not needed here; scan to the end of the
     address space -- the analyzer stops at the first branch or at the
     first unreadable instruction anyway.  */
  struct spu_prologue_data data;
  return spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
}
541
542 /* Return the frame pointer in use at address PC. */
543 static void
544 spu_virtual_frame_pointer (CORE_ADDR pc, int *reg, LONGEST *offset)
545 {
546 struct spu_prologue_data data;
547 spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
548
549 if (data.size != -1 && data.cfa_reg != -1)
550 {
551 /* The 'frame pointer' address is CFA minus frame size. */
552 *reg = data.cfa_reg;
553 *offset = data.cfa_offset - data.size;
554 }
555 else
556 {
557 /* ??? We don't really know ... */
558 *reg = SPU_SP_REGNUM;
559 *offset = 0;
560 }
561 }
562
563 /* Return true if we are in the function's epilogue, i.e. after the
564 instruction that destroyed the function's stack frame.
565
566 1) scan forward from the point of execution:
567 a) If you find an instruction that modifies the stack pointer
568 or transfers control (except a return), execution is not in
569 an epilogue, return.
570 b) Stop scanning if you find a return instruction or reach the
571 end of the function or reach the hard limit for the size of
572 an epilogue.
573 2) scan backward from the point of execution:
574 a) If you find an instruction that modifies the stack pointer,
575 execution *is* in an epilogue, return.
576 b) Stop scanning if you reach an instruction that transfers
577 control or the beginning of the function or reach the hard
578 limit for the size of an epilogue. */
579
/* Implement the two-pass epilogue detection described in the comment
   above: forward to a 'bi $lr' return, then backward to a stack-pointer
   adjustment.  Returns nonzero iff PC is inside an epilogue.  */
static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, rc, immed;  /* rc is unused; kept for symmetry.  */

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4);

      if (is_branch (insn, &immed, &ra))
	{
	  /* An indirect branch through LR is the return; any other
	     branch means we are not in an epilogue.  */
	  if (immed == 0 && ra == SPU_LR_REGNUM)
	    break;

	  return 0;
	}

      /* Any instruction writing the stack pointer before the return
	 disqualifies this as an epilogue.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 0;
	}
    }

  /* No return instruction found within the limit.  */
  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4);

      if (is_branch (insn, &immed, &ra))
	return 0;

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 1;
	}
    }

  return 0;
}
656
657
658 /* Normal stack frames. */
659
/* Per-frame unwind data, built lazily by spu_frame_unwind_cache.  */
struct spu_unwind_cache
{
  /* Function start address, used as the code part of the frame ID.  */
  CORE_ADDR func;
  /* The CFA (== caller's SP); 0 if the frame could not be unwound.  */
  CORE_ADDR frame_base;
  /* Base of the frame's local area (CFA minus frame size when prologue
     analysis succeeded; the current SP in the backchain fallback).  */
  CORE_ADDR local_base;

  /* Locations where the caller's registers were saved.  */
  struct trad_frame_saved_reg *saved_regs;
};
668
/* Build (or return the cached) unwind info for the frame whose callee
   is NEXT_FRAME.  Tries prologue analysis first and falls back to the
   stack backchain if that fails.  */
static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *next_frame,
			void **this_prologue_cache)
{
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (next_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = frame_func_unwind (next_frame, NORMAL_FRAME);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = frame_pc_unwind (next_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (info->func, frame_pc_unwind (next_frame), &data);


  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      frame_unwind_register (next_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4) + data.cfa_offset;

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
	if (i == SPU_LR_REGNUM
	    || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
	  if (data.reg_offset[i] != -1)
	    info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg, backchain;

      /* Get the backchain.  */
      reg = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
      backchain = read_memory_unsigned_integer (reg, 4);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (backchain != 0 && backchain < SPU_LS_SIZE)
	{
	  /* Assume the link register is saved into its slot.  */
	  if (backchain + 16 < SPU_LS_SIZE)
	    info->saved_regs[SPU_LR_REGNUM].addr = backchain + 16;

	  /* Frame bases.  */
	  info->frame_base = backchain;
	  info->local_base = reg;
	}
    }

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM, info->frame_base);

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    frame_unwind_register (next_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf + 8, 4));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf, 4));

  return info;
}
768
769 static void
770 spu_frame_this_id (struct frame_info *next_frame,
771 void **this_prologue_cache, struct frame_id *this_id)
772 {
773 struct spu_unwind_cache *info =
774 spu_frame_unwind_cache (next_frame, this_prologue_cache);
775
776 if (info->frame_base == 0)
777 return;
778
779 *this_id = frame_id_build (info->frame_base, info->func);
780 }
781
782 static void
783 spu_frame_prev_register (struct frame_info *next_frame,
784 void **this_prologue_cache,
785 int regnum, int *optimizedp,
786 enum lval_type *lvalp, CORE_ADDR * addrp,
787 int *realnump, gdb_byte *bufferp)
788 {
789 struct spu_unwind_cache *info
790 = spu_frame_unwind_cache (next_frame, this_prologue_cache);
791
792 /* Special-case the stack pointer. */
793 if (regnum == SPU_RAW_SP_REGNUM)
794 regnum = SPU_SP_REGNUM;
795
796 trad_frame_get_prev_register (next_frame, info->saved_regs, regnum,
797 optimizedp, lvalp, addrp, realnump, bufferp);
798 }
799
/* The SPU unwinder for normal (non-dummy, non-signal) frames.  */
static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  spu_frame_this_id,
  spu_frame_prev_register
};
805
/* Frame sniffer: the SPU unwinder handles every frame.  */
const struct frame_unwind *
spu_frame_sniffer (struct frame_info *next_frame)
{
  return &spu_frame_unwind;
}
811
812 static CORE_ADDR
813 spu_frame_base_address (struct frame_info *next_frame, void **this_cache)
814 {
815 struct spu_unwind_cache *info
816 = spu_frame_unwind_cache (next_frame, this_cache);
817 return info->local_base;
818 }
819
/* Frame base handlers; all three bases (frame, locals, args) resolve
   to the local base address.  */
static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};
826
/* Unwind this frame's PC from NEXT_FRAME.  */
static CORE_ADDR
spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
  /* Mask off interrupt enable bit (low two bits of the PC register).  */
  return pc & -4;
}
834
/* Unwind this frame's (cooked) stack pointer from NEXT_FRAME.  */
static CORE_ADDR
spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  return frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
}
840
/* Read the PC of thread PTID, hiding the state bits from the core.  */
static CORE_ADDR
spu_read_pc (ptid_t ptid)
{
  CORE_ADDR pc = read_register_pid (SPU_PC_REGNUM, ptid);
  /* Mask off interrupt enable bit (low two bits of the PC register).  */
  return pc & -4;
}
848
/* Write PC of thread PTID, preserving the low two state bits of the
   current PC register value.  */
static void
spu_write_pc (CORE_ADDR pc, ptid_t ptid)
{
  /* Keep interrupt enabled state unchanged.  */
  CORE_ADDR old_pc = read_register_pid (SPU_PC_REGNUM, ptid);
  write_register_pid (SPU_PC_REGNUM, (pc & -4) | (old_pc & 3), ptid);
}
856
857
858 /* Function calling convention. */
859
860 static CORE_ADDR
861 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
862 {
863 return sp & ~15;
864 }
865
866 static int
867 spu_scalar_value_p (struct type *type)
868 {
869 switch (TYPE_CODE (type))
870 {
871 case TYPE_CODE_INT:
872 case TYPE_CODE_ENUM:
873 case TYPE_CODE_RANGE:
874 case TYPE_CODE_CHAR:
875 case TYPE_CODE_BOOL:
876 case TYPE_CODE_PTR:
877 case TYPE_CODE_REF:
878 return TYPE_LENGTH (type) <= 16;
879
880 default:
881 return 0;
882 }
883 }
884
885 static void
886 spu_value_to_regcache (struct regcache *regcache, int regnum,
887 struct type *type, const gdb_byte *in)
888 {
889 int len = TYPE_LENGTH (type);
890
891 if (spu_scalar_value_p (type))
892 {
893 int preferred_slot = len < 4 ? 4 - len : 0;
894 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
895 }
896 else
897 {
898 while (len >= 16)
899 {
900 regcache_cooked_write (regcache, regnum++, in);
901 in += 16;
902 len -= 16;
903 }
904
905 if (len > 0)
906 regcache_cooked_write_part (regcache, regnum, 0, len, in);
907 }
908 }
909
910 static void
911 spu_regcache_to_value (struct regcache *regcache, int regnum,
912 struct type *type, gdb_byte *out)
913 {
914 int len = TYPE_LENGTH (type);
915
916 if (spu_scalar_value_p (type))
917 {
918 int preferred_slot = len < 4 ? 4 - len : 0;
919 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
920 }
921 else
922 {
923 while (len >= 16)
924 {
925 regcache_cooked_read (regcache, regnum++, out);
926 out += 16;
927 len -= 16;
928 }
929
930 if (len > 0)
931 regcache_cooked_read_part (regcache, regnum, 0, len, out);
932 }
933 }
934
/* Set up registers and stack for an inferior function call: store the
   return address in LR, pass ARGS in registers (overflowing onto the
   stack), push the frame header, and return the new SP.  */
static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr,
		     int nargs, struct value **args, CORE_ADDR sp,
		     int struct_return, CORE_ADDR struct_addr)
{
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;
  gdb_byte buf[16];

  /* Set the return address.  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, bp_addr);
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, struct_addr);
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int len = TYPE_LENGTH (type);
      /* Each argument occupies a whole number of 16-byte registers.  */
      int n_regs = align_up (len, 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
	 all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
	{
	  stack_arg = i;
	  break;
	}

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
	{
	  struct type *type = check_typedef (value_type (args[i]));
	  sp -= align_up (TYPE_LENGTH (type), 16);
	}

      /* Fill in stack arguments.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
	{
	  struct value *arg = args[i];
	  struct type *type = check_typedef (value_type (arg));
	  int len = TYPE_LENGTH (type);
	  int preferred_slot;

	  /* Scalars are stored right-aligned within the first word of
	     their 16-byte slot, mirroring register passing.  */
	  if (spu_scalar_value_p (type))
	    preferred_slot = len < 4 ? 4 - len : 0;
	  else
	    preferred_slot = 0;

	  target_write_memory (ap + preferred_slot, value_contents (arg), len);
	  ap += align_up (TYPE_LENGTH (type), 16);
	}
    }

  /* Allocate stack frame header.  */
  sp -= 32;

  /* Store stack back chain.  */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, SPU_SP_REGNUM, sp);

  return sp;
}
1025
/* Frame ID for a dummy (inferior-call) frame: unwound SP and PC.  */
static struct frame_id
spu_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  return frame_id_build (spu_unwind_sp (gdbarch, next_frame),
			 spu_unwind_pc (gdbarch, next_frame));
}
1032
1033 /* Function return value access. */
1034
1035 static enum return_value_convention
1036 spu_return_value (struct gdbarch *gdbarch, struct type *type,
1037 struct regcache *regcache, gdb_byte *out, const gdb_byte *in)
1038 {
1039 enum return_value_convention rvc;
1040
1041 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1042 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1043 else
1044 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1045
1046 if (in)
1047 {
1048 switch (rvc)
1049 {
1050 case RETURN_VALUE_REGISTER_CONVENTION:
1051 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1052 break;
1053
1054 case RETURN_VALUE_STRUCT_CONVENTION:
1055 error ("Cannot set function return value.");
1056 break;
1057 }
1058 }
1059 else if (out)
1060 {
1061 switch (rvc)
1062 {
1063 case RETURN_VALUE_REGISTER_CONVENTION:
1064 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1065 break;
1066
1067 case RETURN_VALUE_STRUCT_CONVENTION:
1068 error ("Function return value unknown.");
1069 break;
1070 }
1071 }
1072
1073 return rvc;
1074 }
1075
1076
1077 /* Breakpoints. */
1078
/* Return the software breakpoint instruction for PC (big-endian
   encoding of 0x00003fff); *PCPTR is left unadjusted.
   NOTE(review): presumably an SPU 'stop' instruction with a
   debugger-reserved stop code -- confirm against the SPU ISA.  */
static const gdb_byte *
spu_breakpoint_from_pc (CORE_ADDR * pcptr, int *lenptr)
{
  static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };

  *lenptr = sizeof breakpoint;
  return breakpoint;
}
1087
1088
1089 /* Software single-stepping support. */
1090
/* Software single-step: plant a breakpoint at the next sequential
   instruction and, if the current instruction is a branch, at the
   branch target as well.  Always returns 1.  */
int
spu_software_single_step (struct regcache *regcache)
{
  CORE_ADDR pc, next_pc;
  unsigned int insn;
  int offset, reg;
  gdb_byte buf[4];

  regcache_cooked_read (regcache, SPU_PC_REGNUM, buf);
  /* Mask off interrupt enable bit.  */
  pc = extract_unsigned_integer (buf, 4) & -4;

  /* If the current instruction cannot be read, give up quietly.  */
  if (target_read_memory (pc, buf, 4))
    return 1;
  insn = extract_unsigned_integer (buf, 4);

  /* Next sequential instruction is at PC + 4, except if the current
     instruction is a PPE-assisted call, in which case it is at PC + 8.
     Wrap around LS limit to be on the safe side.  */
  if ((insn & 0xffffff00) == 0x00002100)
    next_pc = (pc + 8) & (SPU_LS_SIZE - 1);
  else
    next_pc = (pc + 4) & (SPU_LS_SIZE - 1);

  insert_single_step_breakpoint (next_pc);

  if (is_branch (insn, &offset, &reg))
    {
      CORE_ADDR target = offset;

      /* Resolve the branch target: PC-relative, absolute (reg == -1),
	 or indirect through the preferred slot of REG.  */
      if (reg == SPU_PC_REGNUM)
	target += pc;
      else if (reg != -1)
	{
	  regcache_cooked_read_part (regcache, reg, 0, 4, buf);
	  target += extract_unsigned_integer (buf, 4) & -4;
	}

      target = target & (SPU_LS_SIZE - 1);
      if (target != next_pc)
	insert_single_step_breakpoint (target);
    }

  return 1;
}
1136
1137 /* Target overlays for the SPU overlay manager.
1138
1139 See the documentation of simple_overlay_update for how the
1140 interface is supposed to work.
1141
1142 Data structures used by the overlay manager:
1143
1144 struct ovly_table
1145 {
1146 u32 vma;
1147 u32 size;
1148 u32 pos;
1149 u32 buf;
1150 } _ovly_table[]; -- one entry per overlay section
1151
1152 struct ovly_buf_table
1153 {
1154 u32 mapped;
1155 } _ovly_buf_table[]; -- one entry per overlay buffer
1156
1157 _ovly_table should never change.
1158
1159 Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1160 and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1161 of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1162
1163 mapped is an index into _ovly_table. Both the mapped and buf indices start
1164 from one to reference the first entry in their respective tables. */
1165
/* Using the per-objfile private data mechanism, we store for each
   objfile an array of "struct spu_overlay_table" structures, one
   for each obj_section of the objfile.  This structure holds two
   fields, MAPPED_PTR and MAPPED_VAL.  If MAPPED_PTR is zero, this
   is *not* an overlay section.  If it is non-zero, it represents
   a target address.  The overlay section is mapped iff the target
   integer at this location equals MAPPED_VAL.  */

/* Key used to look up the per-objfile overlay table; registered in
   _initialize_spu_tdep.  */
static const struct objfile_data *spu_overlay_data;

struct spu_overlay_table
  {
    /* Target address of this section's _ovly_buf_table "mapped" word,
       or zero if the section is not an overlay section.  */
    CORE_ADDR mapped_ptr;
    /* One-based _ovly_table index; the section is mapped iff the
       target word at MAPPED_PTR equals this value.  */
    CORE_ADDR mapped_val;
  };
1181
/* Retrieve the overlay table for OBJFILE.  If not already cached, read
   the _ovly_table data structure from the target and initialize the
   spu_overlay_table data structure from it.  Returns NULL if OBJFILE
   does not use the SPU overlay manager (i.e. lacks the _ovly_table or
   _ovly_buf_table symbols).  */
static struct spu_overlay_table *
spu_get_overlay_table (struct objfile *objfile)
{
  struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
  CORE_ADDR ovly_table_base, ovly_buf_table_base;
  unsigned ovly_table_size, ovly_buf_table_size;
  struct spu_overlay_table *tbl;
  struct obj_section *osect;
  char *ovly_table;
  int i;

  /* Return the cached table if we have built it before.  */
  tbl = objfile_data (objfile, spu_overlay_data);
  if (tbl)
    return tbl;

  /* Both overlay-manager symbols must be present; otherwise this
     objfile has no overlays and we cache nothing.  */
  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
  if (!ovly_table_msym)
    return NULL;

  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
  if (!ovly_buf_table_msym)
    return NULL;

  /* The symbols' sizes give the total byte size of each array (see the
     layout description above: 16 bytes per _ovly_table entry, 4 bytes
     per _ovly_buf_table entry).  */
  ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);

  ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);

  /* Pull the whole _ovly_table from the target in one read.
     NOTE(review): if read_memory throws, this xmalloc'd buffer is not
     released here — confirm whether a cleanup should guard it.  */
  ovly_table = xmalloc (ovly_table_size);
  read_memory (ovly_table_base, ovly_table, ovly_table_size);

  /* One (zero-initialized) entry per obj_section; lives on the
     objfile obstack, so it is freed together with the objfile.  */
  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
			objfile->sections_end - objfile->sections,
			struct spu_overlay_table);

  for (i = 0; i < ovly_table_size / 16; i++)
    {
      /* Decode one 16-byte big-endian _ovly_table entry.  SIZE is
	 extracted to mirror the table layout but is not otherwise
	 used below.  */
      CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0, 4);
      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4, 4);
      CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8, 4);
      CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12, 4);

      /* BUF is a one-based index into _ovly_buf_table; skip entries
	 that are zero or out of range.  */
      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
	continue;

      /* Match this entry to an obj_section by VMA and file position,
	 and record where its "mapped" word lives on the target.  */
      ALL_OBJFILE_OSECTIONS (objfile, osect)
	if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
	    && pos == osect->the_bfd_section->filepos)
	  {
	    int ndx = osect - objfile->sections;
	    tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
	    /* mapped indices are one-based, like BUF above.  */
	    tbl[ndx].mapped_val = i + 1;
	    break;
	  }
    }

  xfree (ovly_table);
  set_objfile_data (objfile, spu_overlay_data, tbl);
  return tbl;
}
1246
1247 /* Read _ovly_buf_table entry from the target to dermine whether
1248 OSECT is currently mapped, and update the mapped state. */
1249 static void
1250 spu_overlay_update_osect (struct obj_section *osect)
1251 {
1252 struct spu_overlay_table *ovly_table;
1253 CORE_ADDR val;
1254
1255 ovly_table = spu_get_overlay_table (osect->objfile);
1256 if (!ovly_table)
1257 return;
1258
1259 ovly_table += osect - osect->objfile->sections;
1260 if (ovly_table->mapped_ptr == 0)
1261 return;
1262
1263 val = read_memory_unsigned_integer (ovly_table->mapped_ptr, 4);
1264 osect->ovly_mapped = (val == ovly_table->mapped_val);
1265 }
1266
1267 /* If OSECT is NULL, then update all sections' mapped state.
1268 If OSECT is non-NULL, then update only OSECT's mapped state. */
1269 static void
1270 spu_overlay_update (struct obj_section *osect)
1271 {
1272 /* Just one section. */
1273 if (osect)
1274 spu_overlay_update_osect (osect);
1275
1276 /* All sections. */
1277 else
1278 {
1279 struct objfile *objfile;
1280
1281 ALL_OBJSECTIONS (objfile, osect)
1282 if (section_is_overlay (osect->the_bfd_section))
1283 spu_overlay_update_osect (osect);
1284 }
1285 }
1286
1287 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1288 If there is one, go through all sections and make sure for non-
1289 overlay sections LMA equals VMA, while for overlay sections LMA
1290 is larger than local store size. */
1291 static void
1292 spu_overlay_new_objfile (struct objfile *objfile)
1293 {
1294 struct spu_overlay_table *ovly_table;
1295 struct obj_section *osect;
1296
1297 /* If we've already touched this file, do nothing. */
1298 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1299 return;
1300
1301 /* Check if this objfile has overlays. */
1302 ovly_table = spu_get_overlay_table (objfile);
1303 if (!ovly_table)
1304 return;
1305
1306 /* Now go and fiddle with all the LMAs. */
1307 ALL_OBJFILE_OSECTIONS (objfile, osect)
1308 {
1309 bfd *obfd = objfile->obfd;
1310 asection *bsect = osect->the_bfd_section;
1311 int ndx = osect - objfile->sections;
1312
1313 if (ovly_table[ndx].mapped_ptr == 0)
1314 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1315 else
1316 bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
1317 }
1318 }
1319
1320
/* Set up gdbarch struct.  */

/* Architecture initialization callback registered for bfd_arch_spu in
   _initialize_spu_tdep.  Reuses a matching gdbarch from ARCHES if one
   exists; otherwise allocates and populates a new one.  */
static struct gdbarch *
spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch *gdbarch;

  /* Find a candidate among the list of pre-declared architectures.  */
  arches = gdbarch_list_lookup_by_info (arches, &info);
  if (arches != NULL)
    return arches->gdbarch;

  /* Is it for us?  */
  if (info.bfd_arch_info->mach != bfd_mach_spu)
    return NULL;

  /* Yes, create a new architecture.  */
  gdbarch = gdbarch_alloc (&info, NULL);

  /* Disassembler.  */
  set_gdbarch_print_insn (gdbarch, print_insn_spu);

  /* Registers.  */
  set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
  set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
  set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
  set_gdbarch_read_pc (gdbarch, spu_read_pc);
  set_gdbarch_write_pc (gdbarch, spu_write_pc);
  set_gdbarch_register_name (gdbarch, spu_register_name);
  set_gdbarch_register_type (gdbarch, spu_register_type);
  set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
  set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
  set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);

  /* Data types.  */
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_ptr_bit (gdbarch, 32);
  set_gdbarch_addr_bit (gdbarch, 32);
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 64);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  /* long double is only 64 bits wide on SPU here, hence the
     double-precision format.  */
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);

  /* Inferior function calls.  */
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
  set_gdbarch_frame_align (gdbarch, spu_frame_align);
  set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
  set_gdbarch_unwind_dummy_id (gdbarch, spu_unwind_dummy_id);
  set_gdbarch_return_value (gdbarch, spu_return_value);

  /* Frame handling.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
  frame_unwind_append_sniffer (gdbarch, spu_frame_sniffer);
  frame_base_set_default (gdbarch, &spu_frame_base);
  set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
  set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
  set_gdbarch_frame_args_skip (gdbarch, 0);
  set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
  set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);

  /* Breakpoints.  */
  set_gdbarch_decr_pc_after_break (gdbarch, 4);
  set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, spu_software_single_step);

  /* Overlays.  */
  set_gdbarch_overlay_update (gdbarch, spu_overlay_update);

  return gdbarch;
}
1401
1402 /* Implement a SPU-specific vector type as replacement
1403 for __gdb_builtin_type_vec128. */
1404 static void
1405 spu_init_vector_type (void)
1406 {
1407 struct type *type;
1408
1409 type = init_composite_type ("__spu_builtin_type_vec128", TYPE_CODE_UNION);
1410 append_composite_type_field (type, "uint128", builtin_type_int128);
1411 append_composite_type_field (type, "v2_int64", builtin_type_v2_int64);
1412 append_composite_type_field (type, "v4_int32", builtin_type_v4_int32);
1413 append_composite_type_field (type, "v8_int16", builtin_type_v8_int16);
1414 append_composite_type_field (type, "v16_int8", builtin_type_v16_int8);
1415 append_composite_type_field (type, "v2_double", builtin_type_v2_double);
1416 append_composite_type_field (type, "v4_float", builtin_type_v4_float);
1417
1418 TYPE_FLAGS (type) |= TYPE_FLAG_VECTOR;
1419 TYPE_NAME (type) = "spu_builtin_type_vec128";
1420 spu_builtin_type_vec128 = type;
1421 }
1422
1423 void
1424 _initialize_spu_tdep (void)
1425 {
1426 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
1427
1428 spu_init_vector_type ();
1429
1430 /* Add ourselves to objfile event chain. */
1431 observer_attach_new_objfile (spu_overlay_new_objfile);
1432 spu_overlay_data = register_objfile_data ();
1433 }
This page took 0.05873 seconds and 4 git commands to generate.