1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdbtypes.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "gdb_string.h"
28 #include "gdb_assert.h"
29 #include "frame.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
33 #include "symtab.h"
34 #include "symfile.h"
35 #include "value.h"
36 #include "inferior.h"
37 #include "dis-asm.h"
38 #include "objfiles.h"
39 #include "language.h"
40 #include "regcache.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
43 #include "observer.h"
44
45 #include "spu-tdep.h"
46
47
48 /* The tdep structure. */
49 struct gdbarch_tdep
50 {
51 /* SPU-specific vector type. */
52 struct type *spu_builtin_type_vec128;
53 };
54
55
56 /* SPU-specific vector type. */
57 static struct type *
58 spu_builtin_type_vec128 (struct gdbarch *gdbarch)
59 {
60 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
61
62 if (!tdep->spu_builtin_type_vec128)
63 {
64 const struct builtin_type *bt = builtin_type (gdbarch);
65 struct type *t;
66
67 t = arch_composite_type (gdbarch,
68 "__spu_builtin_type_vec128", TYPE_CODE_UNION);
69 append_composite_type_field (t, "uint128", bt->builtin_int128);
70 append_composite_type_field (t, "v2_int64",
71 init_vector_type (bt->builtin_int64, 2));
72 append_composite_type_field (t, "v4_int32",
73 init_vector_type (bt->builtin_int32, 4));
74 append_composite_type_field (t, "v8_int16",
75 init_vector_type (bt->builtin_int16, 8));
76 append_composite_type_field (t, "v16_int8",
77 init_vector_type (bt->builtin_int8, 16));
78 append_composite_type_field (t, "v2_double",
79 init_vector_type (bt->builtin_double, 2));
80 append_composite_type_field (t, "v4_float",
81 init_vector_type (bt->builtin_float, 4));
82
83 TYPE_VECTOR (t) = 1;
84 TYPE_NAME (t) = "spu_builtin_type_vec128";
85
86 tdep->spu_builtin_type_vec128 = t;
87 }
88
89 return tdep->spu_builtin_type_vec128;
90 }
91
92
93 /* The list of available "info spu " commands. */
94 static struct cmd_list_element *infospucmdlist = NULL;
95
96 /* Registers. */
97
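/* Return the name of register REG_NR, or NULL if REG_NR is out of
   range.  */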
98 static const char *
99 spu_register_name (struct gdbarch *gdbarch, int reg_nr)
100 {
101 static char *register_names[] =
102 {
103 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
105 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
106 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
107 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
108 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
109 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
110 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
111 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
112 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
113 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
114 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
115 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
116 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
117 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
118 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
119 "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
120 };
121
122 if (reg_nr < 0)
123 return NULL;
124 if (reg_nr >= sizeof register_names / sizeof *register_names)
125 return NULL;
126
127 return register_names[reg_nr];
128 }
129
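/* Return the GDB type of register REG_NR.  General-purpose registers
   use the 128-bit vector union; the special registers are plain
   integer or pointer types.  */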
130 static struct type *
131 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
132 {
133 if (reg_nr < SPU_NUM_GPRS)
134 return spu_builtin_type_vec128 (gdbarch);
135
136 switch (reg_nr)
137 {
138 case SPU_ID_REGNUM:
139 return builtin_type (gdbarch)->builtin_uint32;
140
141 case SPU_PC_REGNUM:
142 return builtin_type (gdbarch)->builtin_func_ptr;
143
144 case SPU_SP_REGNUM:
145 return builtin_type (gdbarch)->builtin_data_ptr;
146
147 case SPU_FPSCR_REGNUM:
148 return builtin_type (gdbarch)->builtin_uint128;
149
150 case SPU_SRR0_REGNUM:
151 return builtin_type (gdbarch)->builtin_uint32;
152
153 case SPU_LSLR_REGNUM:
154 return builtin_type (gdbarch)->builtin_uint32;
155
156 case SPU_DECR_REGNUM:
157 return builtin_type (gdbarch)->builtin_uint32;
158
159 case SPU_DECR_STATUS_REGNUM:
160 return builtin_type (gdbarch)->builtin_uint32;
161
162 default:
163 internal_error (__FILE__, __LINE__, _("invalid regnum"));
164 }
165 }
166
167 /* Pseudo registers for preferred slots - stack pointer. */
168
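/* Read the SPU pseudo register named REGNAME for the current SPU
   context via the TARGET_OBJECT_SPU interface and store its 32-bit
   value into BUF.  */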
169 static void
170 spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
171 gdb_byte *buf)
172 {
173 struct gdbarch *gdbarch = get_regcache_arch (regcache);
174 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
175 gdb_byte reg[32];
176 char annex[32];
177 ULONGEST id;
178
179 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
180 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
181 memset (reg, 0, sizeof reg);
182 target_read (&current_target, TARGET_OBJECT_SPU, annex,
183 reg, 0, sizeof reg);
184
185 store_unsigned_integer (buf, 4, byte_order, strtoulst (reg, NULL, 16));
186 }
187
188 static void
189 spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
190 int regnum, gdb_byte *buf)
191 {
192 gdb_byte reg[16];
193 char annex[32];
194 ULONGEST id;
195
196 switch (regnum)
197 {
198 case SPU_SP_REGNUM:
199 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
200 memcpy (buf, reg, 4);
201 break;
202
203 case SPU_FPSCR_REGNUM:
204 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
205 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
206 target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
207 break;
208
209 case SPU_SRR0_REGNUM:
210 spu_pseudo_register_read_spu (regcache, "srr0", buf);
211 break;
212
213 case SPU_LSLR_REGNUM:
214 spu_pseudo_register_read_spu (regcache, "lslr", buf);
215 break;
216
217 case SPU_DECR_REGNUM:
218 spu_pseudo_register_read_spu (regcache, "decr", buf);
219 break;
220
221 case SPU_DECR_STATUS_REGNUM:
222 spu_pseudo_register_read_spu (regcache, "decr_status", buf);
223 break;
224
225 default:
226 internal_error (__FILE__, __LINE__, _("invalid regnum"));
227 }
228 }
229
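/* Write the 32-bit value in BUF to the SPU pseudo register named
   REGNAME of the current SPU context via the TARGET_OBJECT_SPU
   interface.  */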
230 static void
231 spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
232 const gdb_byte *buf)
233 {
234 struct gdbarch *gdbarch = get_regcache_arch (regcache);
235 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
236 gdb_byte reg[32];
237 char annex[32];
238 ULONGEST id;
239
240 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
241 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
242 xsnprintf (reg, sizeof reg, "0x%s",
243 phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
244 target_write (&current_target, TARGET_OBJECT_SPU, annex,
245 reg, 0, strlen (reg));
246 }
247
248 static void
249 spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
250 int regnum, const gdb_byte *buf)
251 {
252 gdb_byte reg[16];
253 char annex[32];
254 ULONGEST id;
255
256 switch (regnum)
257 {
258 case SPU_SP_REGNUM:
259 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
260 memcpy (reg, buf, 4);
261 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
262 break;
263
264 case SPU_FPSCR_REGNUM:
265 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
266 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
267 target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
268 break;
269
270 case SPU_SRR0_REGNUM:
271 spu_pseudo_register_write_spu (regcache, "srr0", buf);
272 break;
273
274 case SPU_LSLR_REGNUM:
275 spu_pseudo_register_write_spu (regcache, "lslr", buf);
276 break;
277
278 case SPU_DECR_REGNUM:
279 spu_pseudo_register_write_spu (regcache, "decr", buf);
280 break;
281
282 case SPU_DECR_STATUS_REGNUM:
283 spu_pseudo_register_write_spu (regcache, "decr_status", buf);
284 break;
285
286 default:
287 internal_error (__FILE__, __LINE__, _("invalid regnum"));
288 }
289 }
290
291 /* Value conversion -- access scalar values at the preferred slot. */
292
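/* Scalars live in the "preferred slot", the most significant word of
   a 128-bit register; values shorter than a word are right-aligned
   within that slot (e.g. a 2-byte value starts at byte offset 2).  */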
293 static struct value *
294 spu_value_from_register (struct type *type, int regnum,
295 struct frame_info *frame)
296 {
297 struct value *value = default_value_from_register (type, regnum, frame);
298 int len = TYPE_LENGTH (type);
299
300 if (regnum < SPU_NUM_GPRS && len < 16)
301 {
302 int preferred_slot = len < 4 ? 4 - len : 0;
303 set_value_offset (value, preferred_slot);
304 }
305
306 return value;
307 }
308
309 /* Register groups. */
310
311 static int
312 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
313 struct reggroup *group)
314 {
315 /* Registers displayed via 'info regs'. */
316 if (group == general_reggroup)
317 return 1;
318
319 /* Registers displayed via 'info float'. */
320 if (group == float_reggroup)
321 return 0;
322
323 /* Registers that need to be saved/restored in order to
324 push or pop frames. */
325 if (group == save_reggroup || group == restore_reggroup)
326 return 1;
327
328 return default_register_reggroup_p (gdbarch, regnum, group);
329 }
330
331 /* Address conversion. */
332
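/* Pointers are local-store addresses.  Mask them with the local store
   limit, read from the LSLR register when the target provides it and
   defaulting to the hard-wired local store size otherwise.  */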
333 static CORE_ADDR
334 spu_pointer_to_address (struct gdbarch *gdbarch,
335 struct type *type, const gdb_byte *buf)
336 {
337 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
338 ULONGEST addr
339 = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);
340 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
341
342 if (target_has_registers && target_has_stack && target_has_memory)
343 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
344 SPU_LSLR_REGNUM);
345
346 return addr & lslr;
347 }
348
349 static CORE_ADDR
350 spu_integer_to_address (struct gdbarch *gdbarch,
351 struct type *type, const gdb_byte *buf)
352 {
353 ULONGEST addr = unpack_long (type, buf);
354 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
355
356 if (target_has_registers && target_has_stack && target_has_memory)
357 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
358 SPU_LSLR_REGNUM);
359
360 return addr & lslr;
361 }
362
363
364 /* Decoding SPU instructions. */
365
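/* Primary opcodes of the load/store, immediate-load, add, select, and
   branch instructions recognized by the prologue analyzer, epilogue
   detector, and software single-stepper below.  */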
366 enum
367 {
368 op_lqd = 0x34,
369 op_lqx = 0x3c4,
370 op_lqa = 0x61,
371 op_lqr = 0x67,
372 op_stqd = 0x24,
373 op_stqx = 0x144,
374 op_stqa = 0x41,
375 op_stqr = 0x47,
376
377 op_il = 0x081,
378 op_ila = 0x21,
379 op_a = 0x0c0,
380 op_ai = 0x1c,
381
382 op_selb = 0x4,
383
384 op_br = 0x64,
385 op_bra = 0x60,
386 op_brsl = 0x66,
387 op_brasl = 0x62,
388 op_brnz = 0x42,
389 op_brz = 0x40,
390 op_brhnz = 0x46,
391 op_brhz = 0x44,
392 op_bi = 0x1a8,
393 op_bisl = 0x1a9,
394 op_biz = 0x128,
395 op_binz = 0x129,
396 op_bihz = 0x12a,
397 op_bihnz = 0x12b,
398 };
399
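/* The following helpers match INSN against the various SPU instruction
   formats.  Each returns non-zero if INSN carries opcode OP, filling in
   the register operands and sign-extending any immediate field.  */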
400 static int
401 is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
402 {
403 if ((insn >> 21) == op)
404 {
405 *rt = insn & 127;
406 *ra = (insn >> 7) & 127;
407 *rb = (insn >> 14) & 127;
408 return 1;
409 }
410
411 return 0;
412 }
413
414 static int
415 is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
416 {
417 if ((insn >> 28) == op)
418 {
419 *rt = (insn >> 21) & 127;
420 *ra = (insn >> 7) & 127;
421 *rb = (insn >> 14) & 127;
422 *rc = insn & 127;
423 return 1;
424 }
425
426 return 0;
427 }
428
429 static int
430 is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
431 {
432 if ((insn >> 21) == op)
433 {
434 *rt = insn & 127;
435 *ra = (insn >> 7) & 127;
436 *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
437 return 1;
438 }
439
440 return 0;
441 }
442
443 static int
444 is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
445 {
446 if ((insn >> 24) == op)
447 {
448 *rt = insn & 127;
449 *ra = (insn >> 7) & 127;
450 *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
451 return 1;
452 }
453
454 return 0;
455 }
456
457 static int
458 is_ri16 (unsigned int insn, int op, int *rt, int *i16)
459 {
460 if ((insn >> 23) == op)
461 {
462 *rt = insn & 127;
463 *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
464 return 1;
465 }
466
467 return 0;
468 }
469
470 static int
471 is_ri18 (unsigned int insn, int op, int *rt, int *i18)
472 {
473 if ((insn >> 25) == op)
474 {
475 *rt = insn & 127;
476 *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
477 return 1;
478 }
479
480 return 0;
481 }
482
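/* If INSN is a branch, return non-zero and set *OFFSET to the branch
   displacement in bytes and *REG to the register the target is relative
   to: SPU_PC_REGNUM for PC-relative branches, -1 for absolute branches,
   or the operand register for indirect branches.  */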
483 static int
484 is_branch (unsigned int insn, int *offset, int *reg)
485 {
486 int rt, i7, i16;
487
488 if (is_ri16 (insn, op_br, &rt, &i16)
489 || is_ri16 (insn, op_brsl, &rt, &i16)
490 || is_ri16 (insn, op_brnz, &rt, &i16)
491 || is_ri16 (insn, op_brz, &rt, &i16)
492 || is_ri16 (insn, op_brhnz, &rt, &i16)
493 || is_ri16 (insn, op_brhz, &rt, &i16))
494 {
495 *reg = SPU_PC_REGNUM;
496 *offset = i16 << 2;
497 return 1;
498 }
499
500 if (is_ri16 (insn, op_bra, &rt, &i16)
501 || is_ri16 (insn, op_brasl, &rt, &i16))
502 {
503 *reg = -1;
504 *offset = i16 << 2;
505 return 1;
506 }
507
508 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
509 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
510 || is_ri7 (insn, op_biz, &rt, reg, &i7)
511 || is_ri7 (insn, op_binz, &rt, reg, &i7)
512 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
513 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
514 {
515 *offset = 0;
516 return 1;
517 }
518
519 return 0;
520 }
521
522
523 /* Prolog parsing. */
524
525 struct spu_prologue_data
526 {
527 /* Stack frame size. -1 if analysis was unsuccessful. */
528 int size;
529
530 /* How to find the CFA. The CFA is equal to SP at function entry. */
531 int cfa_reg;
532 int cfa_offset;
533
534 /* Offset relative to CFA where a register is saved. -1 if invalid. */
535 int reg_offset[SPU_NUM_GPRS];
536 };
537
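/* Scan the prologue of the function starting at START_PC, stopping at
   END_PC at the latest, and fill in DATA.  Return the address of the
   first instruction past the prologue.  */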
538 static CORE_ADDR
539 spu_analyze_prologue (struct gdbarch *gdbarch,
540 CORE_ADDR start_pc, CORE_ADDR end_pc,
541 struct spu_prologue_data *data)
542 {
543 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
544 int found_sp = 0;
545 int found_fp = 0;
546 int found_lr = 0;
547 int reg_immed[SPU_NUM_GPRS];
548 gdb_byte buf[16];
549 CORE_ADDR prolog_pc = start_pc;
550 CORE_ADDR pc;
551 int i;
552
553
554 /* Initialize DATA to default values. */
555 data->size = -1;
556
557 data->cfa_reg = SPU_RAW_SP_REGNUM;
558 data->cfa_offset = 0;
559
560 for (i = 0; i < SPU_NUM_GPRS; i++)
561 data->reg_offset[i] = -1;
562
563 /* Set up REG_IMMED array. This is non-zero for a register if we know its
564 preferred slot currently holds this immediate value. */
565 for (i = 0; i < SPU_NUM_GPRS; i++)
566 reg_immed[i] = 0;
567
568 /* Scan instructions until the first branch.
569
570 The following instructions are important prolog components:
571
572 - The first instruction to set up the stack pointer.
573 - The first instruction to set up the frame pointer.
574 - The first instruction to save the link register.
575
576 We return the instruction after the latest of these three,
577 or the incoming PC if none is found. The first instruction
578 to set up the stack pointer also defines the frame size.
579
580 Note that instructions saving incoming arguments to their stack
581 slots are not counted as important, because they are hard to
582 identify with certainty. This should not matter much, because
583 arguments are relevant only in code compiled with debug data,
584 and in such code the GDB core will advance until the first source
585 line anyway, using SAL data.
586
587 For purposes of stack unwinding, we analyze the following types
588 of instructions in addition:
589
590 - Any instruction adding to the current frame pointer.
591 - Any instruction loading an immediate constant into a register.
592 - Any instruction storing a register onto the stack.
593
594 These are used to compute the CFA and REG_OFFSET output. */
595
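  /* For illustration only (this sequence is assumed, not taken from the
     original sources), a typical frame-setup sequence recognized by the
     loop below would be:

	  stqd	$lr, 16($sp)		# save the link register
	  stqd	$sp, -<size>($sp)	# store the back chain
	  ai	$sp, $sp, -<size>	# allocate the new frame

     where <size> is the frame size in bytes.  */
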
596 for (pc = start_pc; pc < end_pc; pc += 4)
597 {
598 unsigned int insn;
599 int rt, ra, rb, rc, immed;
600
601 if (target_read_memory (pc, buf, 4))
602 break;
603 insn = extract_unsigned_integer (buf, 4, byte_order);
604
605 /* AI is the typical instruction to set up a stack frame.
606 It is also used to initialize the frame pointer. */
607 if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
608 {
609 if (rt == data->cfa_reg && ra == data->cfa_reg)
610 data->cfa_offset -= immed;
611
612 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
613 && !found_sp)
614 {
615 found_sp = 1;
616 prolog_pc = pc + 4;
617
618 data->size = -immed;
619 }
620 else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
621 && !found_fp)
622 {
623 found_fp = 1;
624 prolog_pc = pc + 4;
625
626 data->cfa_reg = SPU_FP_REGNUM;
627 data->cfa_offset -= immed;
628 }
629 }
630
631 /* A is used to set up stack frames of size >= 512 bytes.
632 If we have tracked the contents of the addend register,
633 we can handle this as well. */
634 else if (is_rr (insn, op_a, &rt, &ra, &rb))
635 {
636 if (rt == data->cfa_reg && ra == data->cfa_reg)
637 {
638 if (reg_immed[rb] != 0)
639 data->cfa_offset -= reg_immed[rb];
640 else
641 data->cfa_reg = -1; /* We don't know the CFA any more. */
642 }
643
644 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
645 && !found_sp)
646 {
647 found_sp = 1;
648 prolog_pc = pc + 4;
649
650 if (reg_immed[rb] != 0)
651 data->size = -reg_immed[rb];
652 }
653 }
654
655 /* We need to track IL and ILA used to load immediate constants
656 in case they are later used as input to an A instruction. */
657 else if (is_ri16 (insn, op_il, &rt, &immed))
658 {
659 reg_immed[rt] = immed;
660
661 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
662 found_sp = 1;
663 }
664
665 else if (is_ri18 (insn, op_ila, &rt, &immed))
666 {
667 reg_immed[rt] = immed & 0x3ffff;
668
669 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
670 found_sp = 1;
671 }
672
673 /* STQD is used to save registers to the stack. */
674 else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
675 {
676 if (ra == data->cfa_reg)
677 data->reg_offset[rt] = data->cfa_offset - (immed << 4);
678
679 if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
680 && !found_lr)
681 {
682 found_lr = 1;
683 prolog_pc = pc + 4;
684 }
685 }
686
687 /* _start uses SELB to set up the stack pointer. */
688 else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
689 {
690 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
691 found_sp = 1;
692 }
693
694 /* We terminate if we find a branch. */
695 else if (is_branch (insn, &immed, &ra))
696 break;
697 }
698
699
700 /* If we successfully parsed until here, and didn't find any instruction
701 modifying SP, we assume we have a frameless function. */
702 if (!found_sp)
703 data->size = 0;
704
705 /* Return cooked instead of raw SP. */
706 if (data->cfa_reg == SPU_RAW_SP_REGNUM)
707 data->cfa_reg = SPU_SP_REGNUM;
708
709 return prolog_pc;
710 }
711
712 /* Return the first instruction after the prologue starting at PC. */
713 static CORE_ADDR
714 spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
715 {
716 struct spu_prologue_data data;
717 return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
718 }
719
720 /* Return the frame pointer in use at address PC. */
721 static void
722 spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
723 int *reg, LONGEST *offset)
724 {
725 struct spu_prologue_data data;
726 spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
727
728 if (data.size != -1 && data.cfa_reg != -1)
729 {
730 /* The 'frame pointer' address is CFA minus frame size. */
731 *reg = data.cfa_reg;
732 *offset = data.cfa_offset - data.size;
733 }
734 else
735 {
736 /* ??? We don't really know ... */
737 *reg = SPU_SP_REGNUM;
738 *offset = 0;
739 }
740 }
741
742 /* Return true if we are in the function's epilogue, i.e. after the
743 instruction that destroyed the function's stack frame.
744
745 1) scan forward from the point of execution:
746 a) If you find an instruction that modifies the stack pointer
747 or transfers control (except a return), execution is not in
748 an epilogue, return.
749 b) Stop scanning if you find a return instruction or reach the
750 end of the function or reach the hard limit for the size of
751 an epilogue.
752 2) scan backward from the point of execution:
753 a) If you find an instruction that modifies the stack pointer,
754 execution *is* in an epilogue, return.
755 b) Stop scanning if you reach an instruction that transfers
756 control or the beginning of the function or reach the hard
757 limit for the size of an epilogue. */
758
759 static int
760 spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
761 {
762 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
763 CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
764 bfd_byte buf[4];
765 unsigned int insn;
766 int rt, ra, rb, rc, immed;
767
768 /* Find the search limits based on function boundaries and hard limit.
769 We assume the epilogue can be up to 64 instructions long. */
770
771 const int spu_max_epilogue_size = 64 * 4;
772
773 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
774 return 0;
775
776 if (pc - func_start < spu_max_epilogue_size)
777 epilogue_start = func_start;
778 else
779 epilogue_start = pc - spu_max_epilogue_size;
780
781 if (func_end - pc < spu_max_epilogue_size)
782 epilogue_end = func_end;
783 else
784 epilogue_end = pc + spu_max_epilogue_size;
785
786 /* Scan forward until next 'bi $0'. */
787
788 for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
789 {
790 if (target_read_memory (scan_pc, buf, 4))
791 return 0;
792 insn = extract_unsigned_integer (buf, 4, byte_order);
793
794 if (is_branch (insn, &immed, &ra))
795 {
796 if (immed == 0 && ra == SPU_LR_REGNUM)
797 break;
798
799 return 0;
800 }
801
802 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
803 || is_rr (insn, op_a, &rt, &ra, &rb)
804 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
805 {
806 if (rt == SPU_RAW_SP_REGNUM)
807 return 0;
808 }
809 }
810
811 if (scan_pc >= epilogue_end)
812 return 0;
813
814 /* Scan backward until adjustment to stack pointer (R1). */
815
816 for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
817 {
818 if (target_read_memory (scan_pc, buf, 4))
819 return 0;
820 insn = extract_unsigned_integer (buf, 4, byte_order);
821
822 if (is_branch (insn, &immed, &ra))
823 return 0;
824
825 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
826 || is_rr (insn, op_a, &rt, &ra, &rb)
827 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
828 {
829 if (rt == SPU_RAW_SP_REGNUM)
830 return 1;
831 }
832 }
833
834 return 0;
835 }
836
837
838 /* Normal stack frames. */
839
840 struct spu_unwind_cache
841 {
842 CORE_ADDR func;
843 CORE_ADDR frame_base;
844 CORE_ADDR local_base;
845
846 struct trad_frame_saved_reg *saved_regs;
847 };
848
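/* Fill in (or return the already-cached) unwind data for THIS_FRAME.
   Prefer the result of prologue analysis; if that fails, fall back to
   following the stack back chain.  */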
849 static struct spu_unwind_cache *
850 spu_frame_unwind_cache (struct frame_info *this_frame,
851 void **this_prologue_cache)
852 {
853 struct gdbarch *gdbarch = get_frame_arch (this_frame);
854 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
855 struct spu_unwind_cache *info;
856 struct spu_prologue_data data;
857 gdb_byte buf[16];
858
859 if (*this_prologue_cache)
860 return *this_prologue_cache;
861
862 info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
863 *this_prologue_cache = info;
864 info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
865 info->frame_base = 0;
866 info->local_base = 0;
867
868 /* Find the start of the current function, and analyze its prologue. */
869 info->func = get_frame_func (this_frame);
870 if (info->func == 0)
871 {
872 /* Fall back to using the current PC as frame ID. */
873 info->func = get_frame_pc (this_frame);
874 data.size = -1;
875 }
876 else
877 spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
878 &data);
879
880 /* If successful, use prologue analysis data. */
881 if (data.size != -1 && data.cfa_reg != -1)
882 {
883 CORE_ADDR cfa;
884 int i;
885
886 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
887 get_frame_register (this_frame, data.cfa_reg, buf);
888 cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
889
890 /* Call-saved register slots. */
891 for (i = 0; i < SPU_NUM_GPRS; i++)
892 if (i == SPU_LR_REGNUM
893 || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
894 if (data.reg_offset[i] != -1)
895 info->saved_regs[i].addr = cfa - data.reg_offset[i];
896
897 /* Frame bases. */
898 info->frame_base = cfa;
899 info->local_base = cfa - data.size;
900 }
901
902 /* Otherwise, fall back to reading the backchain link. */
903 else
904 {
905 CORE_ADDR reg;
906 LONGEST backchain;
907 int status;
908
909 /* Get the backchain. */
910 reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
911 status = safe_read_memory_integer (reg, 4, byte_order, &backchain);
912
913 /* A zero backchain terminates the frame chain. Also, sanity
914 check against the local store size limit. */
915 if (status && backchain > 0 && backchain < SPU_LS_SIZE)
916 {
917 /* Assume the link register is saved into its slot. */
918 if (backchain + 16 < SPU_LS_SIZE)
919 info->saved_regs[SPU_LR_REGNUM].addr = backchain + 16;
920
921 /* Frame bases. */
922 info->frame_base = backchain;
923 info->local_base = reg;
924 }
925 }
926
927 /* If we didn't find a frame, we cannot determine SP / return address. */
928 if (info->frame_base == 0)
929 return info;
930
931 /* The previous SP is equal to the CFA. */
932 trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM, info->frame_base);
933
934 /* Read full contents of the unwound link register in order to
935 be able to determine the return address. */
936 if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
937 target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
938 else
939 get_frame_register (this_frame, SPU_LR_REGNUM, buf);
940
941 /* Normally, the return address is contained in slot 0 of the
942 link register, and slots 1-3 are zero. For an overlay return,
943 slot 0 contains the address of the overlay manager return stub,
944 slot 1 contains the partition number of the overlay section to
945 be returned to, and slot 2 contains the return address within
946 that section. Return the latter address in that case. */
947 if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
948 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
949 extract_unsigned_integer (buf + 8, 4, byte_order));
950 else
951 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
952 extract_unsigned_integer (buf, 4, byte_order));
953
954 return info;
955 }
956
957 static void
958 spu_frame_this_id (struct frame_info *this_frame,
959 void **this_prologue_cache, struct frame_id *this_id)
960 {
961 struct spu_unwind_cache *info =
962 spu_frame_unwind_cache (this_frame, this_prologue_cache);
963
964 if (info->frame_base == 0)
965 return;
966
967 *this_id = frame_id_build (info->frame_base, info->func);
968 }
969
970 static struct value *
971 spu_frame_prev_register (struct frame_info *this_frame,
972 void **this_prologue_cache, int regnum)
973 {
974 struct spu_unwind_cache *info
975 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
976
977 /* Special-case the stack pointer. */
978 if (regnum == SPU_RAW_SP_REGNUM)
979 regnum = SPU_SP_REGNUM;
980
981 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
982 }
983
984 static const struct frame_unwind spu_frame_unwind = {
985 NORMAL_FRAME,
986 spu_frame_this_id,
987 spu_frame_prev_register,
988 NULL,
989 default_frame_sniffer
990 };
991
992 static CORE_ADDR
993 spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
994 {
995 struct spu_unwind_cache *info
996 = spu_frame_unwind_cache (this_frame, this_cache);
997 return info->local_base;
998 }
999
1000 static const struct frame_base spu_frame_base = {
1001 &spu_frame_unwind,
1002 spu_frame_base_address,
1003 spu_frame_base_address,
1004 spu_frame_base_address
1005 };
1006
1007 static CORE_ADDR
1008 spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
1009 {
1010 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
1011 /* Mask off interrupt enable bit. */
1012 return pc & -4;
1013 }
1014
1015 static CORE_ADDR
1016 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1017 {
1018 return frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1019 }
1020
1021 static CORE_ADDR
1022 spu_read_pc (struct regcache *regcache)
1023 {
1024 ULONGEST pc;
1025 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1026 /* Mask off interrupt enable bit. */
1027 return pc & -4;
1028 }
1029
1030 static void
1031 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1032 {
1033 /* Keep interrupt enabled state unchanged. */
1034 ULONGEST old_pc;
1035 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1036 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1037 (pc & -4) | (old_pc & 3));
1038 }
1039
1040
1041 /* Function calling convention. */
1042
1043 static CORE_ADDR
1044 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1045 {
1046 return sp & ~15;
1047 }
1048
1049 static CORE_ADDR
1050 spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1051 struct value **args, int nargs, struct type *value_type,
1052 CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1053 struct regcache *regcache)
1054 {
1055 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1056 sp = (sp - 4) & ~15;
1057 /* Store the address of that breakpoint.  */
1058 *bp_addr = sp;
1059 /* The call starts at the callee's entry point. */
1060 *real_pc = funaddr;
1061
1062 return sp;
1063 }
1064
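/* Return non-zero if TYPE is a scalar that is passed and returned in
   the preferred slot of a single register.  */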
1065 static int
1066 spu_scalar_value_p (struct type *type)
1067 {
1068 switch (TYPE_CODE (type))
1069 {
1070 case TYPE_CODE_INT:
1071 case TYPE_CODE_ENUM:
1072 case TYPE_CODE_RANGE:
1073 case TYPE_CODE_CHAR:
1074 case TYPE_CODE_BOOL:
1075 case TYPE_CODE_PTR:
1076 case TYPE_CODE_REF:
1077 return TYPE_LENGTH (type) <= 16;
1078
1079 default:
1080 return 0;
1081 }
1082 }
1083
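/* Write the value IN of type TYPE to the register(s) starting at
   REGNUM, placing scalars in the preferred slot and spreading larger
   values across consecutive registers.  */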
1084 static void
1085 spu_value_to_regcache (struct regcache *regcache, int regnum,
1086 struct type *type, const gdb_byte *in)
1087 {
1088 int len = TYPE_LENGTH (type);
1089
1090 if (spu_scalar_value_p (type))
1091 {
1092 int preferred_slot = len < 4 ? 4 - len : 0;
1093 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1094 }
1095 else
1096 {
1097 while (len >= 16)
1098 {
1099 regcache_cooked_write (regcache, regnum++, in);
1100 in += 16;
1101 len -= 16;
1102 }
1103
1104 if (len > 0)
1105 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1106 }
1107 }
1108
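/* Read a value of type TYPE from the register(s) starting at REGNUM
   into OUT; the mirror image of spu_value_to_regcache above.  */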
1109 static void
1110 spu_regcache_to_value (struct regcache *regcache, int regnum,
1111 struct type *type, gdb_byte *out)
1112 {
1113 int len = TYPE_LENGTH (type);
1114
1115 if (spu_scalar_value_p (type))
1116 {
1117 int preferred_slot = len < 4 ? 4 - len : 0;
1118 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1119 }
1120 else
1121 {
1122 while (len >= 16)
1123 {
1124 regcache_cooked_read (regcache, regnum++, out);
1125 out += 16;
1126 len -= 16;
1127 }
1128
1129 if (len > 0)
1130 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1131 }
1132 }
1133
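/* Set up an inferior function call: store the return address in the
   link register, pass arguments in registers starting at
   SPU_ARG1_REGNUM with overflow arguments on the stack, write the
   back chain, and update all slots of the stack pointer.  */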
1134 static CORE_ADDR
1135 spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1136 struct regcache *regcache, CORE_ADDR bp_addr,
1137 int nargs, struct value **args, CORE_ADDR sp,
1138 int struct_return, CORE_ADDR struct_addr)
1139 {
1140 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1141 CORE_ADDR sp_delta;
1142 int i;
1143 int regnum = SPU_ARG1_REGNUM;
1144 int stack_arg = -1;
1145 gdb_byte buf[16];
1146
1147 /* Set the return address. */
1148 memset (buf, 0, sizeof buf);
1149 store_unsigned_integer (buf, 4, byte_order, bp_addr);
1150 regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);
1151
1152 /* If STRUCT_RETURN is true, then the struct return address (in
1153 STRUCT_ADDR) will consume the first argument-passing register.
1154 Both adjust the register count and store that value. */
1155 if (struct_return)
1156 {
1157 memset (buf, 0, sizeof buf);
1158 store_unsigned_integer (buf, 4, byte_order, struct_addr);
1159 regcache_cooked_write (regcache, regnum++, buf);
1160 }
1161
1162 /* Fill in argument registers. */
1163 for (i = 0; i < nargs; i++)
1164 {
1165 struct value *arg = args[i];
1166 struct type *type = check_typedef (value_type (arg));
1167 const gdb_byte *contents = value_contents (arg);
1168 int len = TYPE_LENGTH (type);
1169 int n_regs = align_up (len, 16) / 16;
1170
1171 /* If the argument doesn't wholly fit into registers, it and
1172 all subsequent arguments go to the stack. */
1173 if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
1174 {
1175 stack_arg = i;
1176 break;
1177 }
1178
1179 spu_value_to_regcache (regcache, regnum, type, contents);
1180 regnum += n_regs;
1181 }
1182
1183 /* Overflow arguments go to the stack. */
1184 if (stack_arg != -1)
1185 {
1186 CORE_ADDR ap;
1187
1188 /* Allocate all required stack size. */
1189 for (i = stack_arg; i < nargs; i++)
1190 {
1191 struct type *type = check_typedef (value_type (args[i]));
1192 sp -= align_up (TYPE_LENGTH (type), 16);
1193 }
1194
1195 /* Fill in stack arguments. */
1196 ap = sp;
1197 for (i = stack_arg; i < nargs; i++)
1198 {
1199 struct value *arg = args[i];
1200 struct type *type = check_typedef (value_type (arg));
1201 int len = TYPE_LENGTH (type);
1202 int preferred_slot;
1203
1204 if (spu_scalar_value_p (type))
1205 preferred_slot = len < 4 ? 4 - len : 0;
1206 else
1207 preferred_slot = 0;
1208
1209 target_write_memory (ap + preferred_slot, value_contents (arg), len);
1210 ap += align_up (TYPE_LENGTH (type), 16);
1211 }
1212 }
1213
1214 /* Allocate stack frame header. */
1215 sp -= 32;
1216
1217 /* Store stack back chain. */
1218 regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
1219 target_write_memory (sp, buf, 16);
1220
1221 /* Finally, update all slots of the SP register. */
1222 sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
1223 for (i = 0; i < 4; i++)
1224 {
1225 CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
1226 store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
1227 }
1228 regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);
1229
1230 return sp;
1231 }
1232
1233 static struct frame_id
1234 spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1235 {
1236 CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1237 CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1238 return frame_id_build (sp, pc & -4);
1239 }
1240
1241 /* Function return value access. */
1242
1243 static enum return_value_convention
1244 spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
1245 struct type *type, struct regcache *regcache,
1246 gdb_byte *out, const gdb_byte *in)
1247 {
1248 enum return_value_convention rvc;
1249
1250 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1251 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1252 else
1253 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1254
1255 if (in)
1256 {
1257 switch (rvc)
1258 {
1259 case RETURN_VALUE_REGISTER_CONVENTION:
1260 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1261 break;
1262
1263 case RETURN_VALUE_STRUCT_CONVENTION:
1264 error ("Cannot set function return value.");
1265 break;
1266 }
1267 }
1268 else if (out)
1269 {
1270 switch (rvc)
1271 {
1272 case RETURN_VALUE_REGISTER_CONVENTION:
1273 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1274 break;
1275
1276 case RETURN_VALUE_STRUCT_CONVENTION:
1277 error ("Function return value unknown.");
1278 break;
1279 }
1280 }
1281
1282 return rvc;
1283 }
1284
1285
1286 /* Breakpoints. */
1287
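/* Return the instruction used as a software breakpoint (an SPU 'stop'
   instruction).  */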
1288 static const gdb_byte *
1289 spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
1290 {
1291 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1292
1293 *lenptr = sizeof breakpoint;
1294 return breakpoint;
1295 }
1296
1297
1298 /* Software single-stepping support. */
1299
1300 static int
1301 spu_software_single_step (struct frame_info *frame)
1302 {
1303 struct gdbarch *gdbarch = get_frame_arch (frame);
1304 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1305 CORE_ADDR pc, next_pc;
1306 unsigned int insn;
1307 int offset, reg;
1308 gdb_byte buf[4];
1309
1310 pc = get_frame_pc (frame);
1311
1312 if (target_read_memory (pc, buf, 4))
1313 return 1;
1314 insn = extract_unsigned_integer (buf, 4, byte_order);
1315
1316 /* Next sequential instruction is at PC + 4, except if the current
1317 instruction is a PPE-assisted call, in which case it is at PC + 8.
1318 Wrap around LS limit to be on the safe side. */
1319 if ((insn & 0xffffff00) == 0x00002100)
1320 next_pc = (pc + 8) & (SPU_LS_SIZE - 1);
1321 else
1322 next_pc = (pc + 4) & (SPU_LS_SIZE - 1);
1323
1324 insert_single_step_breakpoint (gdbarch, next_pc);
1325
1326 if (is_branch (insn, &offset, &reg))
1327 {
1328 CORE_ADDR target = offset;
1329
1330 if (reg == SPU_PC_REGNUM)
1331 target += pc;
1332 else if (reg != -1)
1333 {
1334 get_frame_register_bytes (frame, reg, 0, 4, buf);
1335 target += extract_unsigned_integer (buf, 4, byte_order) & -4;
1336 }
1337
1338 target = target & (SPU_LS_SIZE - 1);
1339 if (target != next_pc)
1340 insert_single_step_breakpoint (gdbarch, target);
1341 }
1342
1343 return 1;
1344 }
1345
1346
1347 /* Longjmp support. */
1348
1349 static int
1350 spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1351 {
1352 struct gdbarch *gdbarch = get_frame_arch (frame);
1353 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1354 gdb_byte buf[4];
1355 CORE_ADDR jb_addr;
1356
1357 /* Jump buffer is pointed to by the argument register $r3. */
1358 get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf);
1359 jb_addr = extract_unsigned_integer (buf, 4, byte_order);
1360 if (target_read_memory (jb_addr, buf, 4))
1361 return 0;
1362
1363 *pc = extract_unsigned_integer (buf, 4, byte_order);
1364 return 1;
1365 }
1366
1367
1368 /* Target overlays for the SPU overlay manager.
1369
1370 See the documentation of simple_overlay_update for how the
1371 interface is supposed to work.
1372
1373 Data structures used by the overlay manager:
1374
1375 struct ovly_table
1376 {
1377 u32 vma;
1378 u32 size;
1379 u32 pos;
1380 u32 buf;
1381 } _ovly_table[]; -- one entry per overlay section
1382
1383 struct ovly_buf_table
1384 {
1385 u32 mapped;
1386 } _ovly_buf_table[]; -- one entry per overlay buffer
1387
1388 _ovly_table should never change.
1389
1390 Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1391 and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1392 of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1393
1394 mapped is an index into _ovly_table. Both the mapped and buf indices start
1395 from one to reference the first entry in their respective tables. */
1396
1397 /* Using the per-objfile private data mechanism, we store for each
1398 objfile an array of "struct spu_overlay_table" structures, one
1399 for each obj_section of the objfile. This structure holds two
1400 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1401 is *not* an overlay section. If it is non-zero, it represents
1402 a target address. The overlay section is mapped iff the target
1403 integer at this location equals MAPPED_VAL. */
1404
1405 static const struct objfile_data *spu_overlay_data;
1406
1407 struct spu_overlay_table
1408 {
1409 CORE_ADDR mapped_ptr;
1410 CORE_ADDR mapped_val;
1411 };
1412
1413 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1414 the _ovly_table data structure from the target and initialize the
1415 spu_overlay_table data structure from it. */
1416 static struct spu_overlay_table *
1417 spu_get_overlay_table (struct objfile *objfile)
1418 {
1419 enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
1420 BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1421 struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
1422 CORE_ADDR ovly_table_base, ovly_buf_table_base;
1423 unsigned ovly_table_size, ovly_buf_table_size;
1424 struct spu_overlay_table *tbl;
1425 struct obj_section *osect;
1426 char *ovly_table;
1427 int i;
1428
1429 tbl = objfile_data (objfile, spu_overlay_data);
1430 if (tbl)
1431 return tbl;
1432
1433 ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1434 if (!ovly_table_msym)
1435 return NULL;
1436
1437 ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
1438 if (!ovly_buf_table_msym)
1439 return NULL;
1440
1441 ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
1442 ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
1443
1444 ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1445 ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
1446
1447 ovly_table = xmalloc (ovly_table_size);
1448 read_memory (ovly_table_base, ovly_table, ovly_table_size);
1449
1450 tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1451 objfile->sections_end - objfile->sections,
1452 struct spu_overlay_table);
1453
1454 for (i = 0; i < ovly_table_size / 16; i++)
1455 {
1456 CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0,
1457 4, byte_order);
1458 CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
1459 4, byte_order);
1460 CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8,
1461 4, byte_order);
1462 CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12,
1463 4, byte_order);
1464
1465 if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1466 continue;
1467
1468 ALL_OBJFILE_OSECTIONS (objfile, osect)
1469 if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1470 && pos == osect->the_bfd_section->filepos)
1471 {
1472 int ndx = osect - objfile->sections;
1473 tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1474 tbl[ndx].mapped_val = i + 1;
1475 break;
1476 }
1477 }
1478
1479 xfree (ovly_table);
1480 set_objfile_data (objfile, spu_overlay_data, tbl);
1481 return tbl;
1482 }
1483
1484 /* Read _ovly_buf_table entry from the target to determine whether
1485 OSECT is currently mapped, and update the mapped state. */
1486 static void
1487 spu_overlay_update_osect (struct obj_section *osect)
1488 {
1489 enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
1490 BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1491 struct spu_overlay_table *ovly_table;
1492 CORE_ADDR val;
1493
1494 ovly_table = spu_get_overlay_table (osect->objfile);
1495 if (!ovly_table)
1496 return;
1497
1498 ovly_table += osect - osect->objfile->sections;
1499 if (ovly_table->mapped_ptr == 0)
1500 return;
1501
1502 val = read_memory_unsigned_integer (ovly_table->mapped_ptr, 4, byte_order);
1503 osect->ovly_mapped = (val == ovly_table->mapped_val);
1504 }
1505
1506 /* If OSECT is NULL, then update all sections' mapped state.
1507 If OSECT is non-NULL, then update only OSECT's mapped state. */
1508 static void
1509 spu_overlay_update (struct obj_section *osect)
1510 {
1511 /* Just one section. */
1512 if (osect)
1513 spu_overlay_update_osect (osect);
1514
1515 /* All sections. */
1516 else
1517 {
1518 struct objfile *objfile;
1519
1520 ALL_OBJSECTIONS (objfile, osect)
1521 if (section_is_overlay (osect))
1522 spu_overlay_update_osect (osect);
1523 }
1524 }
1525
1526 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1527 If there is one, go through all sections and make sure for non-
1528 overlay sections LMA equals VMA, while for overlay sections LMA
1529 is larger than local store size. */
1530 static void
1531 spu_overlay_new_objfile (struct objfile *objfile)
1532 {
1533 struct spu_overlay_table *ovly_table;
1534 struct obj_section *osect;
1535
1536 /* If we've already touched this file, do nothing. */
1537 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1538 return;
1539
1540 /* Consider only SPU objfiles. */
1541 if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1542 return;
1543
1544 /* Check if this objfile has overlays. */
1545 ovly_table = spu_get_overlay_table (objfile);
1546 if (!ovly_table)
1547 return;
1548
1549 /* Now go and fiddle with all the LMAs. */
1550 ALL_OBJFILE_OSECTIONS (objfile, osect)
1551 {
1552 bfd *obfd = objfile->obfd;
1553 asection *bsect = osect->the_bfd_section;
1554 int ndx = osect - objfile->sections;
1555
1556 if (ovly_table[ndx].mapped_ptr == 0)
1557 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1558 else
1559 bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
1560 }
1561 }
1562
1563
1564 /* "info spu" commands. */
1565
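/* Implement the "info spu event" command: show the SPU event mask and
   event status words.  */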
1566 static void
1567 info_spu_event_command (char *args, int from_tty)
1568 {
1569 struct frame_info *frame = get_selected_frame (NULL);
1570 ULONGEST event_status = 0;
1571 ULONGEST event_mask = 0;
1572 struct cleanup *chain;
1573 gdb_byte buf[100];
1574 char annex[32];
1575 LONGEST len;
1576 int rc, id;
1577
1578 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1579 error (_("\"info spu\" is only supported on the SPU architecture."));
1580
1581 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1582
1583 xsnprintf (annex, sizeof annex, "%d/event_status", id);
1584 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1585 buf, 0, (sizeof (buf) - 1));
1586 if (len <= 0)
1587 error (_("Could not read event_status."));
1588 buf[len] = '\0';
1589 event_status = strtoulst (buf, NULL, 16);
1590
1591 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1592 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1593 buf, 0, (sizeof (buf) - 1));
1594 if (len <= 0)
1595 error (_("Could not read event_mask."));
1596 buf[len] = '\0';
1597 event_mask = strtoulst (buf, NULL, 16);
1598
1599 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
1600
1601 if (ui_out_is_mi_like_p (uiout))
1602 {
1603 ui_out_field_fmt (uiout, "event_status",
1604 "0x%s", phex_nz (event_status, 4));
1605 ui_out_field_fmt (uiout, "event_mask",
1606 "0x%s", phex_nz (event_mask, 4));
1607 }
1608 else
1609 {
1610 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
1611 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
1612 }
1613
1614 do_cleanups (chain);
1615 }
1616
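/* Implement the "info spu signal" command: show the state of the two
   signal notification channels.  */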
1617 static void
1618 info_spu_signal_command (char *args, int from_tty)
1619 {
1620 struct frame_info *frame = get_selected_frame (NULL);
1621 struct gdbarch *gdbarch = get_frame_arch (frame);
1622 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1623 ULONGEST signal1 = 0;
1624 ULONGEST signal1_type = 0;
1625 int signal1_pending = 0;
1626 ULONGEST signal2 = 0;
1627 ULONGEST signal2_type = 0;
1628 int signal2_pending = 0;
1629 struct cleanup *chain;
1630 char annex[32];
1631 gdb_byte buf[100];
1632 LONGEST len;
1633 int rc, id;
1634
1635 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1636 error (_("\"info spu\" is only supported on the SPU architecture."));
1637
1638 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1639
1640 xsnprintf (annex, sizeof annex, "%d/signal1", id);
1641 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1642 if (len < 0)
1643 error (_("Could not read signal1."));
1644 else if (len == 4)
1645 {
1646 signal1 = extract_unsigned_integer (buf, 4, byte_order);
1647 signal1_pending = 1;
1648 }
1649
1650 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
1651 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1652 buf, 0, (sizeof (buf) - 1));
1653 if (len <= 0)
1654 error (_("Could not read signal1_type."));
1655 buf[len] = '\0';
1656 signal1_type = strtoulst (buf, NULL, 16);
1657
1658 xsnprintf (annex, sizeof annex, "%d/signal2", id);
1659 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1660 if (len < 0)
1661 error (_("Could not read signal2."));
1662 else if (len == 4)
1663 {
1664 signal2 = extract_unsigned_integer (buf, 4, byte_order);
1665 signal2_pending = 1;
1666 }
1667
1668 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
1669 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1670 buf, 0, (sizeof (buf) - 1));
1671 if (len <= 0)
1672 error (_("Could not read signal2_type."));
1673 buf[len] = '\0';
1674 signal2_type = strtoulst (buf, NULL, 16);
1675
1676 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
1677
1678 if (ui_out_is_mi_like_p (uiout))
1679 {
1680 ui_out_field_int (uiout, "signal1_pending", signal1_pending);
1681 ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
1682 ui_out_field_int (uiout, "signal1_type", signal1_type);
1683 ui_out_field_int (uiout, "signal2_pending", signal2_pending);
1684 ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
1685 ui_out_field_int (uiout, "signal2_type", signal2_type);
1686 }
1687 else
1688 {
1689 if (signal1_pending)
1690 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
1691 else
1692 printf_filtered (_("Signal 1 not pending "));
1693
1694 if (signal1_type)
1695 printf_filtered (_("(Type Or)\n"));
1696 else
1697 printf_filtered (_("(Type Overwrite)\n"));
1698
1699 if (signal2_pending)
1700 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
1701 else
1702 printf_filtered (_("Signal 2 not pending "));
1703
1704 if (signal2_type)
1705 printf_filtered (_("(Type Or)\n"));
1706 else
1707 printf_filtered (_("(Type Overwrite)\n"));
1708 }
1709
1710 do_cleanups (chain);
1711 }
1712
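/* Helper for "info spu mailbox": print NR mailbox entries from BUF as
   a table with column FIELD and heading MSG.  */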
1713 static void
1714 info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
1715 const char *field, const char *msg)
1716 {
1717 struct cleanup *chain;
1718 int i;
1719
1720 if (nr <= 0)
1721 return;
1722
1723 chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
1724
1725 ui_out_table_header (uiout, 32, ui_left, field, msg);
1726 ui_out_table_body (uiout);
1727
1728 for (i = 0; i < nr; i++)
1729 {
1730 struct cleanup *val_chain;
1731 ULONGEST val;
1732 val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
1733 val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
1734 ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
1735 do_cleanups (val_chain);
1736
1737 if (!ui_out_is_mi_like_p (uiout))
1738 printf_filtered ("\n");
1739 }
1740
1741 do_cleanups (chain);
1742 }
1743
1744 static void
1745 info_spu_mailbox_command (char *args, int from_tty)
1746 {
1747 struct frame_info *frame = get_selected_frame (NULL);
1748 struct gdbarch *gdbarch = get_frame_arch (frame);
1749 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1750 struct cleanup *chain;
1751 char annex[32];
1752 gdb_byte buf[1024];
1753 LONGEST len;
1754 int i, id;
1755
1756 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1757 error (_("\"info spu\" is only supported on the SPU architecture."));
1758
1759 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1760
1761 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
1762
1763 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
1764 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1765 buf, 0, sizeof buf);
1766 if (len < 0)
1767 error (_("Could not read mbox_info."));
1768
1769 info_spu_mailbox_list (buf, len / 4, byte_order,
1770 "mbox", "SPU Outbound Mailbox");
1771
1772 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
1773 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1774 buf, 0, sizeof buf);
1775 if (len < 0)
1776 error (_("Could not read ibox_info."));
1777
1778 info_spu_mailbox_list (buf, len / 4, byte_order,
1779 "ibox", "SPU Outbound Interrupt Mailbox");
1780
1781 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
1782 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1783 buf, 0, sizeof buf);
1784 if (len < 0)
1785 error (_("Could not read wbox_info."));
1786
1787 info_spu_mailbox_list (buf, len / 4, byte_order,
1788 "wbox", "SPU Inbound Mailbox");
1789
1790 do_cleanups (chain);
1791 }
1792
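/* Extract bits FIRST through LAST (big-endian bit numbering, bit 0
   being the most significant bit of the 64-bit word) from WORD.  */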
1793 static ULONGEST
1794 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
1795 {
1796 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
1797 return (word >> (63 - last)) & mask;
1798 }
1799
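/* Helper for the DMA-related "info spu" commands: decode NR entries of
   the MFC command queue context in BUF and print them as a table.  */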
1800 static void
1801 info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
1802 {
1803 static char *spu_mfc_opcode[256] =
1804 {
1805 /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1806 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1807 /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1808 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1809 /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
1810 "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
1811 /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
1812 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1813 /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
1814 "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
1815 /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1816 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1817 /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1818 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1819 /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1820 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1821 /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
1822 NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
1823 /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1824 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1825 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
1826 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1827 /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
1828 "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1829 /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1830 "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
1831 /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1832 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1833 /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1834 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1835 /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1836 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1837 };
1838
1839 int *seq = alloca (nr * sizeof (int));
1840 int done = 0;
1841 struct cleanup *chain;
1842 int i, j;
1843
1844
1845 /* Determine sequence in which to display (valid) entries. */
1846 for (i = 0; i < nr; i++)
1847 {
1848 /* Search for the first valid entry all of whose
1849 dependencies are met. */
1850 for (j = 0; j < nr; j++)
1851 {
1852 ULONGEST mfc_cq_dw3;
1853 ULONGEST dependencies;
1854
1855 if (done & (1 << (nr - 1 - j)))
1856 continue;
1857
1858 mfc_cq_dw3
1859 = extract_unsigned_integer (buf + 32*j + 24, 8, byte_order);
1860 if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
1861 continue;
1862
1863 dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
1864 if ((dependencies & done) != dependencies)
1865 continue;
1866
1867 seq[i] = j;
1868 done |= 1 << (nr - 1 - j);
1869 break;
1870 }
1871
1872 if (j == nr)
1873 break;
1874 }
1875
1876 nr = i;
1877
1878
1879 chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");
1880
1881 ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
1882 ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
1883 ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
1884 ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
1885 ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
1886 ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
1887 ui_out_table_header (uiout, 7, ui_left, "size", "Size");
1888 ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
1889 ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
1890 ui_out_table_header (uiout, 1, ui_left, "error_p", "E");
1891
1892 ui_out_table_body (uiout);
1893
1894 for (i = 0; i < nr; i++)
1895 {
1896 struct cleanup *cmd_chain;
1897 ULONGEST mfc_cq_dw0;
1898 ULONGEST mfc_cq_dw1;
1899 ULONGEST mfc_cq_dw2;
1900 int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
1901 int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
1902 ULONGEST mfc_ea;
1903 int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
1904
1905 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
1906 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
1907
1908 mfc_cq_dw0
1909 = extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
1910 mfc_cq_dw1
1911 = extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
1912 mfc_cq_dw2
1913 = extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);
1914
1915 list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
1916 list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
1917 mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
1918 mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
1919 list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
1920 rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
1921 tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
1922
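/* The 64-bit effective address is saved split across two doublewords:
   EA bits 0..51 in DW1 and the low 12 bits in DW2. */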
1923 mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
1924 | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
1925
1926 mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
1927 mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
1928 noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
1929 qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
1930 ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
1931 cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
1932
1933 cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");
1934
1935 if (spu_mfc_opcode[mfc_cmd_opcode])
1936 ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
1937 else
1938 ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);
1939
1940 ui_out_field_int (uiout, "tag", mfc_cmd_tag);
1941 ui_out_field_int (uiout, "tid", tclass_id);
1942 ui_out_field_int (uiout, "rid", rclass_id);
1943
1944 if (ea_valid_p)
1945 ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
1946 else
1947 ui_out_field_skip (uiout, "ea");
1948
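/* The local-store address and, when the quadword flag is set, the
   transfer size appear to be saved in 16-byte units, hence the shifts
   by 4 below. */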
1949 ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
1950 if (qw_valid_p)
1951 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
1952 else
1953 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);
1954
1955 if (list_valid_p)
1956 {
1957 ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
1958 ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
1959 }
1960 else
1961 {
1962 ui_out_field_skip (uiout, "lstaddr");
1963 ui_out_field_skip (uiout, "lstsize");
1964 }
1965
1966 if (cmd_error_p)
1967 ui_out_field_string (uiout, "error_p", "*");
1968 else
1969 ui_out_field_skip (uiout, "error_p");
1970
1971 do_cleanups (cmd_chain);
1972
1973 if (!ui_out_is_mi_like_p (uiout))
1974 printf_filtered ("\n");
1975 }
1976
1977 do_cleanups (chain);
1978 }
1979
1980 static void
1981 info_spu_dma_command (char *args, int from_tty)
1982 {
1983 struct frame_info *frame = get_selected_frame (NULL);
1984 struct gdbarch *gdbarch = get_frame_arch (frame);
1985 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1986 ULONGEST dma_info_type;
1987 ULONGEST dma_info_mask;
1988 ULONGEST dma_info_status;
1989 ULONGEST dma_info_stall_and_notify;
1990 ULONGEST dma_info_atomic_command_status;
1991 struct cleanup *chain;
1992 char annex[32];
1993 gdb_byte buf[1024];
1994 LONGEST len;
1995 	  int id;
1996
1997 	  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1998 error (_("\"info spu\" is only supported on the SPU architecture."));
1999
2000 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2001
2002 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2003 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2004 buf, 0, 40 + 16 * 32);
2005 if (len <= 0)
2006 error (_("Could not read dma_info."));
2007
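/* The dma_info area starts with five 8-byte status words, followed by
   the 16-entry MFC command queue decoded by info_spu_dma_cmdlist. */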
2008 dma_info_type
2009 = extract_unsigned_integer (buf, 8, byte_order);
2010 dma_info_mask
2011 = extract_unsigned_integer (buf + 8, 8, byte_order);
2012 dma_info_status
2013 = extract_unsigned_integer (buf + 16, 8, byte_order);
2014 dma_info_stall_and_notify
2015 = extract_unsigned_integer (buf + 24, 8, byte_order);
2016 dma_info_atomic_command_status
2017 = extract_unsigned_integer (buf + 32, 8, byte_order);
2018
2019 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
2020
2021 if (ui_out_is_mi_like_p (uiout))
2022 {
2023 ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
2024 phex_nz (dma_info_type, 4));
2025 ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
2026 phex_nz (dma_info_mask, 4));
2027 ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
2028 phex_nz (dma_info_status, 4));
2029 ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
2030 phex_nz (dma_info_stall_and_notify, 4));
2031 ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
2032 phex_nz (dma_info_atomic_command_status, 4));
2033 }
2034 else
2035 {
2036 const char *query_msg = _("no query pending");
2037
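/* Bit 2 of dma_info_type flags a pending tag-group query; the low two
   bits encode the query type. */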
2038 if (dma_info_type & 4)
2039 switch (dma_info_type & 3)
2040 {
2041 case 1: query_msg = _("'any' query pending"); break;
2042 case 2: query_msg = _("'all' query pending"); break;
2043 default: query_msg = _("undefined query type"); break;
2044 }
2045
2046 printf_filtered (_("Tag-Group Status 0x%s\n"),
2047 phex (dma_info_status, 4));
2048 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2049 phex (dma_info_mask, 4), query_msg);
2050 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2051 phex (dma_info_stall_and_notify, 4));
2052 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2053 phex (dma_info_atomic_command_status, 4));
2054 printf_filtered ("\n");
2055 }
2056
2057 info_spu_dma_cmdlist (buf + 40, 16, byte_order);
2058 do_cleanups (chain);
2059 }
2060
2061 static void
2062 info_spu_proxydma_command (char *args, int from_tty)
2063 {
2064 struct frame_info *frame = get_selected_frame (NULL);
2065 struct gdbarch *gdbarch = get_frame_arch (frame);
2066 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2067 ULONGEST dma_info_type;
2068 ULONGEST dma_info_mask;
2069 ULONGEST dma_info_status;
2070 struct cleanup *chain;
2071 char annex[32];
2072 gdb_byte buf[1024];
2073 LONGEST len;
2074 	  int id;
2075
2076 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2077 error (_("\"info spu\" is only supported on the SPU architecture."));
2078
2079 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2080
2081 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2082 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2083 buf, 0, 24 + 8 * 32);
2084 if (len <= 0)
2085 error (_("Could not read proxydma_info."));
2086
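/* proxydma_info has three 8-byte status words, followed by the 8-entry
   proxy command queue decoded by info_spu_dma_cmdlist. */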
2087 dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2088 dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2089 dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
2090
2091 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2092
2093 if (ui_out_is_mi_like_p (uiout))
2094 {
2095 ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2096 phex_nz (dma_info_type, 4));
2097 ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2098 phex_nz (dma_info_mask, 4));
2099 ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2100 phex_nz (dma_info_status, 4));
2101 }
2102 else
2103 {
2104 const char *query_msg;
2105
2106 switch (dma_info_type & 3)
2107 {
2108 case 0: query_msg = _("no query pending"); break;
2109 case 1: query_msg = _("'any' query pending"); break;
2110 case 2: query_msg = _("'all' query pending"); break;
2111 default: query_msg = _("undefined query type"); break;
2112 }
2113
2114 printf_filtered (_("Tag-Group Status 0x%s\n"),
2115 phex (dma_info_status, 4));
2116 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2117 phex (dma_info_mask, 4), query_msg);
2118 printf_filtered ("\n");
2119 }
2120
2121 info_spu_dma_cmdlist (buf + 24, 8, byte_order);
2122 do_cleanups (chain);
2123 }
2124
2125 static void
2126 info_spu_command (char *args, int from_tty)
2127 {
2128 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2129 help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
2130 }
2131
2132
2133 /* Set up gdbarch struct. */
2134
2135 static struct gdbarch *
2136 spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2137 {
2138 struct gdbarch *gdbarch;
2139 struct gdbarch_tdep *tdep;
2140
2141 /* Find a candidate among the list of pre-declared architectures. */
2142 arches = gdbarch_list_lookup_by_info (arches, &info);
2143 if (arches != NULL)
2144 return arches->gdbarch;
2145
2146 	  /* Is it for us?  */
2147 if (info.bfd_arch_info->mach != bfd_mach_spu)
2148 return NULL;
2149
2150 /* Yes, create a new architecture. */
2151 tdep = XCALLOC (1, struct gdbarch_tdep);
2152 gdbarch = gdbarch_alloc (&info, tdep);
2153
2154 /* Disassembler. */
2155 set_gdbarch_print_insn (gdbarch, print_insn_spu);
2156
2157 /* Registers. */
2158 set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2159 set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2160 set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2161 set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
2162 set_gdbarch_read_pc (gdbarch, spu_read_pc);
2163 set_gdbarch_write_pc (gdbarch, spu_write_pc);
2164 set_gdbarch_register_name (gdbarch, spu_register_name);
2165 set_gdbarch_register_type (gdbarch, spu_register_type);
2166 set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2167 set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
2168 set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
2169 set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
2170
2171 /* Data types. */
2172 set_gdbarch_char_signed (gdbarch, 0);
2173 set_gdbarch_ptr_bit (gdbarch, 32);
2174 set_gdbarch_addr_bit (gdbarch, 32);
2175 set_gdbarch_short_bit (gdbarch, 16);
2176 set_gdbarch_int_bit (gdbarch, 32);
2177 set_gdbarch_long_bit (gdbarch, 32);
2178 set_gdbarch_long_long_bit (gdbarch, 64);
2179 set_gdbarch_float_bit (gdbarch, 32);
2180 set_gdbarch_double_bit (gdbarch, 64);
2181 set_gdbarch_long_double_bit (gdbarch, 64);
2182 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2183 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2184 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
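/* Note that long double is mapped onto IEEE double above; the SPU
   provides no wider floating-point format. */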
2185
2186 /* Address conversion. */
2187 set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2188 set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
2189
2190 /* Inferior function calls. */
2191 set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2192 set_gdbarch_frame_align (gdbarch, spu_frame_align);
2193 set_gdbarch_frame_red_zone_size (gdbarch, 2000);
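/* GDB therefore assumes code may clobber up to 2000 bytes below the
   stack pointer and places dummy frames beneath that red zone. */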
2194 set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
2195 set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
2196 set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
2197 set_gdbarch_return_value (gdbarch, spu_return_value);
2198
2199 /* Frame handling. */
2200 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2201 frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
2202 frame_base_set_default (gdbarch, &spu_frame_base);
2203 set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2204 set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2205 set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2206 set_gdbarch_frame_args_skip (gdbarch, 0);
2207 set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
2208 set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
2209
2210 /* Breakpoints. */
2211 set_gdbarch_decr_pc_after_break (gdbarch, 4);
2212 set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
2213 set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2214 set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
2215 set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);
2216
2217 /* Overlays. */
2218 set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2219
2220 return gdbarch;
2221 }
2222
2223 /* Provide a prototype to silence -Wmissing-prototypes. */
2224 extern initialize_file_ftype _initialize_spu_tdep;
2225
2226 void
2227 _initialize_spu_tdep (void)
2228 {
2229 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
2230
2231 /* Add ourselves to objfile event chain. */
2232 observer_attach_new_objfile (spu_overlay_new_objfile);
2233 spu_overlay_data = register_objfile_data ();
2234
2235 /* Add root prefix command for all "info spu" commands. */
2236 add_prefix_cmd ("spu", class_info, info_spu_command,
2237 _("Various SPU specific commands."),
2238 &infospucmdlist, "info spu ", 0, &infolist);
2239
2240 /* Add various "info spu" commands. */
2241 add_cmd ("event", class_info, info_spu_event_command,
2242 _("Display SPU event facility status.\n"),
2243 &infospucmdlist);
2244 add_cmd ("signal", class_info, info_spu_signal_command,
2245 _("Display SPU signal notification facility status.\n"),
2246 &infospucmdlist);
2247 add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2248 _("Display SPU mailbox facility status.\n"),
2249 &infospucmdlist);
2250 add_cmd ("dma", class_info, info_spu_dma_command,
2251 _("Display MFC DMA status.\n"),
2252 &infospucmdlist);
2253 add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2254 _("Display MFC Proxy-DMA status.\n"),
2255 &infospucmdlist);
2256 }