sim/mcore/interp.c
1 /* Simulator for Motorola's MCore processor
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 Contributed by Cygnus Solutions.
4
5 This file is part of GDB, the GNU debugger.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include <signal.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/times.h>
25 #include <sys/param.h>
26 #include <unistd.h>
27 #include "bfd.h"
28 #include "gdb/callback.h"
29 #include "libiberty.h"
30 #include "gdb/remote-sim.h"
31
32 #include "sim-main.h"
33 #include "sim-base.h"
34 #include "sim-syscall.h"
35 #include "sim-options.h"
36
37 #define target_big_endian (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
38
39
40 static unsigned long
41 mcore_extract_unsigned_integer (unsigned char *addr, int len)
42 {
43 unsigned long retval;
44 unsigned char * p;
45 unsigned char * startaddr = (unsigned char *)addr;
46 unsigned char * endaddr = startaddr + len;
47
48 if (len > (int) sizeof (unsigned long))
 49     printf ("That operation is not available on integers of more than %zu bytes.\n",
50 sizeof (unsigned long));
51
52 /* Start at the most significant end of the integer, and work towards
53 the least significant. */
54 retval = 0;
55
56 if (! target_big_endian)
57 {
58 for (p = endaddr; p > startaddr;)
59 retval = (retval << 8) | * -- p;
60 }
61 else
62 {
63 for (p = startaddr; p < endaddr;)
64 retval = (retval << 8) | * p ++;
65 }
66
67 return retval;
68 }
69
70 static void
71 mcore_store_unsigned_integer (unsigned char *addr, int len, unsigned long val)
72 {
73 unsigned char * p;
74 unsigned char * startaddr = (unsigned char *)addr;
75 unsigned char * endaddr = startaddr + len;
76
77 if (! target_big_endian)
78 {
79 for (p = startaddr; p < endaddr;)
80 {
81 * p ++ = val & 0xff;
82 val >>= 8;
83 }
84 }
85 else
86 {
87 for (p = endaddr; p > startaddr;)
88 {
89 * -- p = val & 0xff;
90 val >>= 8;
91 }
92 }
93 }
94
95 /* The machine state.
96 This state is maintained in host byte order. The
97 fetch/store register functions must translate between host
98 byte order and the target processor byte order.
99 Keeping this data in target byte order simplifies the register
100 read/write functions. Keeping this data in native order improves
101 the performance of the simulator. Simulation speed is deemed more
102 important. */
103 /* TODO: Should be moved to sim-main.h:sim_cpu. */
104
105 /* The ordering of the mcore_regset structure is matched in the
106 gdb/config/mcore/tm-mcore.h file in the REGISTER_NAMES macro. */
107 struct mcore_regset
108 {
109 word gregs [16]; /* primary registers */
110 word alt_gregs [16]; /* alt register file */
111 word cregs [32]; /* control registers */
112 int ticks;
113 int stalls;
114 int cycles;
115 int insts;
116 int exception;
117 word * active_gregs;
118 };
119
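/* The union below lets sim_fetch_register/sim_store_register address the
   whole register state as a flat array of words, indexed by register number.  */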
120 union
121 {
122 struct mcore_regset asregs;
123 word asints [1]; /* but accessed larger... */
124 } cpu;
125
126 #define LAST_VALID_CREG 32 /* only 0..12 implemented */
127 #define NUM_MCORE_REGS (16 + 16 + LAST_VALID_CREG + 1)
128
129 static int memcycles = 1;
130
131 #define gr asregs.active_gregs
132 #define cr asregs.cregs
133 #define sr asregs.cregs[0]
134 #define vbr asregs.cregs[1]
135 #define esr asregs.cregs[2]
136 #define fsr asregs.cregs[3]
137 #define epc asregs.cregs[4]
138 #define fpc asregs.cregs[5]
139 #define ss0 asregs.cregs[6]
140 #define ss1 asregs.cregs[7]
141 #define ss2 asregs.cregs[8]
142 #define ss3 asregs.cregs[9]
143 #define ss4 asregs.cregs[10]
144 #define gcr asregs.cregs[11]
145 #define gsr asregs.cregs[12]
146
 147 /* manipulate the carry bit */
148 #define C_ON() (cpu.sr & 1)
149 #define C_VALUE() (cpu.sr & 1)
150 #define C_OFF() ((cpu.sr & 1) == 0)
151 #define SET_C() {cpu.sr |= 1;}
152 #define CLR_C() {cpu.sr &= 0xfffffffe;}
153 #define NEW_C(v) {CLR_C(); cpu.sr |= ((v) & 1);}
154
155 #define SR_AF() ((cpu.sr >> 1) & 1)
156
157 #define TRAPCODE 1 /* r1 holds which function we want */
158 #define PARM1 2 /* first parameter */
159 #define PARM2 3
160 #define PARM3 4
161 #define PARM4 5
162 #define RET1 2 /* register for return values. */
163
 164 /* Default to an 8 Mbyte (== 2^23) memory space. */
165 #define DEFAULT_MEMORY_SIZE 0x800000
166
167 static void
168 set_initial_gprs (SIM_CPU *scpu)
169 {
170 int i;
171 long space;
172
173 /* Set up machine just out of reset. */
174 CPU_PC_SET (scpu, 0);
175 cpu.sr = 0;
176
177 /* Clean out the GPRs and alternate GPRs. */
178 for (i = 0; i < 16; i++)
179 {
180 cpu.asregs.gregs[i] = 0;
181 cpu.asregs.alt_gregs[i] = 0;
182 }
183
184 /* Make our register set point to the right place. */
185 if (SR_AF())
186 cpu.asregs.active_gregs = &cpu.asregs.alt_gregs[0];
187 else
188 cpu.asregs.active_gregs = &cpu.asregs.gregs[0];
189
190 /* ABI specifies initial values for these registers. */
191 cpu.gr[0] = DEFAULT_MEMORY_SIZE - 4;
192
193 /* dac fix, the stack address must be 8-byte aligned! */
194 cpu.gr[0] = cpu.gr[0] - cpu.gr[0] % 8;
195 cpu.gr[PARM1] = 0;
196 cpu.gr[PARM2] = 0;
197 cpu.gr[PARM3] = 0;
198 cpu.gr[PARM4] = cpu.gr[0];
199 }
200
201 /* Simulate a monitor trap. */
202
203 static void
204 handle_trap1 (SIM_DESC sd)
205 {
206 /* XXX: We don't pass back the actual errno value. */
207 cpu.gr[RET1] = sim_syscall (STATE_CPU (sd, 0), cpu.gr[TRAPCODE],
208 cpu.gr[PARM1], cpu.gr[PARM2], cpu.gr[PARM3],
209 cpu.gr[PARM4]);
210 }
211
212 static void
213 process_stub (SIM_DESC sd, int what)
214 {
215 /* These values should match those in libgloss/mcore/syscalls.s. */
216 switch (what)
217 {
218 case 3: /* _read */
219 case 4: /* _write */
220 case 5: /* _open */
221 case 6: /* _close */
222 case 10: /* _unlink */
223 case 19: /* _lseek */
224 case 43: /* _times */
225 cpu.gr [TRAPCODE] = what;
226 handle_trap1 (sd);
227 break;
228
229 default:
230 if (STATE_VERBOSE_P (sd))
231 fprintf (stderr, "Unhandled stub opcode: %d\n", what);
232 break;
233 }
234 }
235
236 static void
237 util (SIM_DESC sd, unsigned what)
238 {
239 switch (what)
240 {
241 case 0: /* exit */
242 cpu.asregs.exception = SIGQUIT;
243 break;
244
245 case 1: /* printf */
246 if (STATE_VERBOSE_P (sd))
247 fprintf (stderr, "WARNING: printf unimplemented\n");
248 break;
249
250 case 2: /* scanf */
251 if (STATE_VERBOSE_P (sd))
252 fprintf (stderr, "WARNING: scanf unimplemented\n");
253 break;
254
255 case 3: /* utime */
256 cpu.gr[RET1] = cpu.asregs.insts;
257 break;
258
259 case 0xFF:
260 process_stub (sd, cpu.gr[1]);
261 break;
262
263 default:
264 if (STATE_VERBOSE_P (sd))
265 fprintf (stderr, "Unhandled util code: %x\n", what);
266 break;
267 }
268 }
269
270 /* For figuring out whether we carried; addc/subc use this. */
271 static int
272 iu_carry (unsigned long a, unsigned long b, int cin)
273 {
274 unsigned long x;
275
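   /* Sum in 16-bit halves so the carry out of bit 31 can be recovered
      even when unsigned long is only 32 bits wide.  */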
276 x = (a & 0xffff) + (b & 0xffff) + cin;
277 x = (x >> 16) + (a >> 16) + (b >> 16);
278 x >>= 16;
279
280 return (x != 0);
281 }
282
283 /* TODO: Convert to common watchpoints. */
284 #undef WATCHFUNCTIONS
285 #ifdef WATCHFUNCTIONS
286
287 #define MAXWL 80
288 word WL[MAXWL];
289 char * WLstr[MAXWL];
290
291 int ENDWL=0;
292 int WLincyc;
293 int WLcyc[MAXWL];
294 int WLcnts[MAXWL];
295 int WLmax[MAXWL];
296 int WLmin[MAXWL];
297 word WLendpc;
298 int WLbcyc;
299 int WLW;
300 #endif
301
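 /* Fields of the 16-bit instruction word: destination, source and index
    register numbers plus the 4- and 5-bit immediates.  */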
302 #define RD (inst & 0xF)
303 #define RS ((inst >> 4) & 0xF)
304 #define RX ((inst >> 8) & 0xF)
305 #define IMM5 ((inst >> 4) & 0x1F)
306 #define IMM4 ((inst) & 0xF)
307
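 /* Byte, halfword and word memory accessors built on the common sim core.  */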
308 #define rbat(X) sim_core_read_1 (scpu, 0, read_map, X)
309 #define rhat(X) sim_core_read_2 (scpu, 0, read_map, X)
310 #define rlat(X) sim_core_read_4 (scpu, 0, read_map, X)
311 #define wbat(X, D) sim_core_write_1 (scpu, 0, write_map, X, D)
312 #define what(X, D) sim_core_write_2 (scpu, 0, write_map, X, D)
313 #define wlat(X, D) sim_core_write_4 (scpu, 0, write_map, X, D)
314
315 static int tracing = 0;
316
317 void
318 sim_resume (SIM_DESC sd, int step, int siggnal)
319 {
320 SIM_CPU *scpu = STATE_CPU (sd, 0);
321 int needfetch;
322 word ibuf;
323 word pc;
324 unsigned short inst;
325 int memops;
326 int bonus_cycles;
327 int insts;
328 int w;
329 int cycs;
330 #ifdef WATCHFUNCTIONS
331 word WLhash;
332 #endif
333
334 cpu.asregs.exception = step ? SIGTRAP: 0;
335 pc = CPU_PC_GET (scpu);
336
337 /* Fetch the initial instructions that we'll decode. */
338 ibuf = rlat (pc & 0xFFFFFFFC);
339 needfetch = 0;
340
341 memops = 0;
342 bonus_cycles = 0;
343 insts = 0;
344
345 /* make our register set point to the right place */
346 if (SR_AF ())
347 cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
348 else
349 cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
350
351 #ifdef WATCHFUNCTIONS
352 /* make a hash to speed exec loop, hope it's nonzero */
353 WLhash = 0xFFFFFFFF;
354
355 for (w = 1; w <= ENDWL; w++)
356 WLhash = WLhash & WL[w];
357 #endif
358
359 do
360 {
361 word oldpc;
362
363 insts ++;
364
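       /* Select the halfword of the 32-bit fetch buffer that this pc addresses.  */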
365 if (pc & 02)
366 {
367 if (! target_big_endian)
368 inst = ibuf >> 16;
369 else
370 inst = ibuf & 0xFFFF;
371 needfetch = 1;
372 }
373 else
374 {
375 if (! target_big_endian)
376 inst = ibuf & 0xFFFF;
377 else
378 inst = ibuf >> 16;
379 }
380
381 #ifdef WATCHFUNCTIONS
382 /* now scan list of watch addresses, if match, count it and
383 note return address and count cycles until pc=return address */
384
385 if ((WLincyc == 1) && (pc == WLendpc))
386 {
387 cycs = (cpu.asregs.cycles + (insts + bonus_cycles +
388 (memops * memcycles)) - WLbcyc);
389
390 if (WLcnts[WLW] == 1)
391 {
392 WLmax[WLW] = cycs;
393 WLmin[WLW] = cycs;
394 WLcyc[WLW] = 0;
395 }
396
397 if (cycs > WLmax[WLW])
398 {
399 WLmax[WLW] = cycs;
400 }
401
402 if (cycs < WLmin[WLW])
403 {
404 WLmin[WLW] = cycs;
405 }
406
407 WLcyc[WLW] += cycs;
408 WLincyc = 0;
409 WLendpc = 0;
410 }
411
412 /* Optimize with a hash to speed loop. */
413 if (WLincyc == 0)
414 {
415 if ((WLhash == 0) || ((WLhash & pc) != 0))
416 {
417 for (w=1; w <= ENDWL; w++)
418 {
419 if (pc == WL[w])
420 {
421 WLcnts[w]++;
422 WLbcyc = cpu.asregs.cycles + insts
423 + bonus_cycles + (memops * memcycles);
424 WLendpc = cpu.gr[15];
425 WLincyc = 1;
426 WLW = w;
427 break;
428 }
429 }
430 }
431 }
432 #endif
433
434 if (tracing)
435 fprintf (stderr, "%.4lx: inst = %.4x ", pc, inst);
436
437 oldpc = pc;
438
439 pc += 2;
440
441 switch (inst >> 8)
442 {
443 case 0x00:
444 switch RS
445 {
446 case 0x0:
447 switch RD
448 {
449 case 0x0: /* bkpt */
450 cpu.asregs.exception = SIGTRAP;
451 pc -= 2;
452 break;
453
454 case 0x1: /* sync */
455 break;
456
457 case 0x2: /* rte */
458 pc = cpu.epc;
459 cpu.sr = cpu.esr;
460 needfetch = 1;
461
462 if (SR_AF ())
463 cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
464 else
465 cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
466 break;
467
468 case 0x3: /* rfi */
469 pc = cpu.fpc;
470 cpu.sr = cpu.fsr;
471 needfetch = 1;
472
473 if (SR_AF ())
474 cpu.asregs.active_gregs = &cpu.asregs.alt_gregs[0];
475 else
476 cpu.asregs.active_gregs = &cpu.asregs.gregs[0];
477 break;
478
479 case 0x4: /* stop */
480 if (STATE_VERBOSE_P (sd))
481 fprintf (stderr, "WARNING: stop unimplemented\n");
482 break;
483
484 case 0x5: /* wait */
485 if (STATE_VERBOSE_P (sd))
486 fprintf (stderr, "WARNING: wait unimplemented\n");
487 break;
488
489 case 0x6: /* doze */
490 if (STATE_VERBOSE_P (sd))
491 fprintf (stderr, "WARNING: doze unimplemented\n");
492 break;
493
494 case 0x7:
495 cpu.asregs.exception = SIGILL; /* illegal */
496 break;
497
498 case 0x8: /* trap 0 */
499 case 0xA: /* trap 2 */
500 case 0xB: /* trap 3 */
501 cpu.asregs.exception = SIGTRAP;
502 break;
503
504 case 0xC: /* trap 4 */
505 case 0xD: /* trap 5 */
506 case 0xE: /* trap 6 */
507 cpu.asregs.exception = SIGILL; /* illegal */
508 break;
509
510 case 0xF: /* trap 7 */
511 cpu.asregs.exception = SIGTRAP; /* integer div-by-0 */
512 break;
513
514 case 0x9: /* trap 1 */
515 handle_trap1 (sd);
516 break;
517 }
518 break;
519
520 case 0x1:
521 cpu.asregs.exception = SIGILL; /* illegal */
522 break;
523
524 case 0x2: /* mvc */
525 cpu.gr[RD] = C_VALUE();
526 break;
527 case 0x3: /* mvcv */
528 cpu.gr[RD] = C_OFF();
529 break;
530 case 0x4: /* ldq */
531 {
532 word addr = cpu.gr[RD];
533 int regno = 4; /* always r4-r7 */
534
535 bonus_cycles++;
536 memops += 4;
537 do
538 {
539 cpu.gr[regno] = rlat(addr);
540 addr += 4;
541 regno++;
542 }
543 while ((regno&0x3) != 0);
544 }
545 break;
546 case 0x5: /* stq */
547 {
548 word addr = cpu.gr[RD];
549 int regno = 4; /* always r4-r7 */
550
551 memops += 4;
552 bonus_cycles++;
553 do
554 {
555 wlat(addr, cpu.gr[regno]);
556 addr += 4;
557 regno++;
558 }
559 while ((regno & 0x3) != 0);
560 }
561 break;
562 case 0x6: /* ldm */
563 {
564 word addr = cpu.gr[0];
565 int regno = RD;
566
567 /* bonus cycle is really only needed if
568 the next insn shifts the last reg loaded.
569
570 bonus_cycles++;
571 */
572 memops += 16-regno;
573 while (regno <= 0xF)
574 {
575 cpu.gr[regno] = rlat(addr);
576 addr += 4;
577 regno++;
578 }
579 }
580 break;
581 case 0x7: /* stm */
582 {
583 word addr = cpu.gr[0];
584 int regno = RD;
585
586 /* this should be removed! */
587 /* bonus_cycles ++; */
588
589 memops += 16 - regno;
590 while (regno <= 0xF)
591 {
592 wlat(addr, cpu.gr[regno]);
593 addr += 4;
594 regno++;
595 }
596 }
597 break;
598
599 case 0x8: /* dect */
600 cpu.gr[RD] -= C_VALUE();
601 break;
602 case 0x9: /* decf */
603 cpu.gr[RD] -= C_OFF();
604 break;
605 case 0xA: /* inct */
606 cpu.gr[RD] += C_VALUE();
607 break;
608 case 0xB: /* incf */
609 cpu.gr[RD] += C_OFF();
610 break;
611 case 0xC: /* jmp */
612 pc = cpu.gr[RD];
613 if (tracing && RD == 15)
 614 	      fprintf (stderr, "Func return, r2 = %lx, r3 = %lx\n",
615 cpu.gr[2], cpu.gr[3]);
616 bonus_cycles++;
617 needfetch = 1;
618 break;
619 case 0xD: /* jsr */
620 cpu.gr[15] = pc;
621 pc = cpu.gr[RD];
622 bonus_cycles++;
623 needfetch = 1;
624 break;
625 case 0xE: /* ff1 */
626 {
627 word tmp, i;
628 tmp = cpu.gr[RD];
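 	      /* Count leading zero bits; a zero operand yields 32.  */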
629 for (i = 0; !(tmp & 0x80000000) && i < 32; i++)
630 tmp <<= 1;
631 cpu.gr[RD] = i;
632 }
633 break;
634 case 0xF: /* brev */
635 {
636 word tmp;
637 tmp = cpu.gr[RD];
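 	      /* Reverse the bit order with progressively wider swaps:
 	         single bits, pairs, nibbles, bytes, then halfwords.  */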
638 tmp = ((tmp & 0xaaaaaaaa) >> 1) | ((tmp & 0x55555555) << 1);
639 tmp = ((tmp & 0xcccccccc) >> 2) | ((tmp & 0x33333333) << 2);
640 tmp = ((tmp & 0xf0f0f0f0) >> 4) | ((tmp & 0x0f0f0f0f) << 4);
641 tmp = ((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8);
642 cpu.gr[RD] = ((tmp & 0xffff0000) >> 16) | ((tmp & 0x0000ffff) << 16);
643 }
644 break;
645 }
646 break;
647 case 0x01:
648 switch RS
649 {
650 case 0x0: /* xtrb3 */
651 cpu.gr[1] = (cpu.gr[RD]) & 0xFF;
652 NEW_C (cpu.gr[RD] != 0);
653 break;
654 case 0x1: /* xtrb2 */
655 cpu.gr[1] = (cpu.gr[RD]>>8) & 0xFF;
656 NEW_C (cpu.gr[RD] != 0);
657 break;
658 case 0x2: /* xtrb1 */
659 cpu.gr[1] = (cpu.gr[RD]>>16) & 0xFF;
660 NEW_C (cpu.gr[RD] != 0);
661 break;
662 case 0x3: /* xtrb0 */
663 cpu.gr[1] = (cpu.gr[RD]>>24) & 0xFF;
664 NEW_C (cpu.gr[RD] != 0);
665 break;
666 case 0x4: /* zextb */
667 cpu.gr[RD] &= 0x000000FF;
668 break;
669 case 0x5: /* sextb */
670 {
671 long tmp;
672 tmp = cpu.gr[RD];
673 tmp <<= 24;
674 tmp >>= 24;
675 cpu.gr[RD] = tmp;
676 }
677 break;
678 case 0x6: /* zexth */
679 cpu.gr[RD] &= 0x0000FFFF;
680 break;
681 case 0x7: /* sexth */
682 {
683 long tmp;
684 tmp = cpu.gr[RD];
685 tmp <<= 16;
686 tmp >>= 16;
687 cpu.gr[RD] = tmp;
688 }
689 break;
690 case 0x8: /* declt */
691 --cpu.gr[RD];
692 NEW_C ((long)cpu.gr[RD] < 0);
693 break;
694 case 0x9: /* tstnbz */
695 {
696 word tmp = cpu.gr[RD];
697 NEW_C ((tmp & 0xFF000000) != 0 &&
698 (tmp & 0x00FF0000) != 0 && (tmp & 0x0000FF00) != 0 &&
699 (tmp & 0x000000FF) != 0);
700 }
701 break;
702 case 0xA: /* decgt */
703 --cpu.gr[RD];
704 NEW_C ((long)cpu.gr[RD] > 0);
705 break;
706 case 0xB: /* decne */
707 --cpu.gr[RD];
708 NEW_C ((long)cpu.gr[RD] != 0);
709 break;
710 case 0xC: /* clrt */
711 if (C_ON())
712 cpu.gr[RD] = 0;
713 break;
714 case 0xD: /* clrf */
715 if (C_OFF())
716 cpu.gr[RD] = 0;
717 break;
718 case 0xE: /* abs */
719 if (cpu.gr[RD] & 0x80000000)
720 cpu.gr[RD] = ~cpu.gr[RD] + 1;
721 break;
722 case 0xF: /* not */
723 cpu.gr[RD] = ~cpu.gr[RD];
724 break;
725 }
726 break;
727 case 0x02: /* movt */
728 if (C_ON())
729 cpu.gr[RD] = cpu.gr[RS];
730 break;
731 case 0x03: /* mult */
732 /* consume 2 bits per cycle from rs, until rs is 0 */
733 {
734 unsigned int t = cpu.gr[RS];
735 int ticks;
736 for (ticks = 0; t != 0 ; t >>= 2)
737 ticks++;
738 bonus_cycles += ticks;
739 }
740 bonus_cycles += 2; /* min. is 3, so add 2, plus ticks above */
741 if (tracing)
742 fprintf (stderr, " mult %lx by %lx to give %lx",
743 cpu.gr[RD], cpu.gr[RS], cpu.gr[RD] * cpu.gr[RS]);
744 cpu.gr[RD] = cpu.gr[RD] * cpu.gr[RS];
745 break;
746 case 0x04: /* loopt */
747 if (C_ON())
748 {
749 pc += (IMM4 << 1) - 32;
750 bonus_cycles ++;
751 needfetch = 1;
752 }
753 --cpu.gr[RS]; /* not RD! */
754 NEW_C (((long)cpu.gr[RS]) > 0);
755 break;
756 case 0x05: /* subu */
757 cpu.gr[RD] -= cpu.gr[RS];
758 break;
759 case 0x06: /* addc */
760 {
761 unsigned long tmp, a, b;
762 a = cpu.gr[RD];
763 b = cpu.gr[RS];
764 cpu.gr[RD] = a + b + C_VALUE ();
765 tmp = iu_carry (a, b, C_VALUE ());
766 NEW_C (tmp);
767 }
768 break;
769 case 0x07: /* subc */
770 {
771 unsigned long tmp, a, b;
772 a = cpu.gr[RD];
773 b = cpu.gr[RS];
774 cpu.gr[RD] = a - b + C_VALUE () - 1;
775 tmp = iu_carry (a,~b, C_VALUE ());
776 NEW_C (tmp);
777 }
778 break;
779 case 0x08: /* illegal */
780 case 0x09: /* illegal*/
781 cpu.asregs.exception = SIGILL;
782 break;
783 case 0x0A: /* movf */
784 if (C_OFF())
785 cpu.gr[RD] = cpu.gr[RS];
786 break;
787 case 0x0B: /* lsr */
788 {
789 unsigned long dst, src;
790 dst = cpu.gr[RD];
791 src = cpu.gr[RS];
792 /* We must not rely solely upon the native shift operations, since they
793 may not match the M*Core's behaviour on boundary conditions. */
794 dst = src > 31 ? 0 : dst >> src;
795 cpu.gr[RD] = dst;
796 }
797 break;
798 case 0x0C: /* cmphs */
 799 	  NEW_C ((unsigned long)cpu.gr[RD] >=
800 (unsigned long)cpu.gr[RS]);
801 break;
802 case 0x0D: /* cmplt */
803 NEW_C ((long)cpu.gr[RD] < (long)cpu.gr[RS]);
804 break;
805 case 0x0E: /* tst */
806 NEW_C ((cpu.gr[RD] & cpu.gr[RS]) != 0);
807 break;
808 case 0x0F: /* cmpne */
809 NEW_C (cpu.gr[RD] != cpu.gr[RS]);
810 break;
811 case 0x10: case 0x11: /* mfcr */
812 {
813 unsigned r;
814 r = IMM5;
815 if (r <= LAST_VALID_CREG)
816 cpu.gr[RD] = cpu.cr[r];
817 else
818 cpu.asregs.exception = SIGILL;
819 }
820 break;
821
822 case 0x12: /* mov */
823 cpu.gr[RD] = cpu.gr[RS];
824 if (tracing)
825 fprintf (stderr, "MOV %lx into reg %d", cpu.gr[RD], RD);
826 break;
827
828 case 0x13: /* bgenr */
829 if (cpu.gr[RS] & 0x20)
830 cpu.gr[RD] = 0;
831 else
832 cpu.gr[RD] = 1 << (cpu.gr[RS] & 0x1F);
833 break;
834
835 case 0x14: /* rsub */
836 cpu.gr[RD] = cpu.gr[RS] - cpu.gr[RD];
837 break;
838
839 case 0x15: /* ixw */
840 cpu.gr[RD] += cpu.gr[RS]<<2;
841 break;
842
843 case 0x16: /* and */
844 cpu.gr[RD] &= cpu.gr[RS];
845 break;
846
847 case 0x17: /* xor */
848 cpu.gr[RD] ^= cpu.gr[RS];
849 break;
850
851 case 0x18: case 0x19: /* mtcr */
852 {
853 unsigned r;
854 r = IMM5;
855 if (r <= LAST_VALID_CREG)
856 cpu.cr[r] = cpu.gr[RD];
857 else
858 cpu.asregs.exception = SIGILL;
859
860 /* we might have changed register sets... */
861 if (SR_AF ())
862 cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
863 else
864 cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
865 }
866 break;
867
868 case 0x1A: /* asr */
869 /* We must not rely solely upon the native shift operations, since they
870 may not match the M*Core's behaviour on boundary conditions. */
871 if (cpu.gr[RS] > 30)
872 cpu.gr[RD] = ((long) cpu.gr[RD]) < 0 ? -1 : 0;
873 else
874 cpu.gr[RD] = (long) cpu.gr[RD] >> cpu.gr[RS];
875 break;
876
877 case 0x1B: /* lsl */
878 /* We must not rely solely upon the native shift operations, since they
879 may not match the M*Core's behaviour on boundary conditions. */
880 cpu.gr[RD] = cpu.gr[RS] > 31 ? 0 : cpu.gr[RD] << cpu.gr[RS];
881 break;
882
883 case 0x1C: /* addu */
884 cpu.gr[RD] += cpu.gr[RS];
885 break;
886
887 case 0x1D: /* ixh */
888 cpu.gr[RD] += cpu.gr[RS] << 1;
889 break;
890
891 case 0x1E: /* or */
892 cpu.gr[RD] |= cpu.gr[RS];
893 break;
894
895 case 0x1F: /* andn */
896 cpu.gr[RD] &= ~cpu.gr[RS];
897 break;
898 case 0x20: case 0x21: /* addi */
899 cpu.gr[RD] =
900 cpu.gr[RD] + (IMM5 + 1);
901 break;
902 case 0x22: case 0x23: /* cmplti */
903 {
904 int tmp = (IMM5 + 1);
905 if (cpu.gr[RD] < tmp)
906 {
907 SET_C();
908 }
909 else
910 {
911 CLR_C();
912 }
913 }
914 break;
915 case 0x24: case 0x25: /* subi */
916 cpu.gr[RD] =
917 cpu.gr[RD] - (IMM5 + 1);
918 break;
919 case 0x26: case 0x27: /* illegal */
920 cpu.asregs.exception = SIGILL;
921 break;
922 case 0x28: case 0x29: /* rsubi */
923 cpu.gr[RD] =
924 IMM5 - cpu.gr[RD];
925 break;
926 case 0x2A: case 0x2B: /* cmpnei */
927 if (cpu.gr[RD] != IMM5)
928 {
929 SET_C();
930 }
931 else
932 {
933 CLR_C();
934 }
935 break;
936
937 case 0x2C: case 0x2D: /* bmaski, divu */
938 {
939 unsigned imm = IMM5;
940
941 if (imm == 1)
942 {
943 int exe;
944 int rxnlz, r1nlz;
945 unsigned int rx, r1;
946
947 rx = cpu.gr[RD];
948 r1 = cpu.gr[1];
949 exe = 0;
950
951 /* unsigned divide */
952 cpu.gr[RD] = (word) ((unsigned int) cpu.gr[RD] / (unsigned int)cpu.gr[1] );
953
954 /* compute bonus_cycles for divu */
955 for (r1nlz = 0; ((r1 & 0x80000000) == 0) && (r1nlz < 32); r1nlz ++)
956 r1 = r1 << 1;
957
958 for (rxnlz = 0; ((rx & 0x80000000) == 0) && (rxnlz < 32); rxnlz ++)
959 rx = rx << 1;
960
961 if (r1nlz < rxnlz)
962 exe += 4;
963 else
964 exe += 5 + r1nlz - rxnlz;
965
966 if (exe >= (2 * memcycles - 1))
967 {
968 bonus_cycles += exe - (2 * memcycles) + 1;
969 }
970 }
971 else if (imm == 0 || imm >= 8)
972 {
973 /* bmaski */
974 if (imm == 0)
975 cpu.gr[RD] = -1;
976 else
977 cpu.gr[RD] = (1 << imm) - 1;
978 }
979 else
980 {
981 /* illegal */
982 cpu.asregs.exception = SIGILL;
983 }
984 }
985 break;
986 case 0x2E: case 0x2F: /* andi */
987 cpu.gr[RD] = cpu.gr[RD] & IMM5;
988 break;
989 case 0x30: case 0x31: /* bclri */
990 cpu.gr[RD] = cpu.gr[RD] & ~(1<<IMM5);
991 break;
992 case 0x32: case 0x33: /* bgeni, divs */
993 {
994 unsigned imm = IMM5;
995 if (imm == 1)
996 {
997 int exe,sc;
998 int rxnlz, r1nlz;
999 signed int rx, r1;
1000
 1001 		  /* compute bonus_cycles for divs */
1002 rx = cpu.gr[RD];
1003 r1 = cpu.gr[1];
1004 exe = 0;
1005
1006 if (((rx < 0) && (r1 > 0)) || ((rx >= 0) && (r1 < 0)))
1007 sc = 1;
1008 else
1009 sc = 0;
1010
1011 rx = abs (rx);
1012 r1 = abs (r1);
1013
1014 /* signed divide, general registers are of type int, so / op is OK */
1015 cpu.gr[RD] = cpu.gr[RD] / cpu.gr[1];
1016
1017 for (r1nlz = 0; ((r1 & 0x80000000) == 0) && (r1nlz < 32) ; r1nlz ++ )
1018 r1 = r1 << 1;
1019
1020 for (rxnlz = 0; ((rx & 0x80000000) == 0) && (rxnlz < 32) ; rxnlz ++ )
1021 rx = rx << 1;
1022
1023 if (r1nlz < rxnlz)
1024 exe += 5;
1025 else
1026 exe += 6 + r1nlz - rxnlz + sc;
1027
1028 if (exe >= (2 * memcycles - 1))
1029 {
1030 bonus_cycles += exe - (2 * memcycles) + 1;
1031 }
1032 }
1033 else if (imm >= 7)
1034 {
1035 /* bgeni */
1036 cpu.gr[RD] = (1 << IMM5);
1037 }
1038 else
1039 {
1040 /* illegal */
1041 cpu.asregs.exception = SIGILL;
1042 }
1043 break;
1044 }
1045 case 0x34: case 0x35: /* bseti */
1046 cpu.gr[RD] = cpu.gr[RD] | (1 << IMM5);
1047 break;
1048 case 0x36: case 0x37: /* btsti */
1049 NEW_C (cpu.gr[RD] >> IMM5);
1050 break;
1051 case 0x38: case 0x39: /* xsr, rotli */
1052 {
1053 unsigned imm = IMM5;
1054 unsigned long tmp = cpu.gr[RD];
1055 if (imm == 0)
1056 {
1057 word cbit;
1058 cbit = C_VALUE();
1059 NEW_C (tmp);
1060 cpu.gr[RD] = (cbit << 31) | (tmp >> 1);
1061 }
1062 else
1063 cpu.gr[RD] = (tmp << imm) | (tmp >> (32 - imm));
1064 }
1065 break;
1066 case 0x3A: case 0x3B: /* asrc, asri */
1067 {
1068 unsigned imm = IMM5;
1069 long tmp = cpu.gr[RD];
1070 if (imm == 0)
1071 {
1072 NEW_C (tmp);
1073 cpu.gr[RD] = tmp >> 1;
1074 }
1075 else
1076 cpu.gr[RD] = tmp >> imm;
1077 }
1078 break;
1079 case 0x3C: case 0x3D: /* lslc, lsli */
1080 {
1081 unsigned imm = IMM5;
1082 unsigned long tmp = cpu.gr[RD];
1083 if (imm == 0)
1084 {
1085 NEW_C (tmp >> 31);
1086 cpu.gr[RD] = tmp << 1;
1087 }
1088 else
1089 cpu.gr[RD] = tmp << imm;
1090 }
1091 break;
1092 case 0x3E: case 0x3F: /* lsrc, lsri */
1093 {
1094 unsigned imm = IMM5;
1095 unsigned long tmp = cpu.gr[RD];
1096 if (imm == 0)
1097 {
1098 NEW_C (tmp);
1099 cpu.gr[RD] = tmp >> 1;
1100 }
1101 else
1102 cpu.gr[RD] = tmp >> imm;
1103 }
1104 break;
1105 case 0x40: case 0x41: case 0x42: case 0x43:
1106 case 0x44: case 0x45: case 0x46: case 0x47:
1107 case 0x48: case 0x49: case 0x4A: case 0x4B:
1108 case 0x4C: case 0x4D: case 0x4E: case 0x4F:
1109 cpu.asregs.exception = SIGILL;
1110 break;
1111 case 0x50:
1112 util (sd, inst & 0xFF);
1113 break;
1114 case 0x51: case 0x52: case 0x53:
1115 case 0x54: case 0x55: case 0x56: case 0x57:
1116 case 0x58: case 0x59: case 0x5A: case 0x5B:
1117 case 0x5C: case 0x5D: case 0x5E: case 0x5F:
1118 cpu.asregs.exception = SIGILL;
1119 break;
1120 case 0x60: case 0x61: case 0x62: case 0x63: /* movi */
1121 case 0x64: case 0x65: case 0x66: case 0x67:
1122 cpu.gr[RD] = (inst >> 4) & 0x7F;
1123 break;
1124 case 0x68: case 0x69: case 0x6A: case 0x6B:
1125 case 0x6C: case 0x6D: case 0x6E: case 0x6F: /* illegal */
1126 cpu.asregs.exception = SIGILL;
1127 break;
1128 case 0x71: case 0x72: case 0x73:
1129 case 0x74: case 0x75: case 0x76: case 0x77:
1130 case 0x78: case 0x79: case 0x7A: case 0x7B:
1131 case 0x7C: case 0x7D: case 0x7E: /* lrw */
1132 cpu.gr[RX] = rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC);
1133 if (tracing)
1134 fprintf (stderr, "LRW of 0x%x from 0x%lx to reg %d",
1135 rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC),
1136 (pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC, RX);
1137 memops++;
1138 break;
1139 case 0x7F: /* jsri */
1140 cpu.gr[15] = pc;
1141 if (tracing)
1142 fprintf (stderr,
1143 "func call: r2 = %lx r3 = %lx r4 = %lx r5 = %lx r6 = %lx r7 = %lx\n",
1144 cpu.gr[2], cpu.gr[3], cpu.gr[4], cpu.gr[5], cpu.gr[6], cpu.gr[7]);
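 	  /* Fall through -- jsri continues into jmpi to perform the
 	     literal-pool indirect jump.  */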
1145 case 0x70: /* jmpi */
1146 pc = rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC);
1147 memops++;
1148 bonus_cycles++;
1149 needfetch = 1;
1150 break;
1151
1152 case 0x80: case 0x81: case 0x82: case 0x83:
1153 case 0x84: case 0x85: case 0x86: case 0x87:
1154 case 0x88: case 0x89: case 0x8A: case 0x8B:
1155 case 0x8C: case 0x8D: case 0x8E: case 0x8F: /* ld */
1156 cpu.gr[RX] = rlat (cpu.gr[RD] + ((inst >> 2) & 0x003C));
1157 if (tracing)
1158 fprintf (stderr, "load reg %d from 0x%lx with 0x%lx",
1159 RX,
1160 cpu.gr[RD] + ((inst >> 2) & 0x003C), cpu.gr[RX]);
1161 memops++;
1162 break;
1163 case 0x90: case 0x91: case 0x92: case 0x93:
1164 case 0x94: case 0x95: case 0x96: case 0x97:
1165 case 0x98: case 0x99: case 0x9A: case 0x9B:
1166 case 0x9C: case 0x9D: case 0x9E: case 0x9F: /* st */
1167 wlat (cpu.gr[RD] + ((inst >> 2) & 0x003C), cpu.gr[RX]);
1168 if (tracing)
1169 fprintf (stderr, "store reg %d (containing 0x%lx) to 0x%lx",
1170 RX, cpu.gr[RX],
1171 cpu.gr[RD] + ((inst >> 2) & 0x003C));
1172 memops++;
1173 break;
1174 case 0xA0: case 0xA1: case 0xA2: case 0xA3:
1175 case 0xA4: case 0xA5: case 0xA6: case 0xA7:
1176 case 0xA8: case 0xA9: case 0xAA: case 0xAB:
1177 case 0xAC: case 0xAD: case 0xAE: case 0xAF: /* ld.b */
1178 cpu.gr[RX] = rbat (cpu.gr[RD] + RS);
1179 memops++;
1180 break;
1181 case 0xB0: case 0xB1: case 0xB2: case 0xB3:
1182 case 0xB4: case 0xB5: case 0xB6: case 0xB7:
1183 case 0xB8: case 0xB9: case 0xBA: case 0xBB:
1184 case 0xBC: case 0xBD: case 0xBE: case 0xBF: /* st.b */
1185 wbat (cpu.gr[RD] + RS, cpu.gr[RX]);
1186 memops++;
1187 break;
1188 case 0xC0: case 0xC1: case 0xC2: case 0xC3:
1189 case 0xC4: case 0xC5: case 0xC6: case 0xC7:
1190 case 0xC8: case 0xC9: case 0xCA: case 0xCB:
1191 case 0xCC: case 0xCD: case 0xCE: case 0xCF: /* ld.h */
1192 cpu.gr[RX] = rhat (cpu.gr[RD] + ((inst >> 3) & 0x001E));
1193 memops++;
1194 break;
1195 case 0xD0: case 0xD1: case 0xD2: case 0xD3:
1196 case 0xD4: case 0xD5: case 0xD6: case 0xD7:
1197 case 0xD8: case 0xD9: case 0xDA: case 0xDB:
1198 case 0xDC: case 0xDD: case 0xDE: case 0xDF: /* st.h */
1199 what (cpu.gr[RD] + ((inst >> 3) & 0x001E), cpu.gr[RX]);
1200 memops++;
1201 break;
1202 case 0xE8: case 0xE9: case 0xEA: case 0xEB:
1203 case 0xEC: case 0xED: case 0xEE: case 0xEF: /* bf */
1204 if (C_OFF())
1205 {
1206 int disp;
1207 disp = inst & 0x03FF;
1208 if (inst & 0x0400)
1209 disp |= 0xFFFFFC00;
1210 pc += disp<<1;
1211 bonus_cycles++;
1212 needfetch = 1;
1213 }
1214 break;
1215 case 0xE0: case 0xE1: case 0xE2: case 0xE3:
1216 case 0xE4: case 0xE5: case 0xE6: case 0xE7: /* bt */
1217 if (C_ON())
1218 {
1219 int disp;
1220 disp = inst & 0x03FF;
1221 if (inst & 0x0400)
1222 disp |= 0xFFFFFC00;
1223 pc += disp<<1;
1224 bonus_cycles++;
1225 needfetch = 1;
1226 }
1227 break;
1228
1229 case 0xF8: case 0xF9: case 0xFA: case 0xFB:
1230 case 0xFC: case 0xFD: case 0xFE: case 0xFF: /* bsr */
1231 cpu.gr[15] = pc;
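 	  /* Fall through -- bsr continues into br to take the branch.  */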
1232 case 0xF0: case 0xF1: case 0xF2: case 0xF3:
1233 case 0xF4: case 0xF5: case 0xF6: case 0xF7: /* br */
1234 {
1235 int disp;
1236 disp = inst & 0x03FF;
1237 if (inst & 0x0400)
1238 disp |= 0xFFFFFC00;
1239 pc += disp<<1;
1240 bonus_cycles++;
1241 needfetch = 1;
1242 }
1243 break;
1244
1245 }
1246
1247 if (tracing)
1248 fprintf (stderr, "\n");
1249
1250 if (needfetch)
1251 {
1252 ibuf = rlat (pc & 0xFFFFFFFC);
1253 needfetch = 0;
1254 }
1255 }
1256 while (!cpu.asregs.exception);
1257
1258 /* Hide away the things we've cached while executing. */
1259 CPU_PC_SET (scpu, pc);
1260 cpu.asregs.insts += insts; /* instructions done ... */
1261 cpu.asregs.cycles += insts; /* and each takes a cycle */
1262 cpu.asregs.cycles += bonus_cycles; /* and extra cycles for branches */
1263 cpu.asregs.cycles += memops * memcycles; /* and memop cycle delays */
1264 }
1265
1266 int
1267 sim_store_register (SIM_DESC sd, int rn, unsigned char *memory, int length)
1268 {
1269 if (rn < NUM_MCORE_REGS && rn >= 0)
1270 {
1271 if (length == 4)
1272 {
1273 long ival;
1274
1275 /* misalignment safe */
1276 ival = mcore_extract_unsigned_integer (memory, 4);
1277 cpu.asints[rn] = ival;
1278 }
1279
1280 return 4;
1281 }
1282 else
1283 return 0;
1284 }
1285
1286 int
1287 sim_fetch_register (SIM_DESC sd, int rn, unsigned char *memory, int length)
1288 {
1289 if (rn < NUM_MCORE_REGS && rn >= 0)
1290 {
1291 if (length == 4)
1292 {
1293 long ival = cpu.asints[rn];
1294
1295 /* misalignment-safe */
1296 mcore_store_unsigned_integer (memory, 4, ival);
1297 }
1298
1299 return 4;
1300 }
1301 else
1302 return 0;
1303 }
1304
1305 void
1306 sim_stop_reason (SIM_DESC sd, enum sim_stop *reason, int *sigrc)
1307 {
1308 if (cpu.asregs.exception == SIGQUIT)
1309 {
1310 * reason = sim_exited;
1311 * sigrc = cpu.gr[PARM1];
1312 }
1313 else
1314 {
1315 * reason = sim_stopped;
1316 * sigrc = cpu.asregs.exception;
1317 }
1318 }
1319
1320 void
1321 sim_info (SIM_DESC sd, int verbose)
1322 {
1323 #ifdef WATCHFUNCTIONS
1324 int w, wcyc;
1325 #endif
1326 double virttime = cpu.asregs.cycles / 36.0e6;
1327 host_callback *callback = STATE_CALLBACK (sd);
1328
1329 callback->printf_filtered (callback, "\n\n# instructions executed %10d\n",
1330 cpu.asregs.insts);
1331 callback->printf_filtered (callback, "# cycles %10d\n",
1332 cpu.asregs.cycles);
1333 callback->printf_filtered (callback, "# pipeline stalls %10d\n",
1334 cpu.asregs.stalls);
1335 callback->printf_filtered (callback, "# virtual time taken %10.4f\n",
1336 virttime);
1337
1338 #ifdef WATCHFUNCTIONS
1339 callback->printf_filtered (callback, "\nNumber of watched functions: %d\n",
1340 ENDWL);
1341
1342 wcyc = 0;
1343
1344 for (w = 1; w <= ENDWL; w++)
1345 {
1346 callback->printf_filtered (callback, "WL = %s %8x\n",WLstr[w],WL[w]);
1347 callback->printf_filtered (callback, " calls = %d, cycles = %d\n",
1348 WLcnts[w],WLcyc[w]);
1349
1350 if (WLcnts[w] != 0)
1351 callback->printf_filtered (callback,
1352 " maxcpc = %d, mincpc = %d, avecpc = %d\n",
1353 WLmax[w],WLmin[w],WLcyc[w]/WLcnts[w]);
1354 wcyc += WLcyc[w];
1355 }
1356
1357 callback->printf_filtered (callback,
1358 "Total cycles for watched functions: %d\n",wcyc);
1359 #endif
1360 }
1361
1362 static sim_cia
1363 mcore_pc_get (sim_cpu *cpu)
1364 {
1365 return cpu->pc;
1366 }
1367
1368 static void
1369 mcore_pc_set (sim_cpu *cpu, sim_cia pc)
1370 {
1371 cpu->pc = pc;
1372 }
1373
1374 static void
1375 free_state (SIM_DESC sd)
1376 {
1377 if (STATE_MODULES (sd) != NULL)
1378 sim_module_uninstall (sd);
1379 sim_cpu_free_all (sd);
1380 sim_state_free (sd);
1381 }
1382
1383 SIM_DESC
1384 sim_open (SIM_OPEN_KIND kind, host_callback *cb, struct bfd *abfd, char **argv)
1385 {
1386 int i;
1387 SIM_DESC sd = sim_state_alloc (kind, cb);
1388 SIM_ASSERT (STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);
1389
1390 /* The cpu data is kept in a separately allocated chunk of memory. */
1391 if (sim_cpu_alloc_all (sd, 1, /*cgen_cpu_max_extra_bytes ()*/0) != SIM_RC_OK)
1392 {
1393 free_state (sd);
1394 return 0;
1395 }
1396
1397 if (sim_pre_argv_init (sd, argv[0]) != SIM_RC_OK)
1398 {
1399 free_state (sd);
1400 return 0;
1401 }
1402
1403 /* getopt will print the error message so we just have to exit if this fails.
1404 FIXME: Hmmm... in the case of gdb we need getopt to call
1405 print_filtered. */
1406 if (sim_parse_args (sd, argv) != SIM_RC_OK)
1407 {
1408 free_state (sd);
1409 return 0;
1410 }
1411
 1412   /* Check for/establish the reference program image. */
1413 if (sim_analyze_program (sd,
1414 (STATE_PROG_ARGV (sd) != NULL
1415 ? *STATE_PROG_ARGV (sd)
1416 : NULL), abfd) != SIM_RC_OK)
1417 {
1418 free_state (sd);
1419 return 0;
1420 }
1421
1422 /* Configure/verify the target byte order and other runtime
1423 configuration options. */
1424 if (sim_config (sd) != SIM_RC_OK)
1425 {
1426 sim_module_uninstall (sd);
1427 return 0;
1428 }
1429
1430 if (sim_post_argv_init (sd) != SIM_RC_OK)
1431 {
1432 /* Uninstall the modules to avoid memory leaks,
1433 file descriptor leaks, etc. */
1434 sim_module_uninstall (sd);
1435 return 0;
1436 }
1437
1438 /* CPU specific initialization. */
1439 for (i = 0; i < MAX_NR_PROCESSORS; ++i)
1440 {
1441 SIM_CPU *cpu = STATE_CPU (sd, i);
1442
1443 CPU_PC_FETCH (cpu) = mcore_pc_get;
1444 CPU_PC_STORE (cpu) = mcore_pc_set;
1445
1446 set_initial_gprs (cpu); /* Reset the GPR registers. */
1447 }
1448
 1449   /* Default to an 8 Mbyte (== 2^23) memory space. */
1450 sim_do_commandf (sd, "memory-size %#x", DEFAULT_MEMORY_SIZE);
1451
1452 return sd;
1453 }
1454
1455 SIM_RC
1456 sim_create_inferior (SIM_DESC sd, struct bfd *prog_bfd, char **argv, char **env)
1457 {
1458 SIM_CPU *scpu = STATE_CPU (sd, 0);
1459 char ** avp;
1460 int nargs = 0;
1461 int nenv = 0;
1462 int s_length;
1463 int l;
1464 unsigned long strings;
1465 unsigned long pointers;
1466 unsigned long hi_stack;
1467
1468
1469 /* Set the initial register set. */
1470 set_initial_gprs (scpu);
1471
1472 hi_stack = DEFAULT_MEMORY_SIZE - 4;
1473 CPU_PC_SET (scpu, bfd_get_start_address (prog_bfd));
1474
1475 /* Calculate the argument and environment strings. */
1476 s_length = 0;
1477 nargs = 0;
1478 avp = argv;
1479 while (avp && *avp)
1480 {
1481 l = strlen (*avp) + 1; /* include the null */
1482 s_length += (l + 3) & ~3; /* make it a 4 byte boundary */
1483 nargs++; avp++;
1484 }
1485
1486 nenv = 0;
1487 avp = env;
1488 while (avp && *avp)
1489 {
1490 l = strlen (*avp) + 1; /* include the null */
 1491       s_length += (l + 3) & ~3;	/* make it a 4 byte boundary */
1492 nenv++; avp++;
1493 }
1494
1495 /* Claim some memory for the pointers and strings. */
1496 pointers = hi_stack - sizeof(word) * (nenv+1+nargs+1);
1497 pointers &= ~3; /* must be 4-byte aligned */
1498 cpu.gr[0] = pointers;
1499
1500 strings = cpu.gr[0] - s_length;
1501 strings &= ~3; /* want to make it 4-byte aligned */
1502 cpu.gr[0] = strings;
1503 /* dac fix, the stack address must be 8-byte aligned! */
1504 cpu.gr[0] = cpu.gr[0] - cpu.gr[0] % 8;
1505
1506 /* Loop through the arguments and fill them in. */
1507 cpu.gr[PARM1] = nargs;
1508 if (nargs == 0)
1509 {
1510 /* No strings to fill in. */
1511 cpu.gr[PARM2] = 0;
1512 }
1513 else
1514 {
1515 cpu.gr[PARM2] = pointers;
1516 avp = argv;
1517 while (avp && *avp)
1518 {
1519 /* Save where we're putting it. */
1520 wlat (pointers, strings);
1521
1522 /* Copy the string. */
1523 l = strlen (* avp) + 1;
1524 sim_core_write_buffer (sd, scpu, write_map, *avp, strings, l);
1525
1526 /* Bump the pointers. */
1527 avp++;
1528 pointers += 4;
1529 strings += l+1;
1530 }
1531
1532 /* A null to finish the list. */
1533 wlat (pointers, 0);
1534 pointers += 4;
1535 }
1536
1537 /* Now do the environment pointers. */
1538 if (nenv == 0)
1539 {
1540 /* No strings to fill in. */
1541 cpu.gr[PARM3] = 0;
1542 }
1543 else
1544 {
1545 cpu.gr[PARM3] = pointers;
1546 avp = env;
1547
1548 while (avp && *avp)
1549 {
1550 /* Save where we're putting it. */
1551 wlat (pointers, strings);
1552
1553 /* Copy the string. */
1554 l = strlen (* avp) + 1;
1555 sim_core_write_buffer (sd, scpu, write_map, *avp, strings, l);
1556
1557 /* Bump the pointers. */
1558 avp++;
1559 pointers += 4;
1560 strings += l+1;
1561 }
1562
1563 /* A null to finish the list. */
1564 wlat (pointers, 0);
1565 pointers += 4;
1566 }
1567
1568 return SIM_RC_OK;
1569 }