1 /* Simulator for Motorola's MCore processor
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 Contributed by Cygnus Solutions.
4
5 This file is part of GDB, the GNU debugger.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include <signal.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/times.h>
25 #include <sys/param.h>
26 #include <unistd.h>
27 #include "bfd.h"
28 #include "gdb/callback.h"
29 #include "libiberty.h"
30 #include "gdb/remote-sim.h"
31
32 #include "sim-main.h"
33 #include "sim-base.h"
34 #include "sim-syscall.h"
35 #include "sim-options.h"
36
37 #define target_big_endian (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
38
39
40 static unsigned long
41 mcore_extract_unsigned_integer (unsigned char *addr, int len)
42 {
43 unsigned long retval;
44 unsigned char * p;
45 unsigned char * startaddr = (unsigned char *)addr;
46 unsigned char * endaddr = startaddr + len;
47
48 if (len > (int) sizeof (unsigned long))
49 printf ("That operation is not available on integers of more than %zu bytes.",
50 sizeof (unsigned long));
51
52 /* Start at the most significant end of the integer, and work towards
53 the least significant. */
54 retval = 0;
55
56 if (! target_big_endian)
57 {
58 for (p = endaddr; p > startaddr;)
59 retval = (retval << 8) | * -- p;
60 }
61 else
62 {
63 for (p = startaddr; p < endaddr;)
64 retval = (retval << 8) | * p ++;
65 }
66
67 return retval;
68 }
69
70 static void
71 mcore_store_unsigned_integer (unsigned char *addr, int len, unsigned long val)
72 {
73 unsigned char * p;
74 unsigned char * startaddr = (unsigned char *)addr;
75 unsigned char * endaddr = startaddr + len;
76
77 if (! target_big_endian)
78 {
79 for (p = startaddr; p < endaddr;)
80 {
81 * p ++ = val & 0xff;
82 val >>= 8;
83 }
84 }
85 else
86 {
87 for (p = endaddr; p > startaddr;)
88 {
89 * -- p = val & 0xff;
90 val >>= 8;
91 }
92 }
93 }
94
95 /* The machine state.
96 This state is maintained in host byte order. The
97 fetch/store register functions must translate between host
98 byte order and the target processor byte order.
99 Keeping this data in target byte order would simplify the register
100 read/write functions; keeping it in host (native) order improves
101 the performance of the simulator. Simulation speed is deemed more
102 important. */
103 /* TODO: Should be moved to sim-main.h:sim_cpu. */
104
105 /* The ordering of the mcore_regset structure is matched in the
106 gdb/config/mcore/tm-mcore.h file in the REGISTER_NAMES macro. */
107 struct mcore_regset
108 {
109 word gregs [16]; /* primary registers */
110 word alt_gregs [16]; /* alt register file */
111 word cregs [32]; /* control registers */
112 int ticks;
113 int stalls;
114 int cycles;
115 int insts;
116 int exception;
117 word * active_gregs;
118 };
119
120 union
121 {
122 struct mcore_regset asregs;
123 word asints [1]; /* but accessed larger... */
124 } cpu;
125
126 #define LAST_VALID_CREG 32 /* only 0..12 implemented */
127 #define NUM_MCORE_REGS (16 + 16 + LAST_VALID_CREG + 1)
128
129 static int memcycles = 1;
130
131 #define gr asregs.active_gregs
132 #define cr asregs.cregs
133 #define sr asregs.cregs[0]
134 #define vbr asregs.cregs[1]
135 #define esr asregs.cregs[2]
136 #define fsr asregs.cregs[3]
137 #define epc asregs.cregs[4]
138 #define fpc asregs.cregs[5]
139 #define ss0 asregs.cregs[6]
140 #define ss1 asregs.cregs[7]
141 #define ss2 asregs.cregs[8]
142 #define ss3 asregs.cregs[9]
143 #define ss4 asregs.cregs[10]
144 #define gcr asregs.cregs[11]
145 #define gsr asregs.cregs[12]
146
147 /* Manipulate the carry bit. */
148 #define C_ON() (cpu.sr & 1)
149 #define C_VALUE() (cpu.sr & 1)
150 #define C_OFF() ((cpu.sr & 1) == 0)
151 #define SET_C() {cpu.sr |= 1;}
152 #define CLR_C() {cpu.sr &= 0xfffffffe;}
153 #define NEW_C(v) {CLR_C(); cpu.sr |= ((v) & 1);}
154
155 #define SR_AF() ((cpu.sr >> 1) & 1)
156
157 #define TRAPCODE 1 /* r1 holds which function we want */
158 #define PARM1 2 /* first parameter */
159 #define PARM2 3
160 #define PARM3 4
161 #define PARM4 5
162 #define RET1 2 /* register for return values. */
163
164 /* Default to an 8 Mbyte (== 2^23) memory space. */
165 #define DEFAULT_MEMORY_SIZE 0x800000
166
167 static void
168 set_initial_gprs (SIM_CPU *scpu)
169 {
170 int i;
171 long space;
172
173 /* Set up machine just out of reset. */
174 CPU_PC_SET (scpu, 0);
175 cpu.sr = 0;
176
177 /* Clean out the GPRs and alternate GPRs. */
178 for (i = 0; i < 16; i++)
179 {
180 cpu.asregs.gregs[i] = 0;
181 cpu.asregs.alt_gregs[i] = 0;
182 }
183
184 /* Make our register set point to the right place. */
185 if (SR_AF())
186 cpu.asregs.active_gregs = &cpu.asregs.alt_gregs[0];
187 else
188 cpu.asregs.active_gregs = &cpu.asregs.gregs[0];
189
190 /* ABI specifies initial values for these registers. */
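/* gr[0] serves as the stack pointer, so it starts at the top of the simulated memory. */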
191 cpu.gr[0] = DEFAULT_MEMORY_SIZE - 4;
192
193 /* dac fix, the stack address must be 8-byte aligned! */
194 cpu.gr[0] = cpu.gr[0] - cpu.gr[0] % 8;
195 cpu.gr[PARM1] = 0;
196 cpu.gr[PARM2] = 0;
197 cpu.gr[PARM3] = 0;
198 cpu.gr[PARM4] = cpu.gr[0];
199 }
200
201 /* Simulate a monitor trap. */
202
203 static void
204 handle_trap1 (SIM_DESC sd)
205 {
206 host_callback *cb = STATE_CALLBACK (sd);
207 CB_SYSCALL sc;
208
209 CB_SYSCALL_INIT (&sc);
210
211 sc.func = cpu.gr[TRAPCODE];
212 sc.arg1 = cpu.gr[PARM1];
213 sc.arg2 = cpu.gr[PARM2];
214 sc.arg3 = cpu.gr[PARM3];
215 sc.arg4 = cpu.gr[PARM4];
216
217 sc.p1 = (PTR) sd;
218 sc.p2 = (PTR) STATE_CPU (sd, 0);
219 sc.read_mem = sim_syscall_read_mem;
220 sc.write_mem = sim_syscall_write_mem;
221
222 cb_syscall (cb, &sc);
223
224 /* XXX: We don't pass back the actual errno value. */
225 cpu.gr[RET1] = sc.result;
226 }
227
228 static void
229 process_stub (SIM_DESC sd, int what)
230 {
231 /* These values should match those in libgloss/mcore/syscalls.s. */
232 switch (what)
233 {
234 case 3: /* _read */
235 case 4: /* _write */
236 case 5: /* _open */
237 case 6: /* _close */
238 case 10: /* _unlink */
239 case 19: /* _lseek */
240 case 43: /* _times */
241 cpu.gr [TRAPCODE] = what;
242 handle_trap1 (sd);
243 break;
244
245 default:
246 if (STATE_VERBOSE_P (sd))
247 fprintf (stderr, "Unhandled stub opcode: %d\n", what);
248 break;
249 }
250 }
251
252 static void
253 util (SIM_DESC sd, unsigned what)
254 {
255 switch (what)
256 {
257 case 0: /* exit */
258 cpu.asregs.exception = SIGQUIT;
259 break;
260
261 case 1: /* printf */
262 if (STATE_VERBOSE_P (sd))
263 fprintf (stderr, "WARNING: printf unimplemented\n");
264 break;
265
266 case 2: /* scanf */
267 if (STATE_VERBOSE_P (sd))
268 fprintf (stderr, "WARNING: scanf unimplemented\n");
269 break;
270
271 case 3: /* utime */
272 cpu.gr[RET1] = cpu.asregs.insts;
273 break;
274
275 case 0xFF:
276 process_stub (sd, cpu.gr[1]);
277 break;
278
279 default:
280 if (STATE_VERBOSE_P (sd))
281 fprintf (stderr, "Unhandled util code: %x\n", what);
282 break;
283 }
284 }
285
286 /* For figuring out whether we carried; addc/subc use this. */
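/* The sum is formed in 16-bit halves so the carry out of bit 31 is still
   visible even if the host's unsigned long is only 32 bits wide. */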
287 static int
288 iu_carry (unsigned long a, unsigned long b, int cin)
289 {
290 unsigned long x;
291
292 x = (a & 0xffff) + (b & 0xffff) + cin;
293 x = (x >> 16) + (a >> 16) + (b >> 16);
294 x >>= 16;
295
296 return (x != 0);
297 }
298
299 /* TODO: Convert to common watchpoints. */
300 #undef WATCHFUNCTIONS
301 #ifdef WATCHFUNCTIONS
302
303 #define MAXWL 80
304 word WL[MAXWL];
305 char * WLstr[MAXWL];
306
307 int ENDWL=0;
308 int WLincyc;
309 int WLcyc[MAXWL];
310 int WLcnts[MAXWL];
311 int WLmax[MAXWL];
312 int WLmin[MAXWL];
313 word WLendpc;
314 int WLbcyc;
315 int WLW;
316 #endif
317
318 #define RD (inst & 0xF)
319 #define RS ((inst >> 4) & 0xF)
320 #define RX ((inst >> 8) & 0xF)
321 #define IMM5 ((inst >> 4) & 0x1F)
322 #define IMM4 ((inst) & 0xF)
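/* Instruction field accessors: rd = bits 3:0, rs = bits 7:4, rx = bits 11:8,
   IMM5 = bits 8:4, IMM4 = bits 3:0. */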
323
324 #define rbat(X) sim_core_read_1 (scpu, 0, read_map, X)
325 #define rhat(X) sim_core_read_2 (scpu, 0, read_map, X)
326 #define rlat(X) sim_core_read_4 (scpu, 0, read_map, X)
327 #define wbat(X, D) sim_core_write_1 (scpu, 0, write_map, X, D)
328 #define what(X, D) sim_core_write_2 (scpu, 0, write_map, X, D)
329 #define wlat(X, D) sim_core_write_4 (scpu, 0, write_map, X, D)
330
331 static int tracing = 0;
332
333 void
334 sim_resume (SIM_DESC sd, int step, int siggnal)
335 {
336 SIM_CPU *scpu = STATE_CPU (sd, 0);
337 int needfetch;
338 word ibuf;
339 word pc;
340 unsigned short inst;
341 int memops;
342 int bonus_cycles;
343 int insts;
344 int w;
345 int cycs;
346 #ifdef WATCHFUNCTIONS
347 word WLhash;
348 #endif
349
350 cpu.asregs.exception = step ? SIGTRAP: 0;
351 pc = CPU_PC_GET (scpu);
352
353 /* Fetch the initial instructions that we'll decode. */
354 ibuf = rlat (pc & 0xFFFFFFFC);
355 needfetch = 0;
356
357 memops = 0;
358 bonus_cycles = 0;
359 insts = 0;
360
361 /* make our register set point to the right place */
362 if (SR_AF ())
363 cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
364 else
365 cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
366
367 #ifdef WATCHFUNCTIONS
368 /* make a hash to speed exec loop, hope it's nonzero */
369 WLhash = 0xFFFFFFFF;
370
371 for (w = 1; w <= ENDWL; w++)
372 WLhash = WLhash & WL[w];
373 #endif
374
375 do
376 {
377 word oldpc;
378
379 insts ++;
380
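/* Instructions are 16 bits wide; ibuf caches an aligned 32-bit word, so
   bit 1 of the PC selects which halfword to execute next. */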
381 if (pc & 02)
382 {
383 if (! target_big_endian)
384 inst = ibuf >> 16;
385 else
386 inst = ibuf & 0xFFFF;
387 needfetch = 1;
388 }
389 else
390 {
391 if (! target_big_endian)
392 inst = ibuf & 0xFFFF;
393 else
394 inst = ibuf >> 16;
395 }
396
397 #ifdef WATCHFUNCTIONS
398 /* Now scan the list of watch addresses; on a match, count it, note the
399 return address, and count cycles until the PC reaches that address. */
400
401 if ((WLincyc == 1) && (pc == WLendpc))
402 {
403 cycs = (cpu.asregs.cycles + (insts + bonus_cycles +
404 (memops * memcycles)) - WLbcyc);
405
406 if (WLcnts[WLW] == 1)
407 {
408 WLmax[WLW] = cycs;
409 WLmin[WLW] = cycs;
410 WLcyc[WLW] = 0;
411 }
412
413 if (cycs > WLmax[WLW])
414 {
415 WLmax[WLW] = cycs;
416 }
417
418 if (cycs < WLmin[WLW])
419 {
420 WLmin[WLW] = cycs;
421 }
422
423 WLcyc[WLW] += cycs;
424 WLincyc = 0;
425 WLendpc = 0;
426 }
427
428 /* Optimize with a hash to speed loop. */
429 if (WLincyc == 0)
430 {
431 if ((WLhash == 0) || ((WLhash & pc) != 0))
432 {
433 for (w=1; w <= ENDWL; w++)
434 {
435 if (pc == WL[w])
436 {
437 WLcnts[w]++;
438 WLbcyc = cpu.asregs.cycles + insts
439 + bonus_cycles + (memops * memcycles);
440 WLendpc = cpu.gr[15];
441 WLincyc = 1;
442 WLW = w;
443 break;
444 }
445 }
446 }
447 }
448 #endif
449
450 if (tracing)
451 fprintf (stderr, "%.4lx: inst = %.4x ", pc, inst);
452
453 oldpc = pc;
454
455 pc += 2;
456
457 switch (inst >> 8)
458 {
459 case 0x00:
460 switch RS
461 {
462 case 0x0:
463 switch RD
464 {
465 case 0x0: /* bkpt */
466 cpu.asregs.exception = SIGTRAP;
467 pc -= 2;
468 break;
469
470 case 0x1: /* sync */
471 break;
472
473 case 0x2: /* rte */
474 pc = cpu.epc;
475 cpu.sr = cpu.esr;
476 needfetch = 1;
477
478 if (SR_AF ())
479 cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
480 else
481 cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
482 break;
483
484 case 0x3: /* rfi */
485 pc = cpu.fpc;
486 cpu.sr = cpu.fsr;
487 needfetch = 1;
488
489 if (SR_AF ())
490 cpu.asregs.active_gregs = &cpu.asregs.alt_gregs[0];
491 else
492 cpu.asregs.active_gregs = &cpu.asregs.gregs[0];
493 break;
494
495 case 0x4: /* stop */
496 if (STATE_VERBOSE_P (sd))
497 fprintf (stderr, "WARNING: stop unimplemented\n");
498 break;
499
500 case 0x5: /* wait */
501 if (STATE_VERBOSE_P (sd))
502 fprintf (stderr, "WARNING: wait unimplemented\n");
503 break;
504
505 case 0x6: /* doze */
506 if (STATE_VERBOSE_P (sd))
507 fprintf (stderr, "WARNING: doze unimplemented\n");
508 break;
509
510 case 0x7:
511 cpu.asregs.exception = SIGILL; /* illegal */
512 break;
513
514 case 0x8: /* trap 0 */
515 case 0xA: /* trap 2 */
516 case 0xB: /* trap 3 */
517 cpu.asregs.exception = SIGTRAP;
518 break;
519
520 case 0xC: /* trap 4 */
521 case 0xD: /* trap 5 */
522 case 0xE: /* trap 6 */
523 cpu.asregs.exception = SIGILL; /* illegal */
524 break;
525
526 case 0xF: /* trap 7 */
527 cpu.asregs.exception = SIGTRAP; /* integer div-by-0 */
528 break;
529
530 case 0x9: /* trap 1 */
531 handle_trap1 (sd);
532 break;
533 }
534 break;
535
536 case 0x1:
537 cpu.asregs.exception = SIGILL; /* illegal */
538 break;
539
540 case 0x2: /* mvc */
541 cpu.gr[RD] = C_VALUE();
542 break;
543 case 0x3: /* mvcv */
544 cpu.gr[RD] = C_OFF();
545 break;
546 case 0x4: /* ldq */
547 {
548 word addr = cpu.gr[RD];
549 int regno = 4; /* always r4-r7 */
550
551 bonus_cycles++;
552 memops += 4;
553 do
554 {
555 cpu.gr[regno] = rlat(addr);
556 addr += 4;
557 regno++;
558 }
559 while ((regno&0x3) != 0);
560 }
561 break;
562 case 0x5: /* stq */
563 {
564 word addr = cpu.gr[RD];
565 int regno = 4; /* always r4-r7 */
566
567 memops += 4;
568 bonus_cycles++;
569 do
570 {
571 wlat(addr, cpu.gr[regno]);
572 addr += 4;
573 regno++;
574 }
575 while ((regno & 0x3) != 0);
576 }
577 break;
578 case 0x6: /* ldm */
579 {
580 word addr = cpu.gr[0];
581 int regno = RD;
582
583 /* bonus cycle is really only needed if
584 the next insn shifts the last reg loaded.
585
586 bonus_cycles++;
587 */
588 memops += 16-regno;
589 while (regno <= 0xF)
590 {
591 cpu.gr[regno] = rlat(addr);
592 addr += 4;
593 regno++;
594 }
595 }
596 break;
597 case 0x7: /* stm */
598 {
599 word addr = cpu.gr[0];
600 int regno = RD;
601
602 /* this should be removed! */
603 /* bonus_cycles ++; */
604
605 memops += 16 - regno;
606 while (regno <= 0xF)
607 {
608 wlat(addr, cpu.gr[regno]);
609 addr += 4;
610 regno++;
611 }
612 }
613 break;
614
615 case 0x8: /* dect */
616 cpu.gr[RD] -= C_VALUE();
617 break;
618 case 0x9: /* decf */
619 cpu.gr[RD] -= C_OFF();
620 break;
621 case 0xA: /* inct */
622 cpu.gr[RD] += C_VALUE();
623 break;
624 case 0xB: /* incf */
625 cpu.gr[RD] += C_OFF();
626 break;
627 case 0xC: /* jmp */
628 pc = cpu.gr[RD];
629 if (tracing && RD == 15)
630 fprintf (stderr, "Func return, r2 = %lxx, r3 = %lx\n",
631 cpu.gr[2], cpu.gr[3]);
632 bonus_cycles++;
633 needfetch = 1;
634 break;
635 case 0xD: /* jsr */
636 cpu.gr[15] = pc;
637 pc = cpu.gr[RD];
638 bonus_cycles++;
639 needfetch = 1;
640 break;
641 case 0xE: /* ff1 */
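/* Find first one: count the leading zero bits of rd; the result is 32 when rd is zero. */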
642 {
643 word tmp, i;
644 tmp = cpu.gr[RD];
645 for (i = 0; !(tmp & 0x80000000) && i < 32; i++)
646 tmp <<= 1;
647 cpu.gr[RD] = i;
648 }
649 break;
650 case 0xF: /* brev */
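/* Bit-reverse rd by swapping adjacent bits, then pairs, nibbles, bytes and halfwords. */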
651 {
652 word tmp;
653 tmp = cpu.gr[RD];
654 tmp = ((tmp & 0xaaaaaaaa) >> 1) | ((tmp & 0x55555555) << 1);
655 tmp = ((tmp & 0xcccccccc) >> 2) | ((tmp & 0x33333333) << 2);
656 tmp = ((tmp & 0xf0f0f0f0) >> 4) | ((tmp & 0x0f0f0f0f) << 4);
657 tmp = ((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8);
658 cpu.gr[RD] = ((tmp & 0xffff0000) >> 16) | ((tmp & 0x0000ffff) << 16);
659 }
660 break;
661 }
662 break;
663 case 0x01:
664 switch RS
665 {
666 case 0x0: /* xtrb3 */
667 cpu.gr[1] = (cpu.gr[RD]) & 0xFF;
668 NEW_C (cpu.gr[RD] != 0);
669 break;
670 case 0x1: /* xtrb2 */
671 cpu.gr[1] = (cpu.gr[RD]>>8) & 0xFF;
672 NEW_C (cpu.gr[RD] != 0);
673 break;
674 case 0x2: /* xtrb1 */
675 cpu.gr[1] = (cpu.gr[RD]>>16) & 0xFF;
676 NEW_C (cpu.gr[RD] != 0);
677 break;
678 case 0x3: /* xtrb0 */
679 cpu.gr[1] = (cpu.gr[RD]>>24) & 0xFF;
680 NEW_C (cpu.gr[RD] != 0);
681 break;
682 case 0x4: /* zextb */
683 cpu.gr[RD] &= 0x000000FF;
684 break;
685 case 0x5: /* sextb */
686 {
687 long tmp;
688 tmp = cpu.gr[RD];
689 tmp <<= 24;
690 tmp >>= 24;
691 cpu.gr[RD] = tmp;
692 }
693 break;
694 case 0x6: /* zexth */
695 cpu.gr[RD] &= 0x0000FFFF;
696 break;
697 case 0x7: /* sexth */
698 {
699 long tmp;
700 tmp = cpu.gr[RD];
701 tmp <<= 16;
702 tmp >>= 16;
703 cpu.gr[RD] = tmp;
704 }
705 break;
706 case 0x8: /* declt */
707 --cpu.gr[RD];
708 NEW_C ((long)cpu.gr[RD] < 0);
709 break;
710 case 0x9: /* tstnbz */
711 {
712 word tmp = cpu.gr[RD];
713 NEW_C ((tmp & 0xFF000000) != 0 &&
714 (tmp & 0x00FF0000) != 0 && (tmp & 0x0000FF00) != 0 &&
715 (tmp & 0x000000FF) != 0);
716 }
717 break;
718 case 0xA: /* decgt */
719 --cpu.gr[RD];
720 NEW_C ((long)cpu.gr[RD] > 0);
721 break;
722 case 0xB: /* decne */
723 --cpu.gr[RD];
724 NEW_C ((long)cpu.gr[RD] != 0);
725 break;
726 case 0xC: /* clrt */
727 if (C_ON())
728 cpu.gr[RD] = 0;
729 break;
730 case 0xD: /* clrf */
731 if (C_OFF())
732 cpu.gr[RD] = 0;
733 break;
734 case 0xE: /* abs */
735 if (cpu.gr[RD] & 0x80000000)
736 cpu.gr[RD] = ~cpu.gr[RD] + 1;
737 break;
738 case 0xF: /* not */
739 cpu.gr[RD] = ~cpu.gr[RD];
740 break;
741 }
742 break;
743 case 0x02: /* movt */
744 if (C_ON())
745 cpu.gr[RD] = cpu.gr[RS];
746 break;
747 case 0x03: /* mult */
748 /* consume 2 bits per cycle from rs, until rs is 0 */
749 {
750 unsigned int t = cpu.gr[RS];
751 int ticks;
752 for (ticks = 0; t != 0 ; t >>= 2)
753 ticks++;
754 bonus_cycles += ticks;
755 }
756 bonus_cycles += 2; /* min. is 3, so add 2, plus ticks above */
757 if (tracing)
758 fprintf (stderr, " mult %lx by %lx to give %lx",
759 cpu.gr[RD], cpu.gr[RS], cpu.gr[RD] * cpu.gr[RS]);
760 cpu.gr[RD] = cpu.gr[RD] * cpu.gr[RS];
761 break;
762 case 0x04: /* loopt */
763 if (C_ON())
764 {
765 pc += (IMM4 << 1) - 32;
766 bonus_cycles ++;
767 needfetch = 1;
768 }
769 --cpu.gr[RS]; /* not RD! */
770 NEW_C (((long)cpu.gr[RS]) > 0);
771 break;
772 case 0x05: /* subu */
773 cpu.gr[RD] -= cpu.gr[RS];
774 break;
775 case 0x06: /* addc */
776 {
777 unsigned long tmp, a, b;
778 a = cpu.gr[RD];
779 b = cpu.gr[RS];
780 cpu.gr[RD] = a + b + C_VALUE ();
781 tmp = iu_carry (a, b, C_VALUE ());
782 NEW_C (tmp);
783 }
784 break;
785 case 0x07: /* subc */
786 {
787 unsigned long tmp, a, b;
788 a = cpu.gr[RD];
789 b = cpu.gr[RS];
790 cpu.gr[RD] = a - b + C_VALUE () - 1;
791 tmp = iu_carry (a,~b, C_VALUE ());
792 NEW_C (tmp);
793 }
794 break;
795 case 0x08: /* illegal */
796 case 0x09: /* illegal*/
797 cpu.asregs.exception = SIGILL;
798 break;
799 case 0x0A: /* movf */
800 if (C_OFF())
801 cpu.gr[RD] = cpu.gr[RS];
802 break;
803 case 0x0B: /* lsr */
804 {
805 unsigned long dst, src;
806 dst = cpu.gr[RD];
807 src = cpu.gr[RS];
808 /* We must not rely solely upon the native shift operations, since they
809 may not match the M*Core's behaviour on boundary conditions. */
810 dst = src > 31 ? 0 : dst >> src;
811 cpu.gr[RD] = dst;
812 }
813 break;
814 case 0x0C: /* cmphs */
815 NEW_C ((unsigned long )cpu.gr[RD] >=
816 (unsigned long)cpu.gr[RS]);
817 break;
818 case 0x0D: /* cmplt */
819 NEW_C ((long)cpu.gr[RD] < (long)cpu.gr[RS]);
820 break;
821 case 0x0E: /* tst */
822 NEW_C ((cpu.gr[RD] & cpu.gr[RS]) != 0);
823 break;
824 case 0x0F: /* cmpne */
825 NEW_C (cpu.gr[RD] != cpu.gr[RS]);
826 break;
827 case 0x10: case 0x11: /* mfcr */
828 {
829 unsigned r;
830 r = IMM5;
831 if (r <= LAST_VALID_CREG)
832 cpu.gr[RD] = cpu.cr[r];
833 else
834 cpu.asregs.exception = SIGILL;
835 }
836 break;
837
838 case 0x12: /* mov */
839 cpu.gr[RD] = cpu.gr[RS];
840 if (tracing)
841 fprintf (stderr, "MOV %lx into reg %d", cpu.gr[RD], RD);
842 break;
843
844 case 0x13: /* bgenr */
845 if (cpu.gr[RS] & 0x20)
846 cpu.gr[RD] = 0;
847 else
848 cpu.gr[RD] = 1 << (cpu.gr[RS] & 0x1F);
849 break;
850
851 case 0x14: /* rsub */
852 cpu.gr[RD] = cpu.gr[RS] - cpu.gr[RD];
853 break;
854
855 case 0x15: /* ixw */
856 cpu.gr[RD] += cpu.gr[RS]<<2;
857 break;
858
859 case 0x16: /* and */
860 cpu.gr[RD] &= cpu.gr[RS];
861 break;
862
863 case 0x17: /* xor */
864 cpu.gr[RD] ^= cpu.gr[RS];
865 break;
866
867 case 0x18: case 0x19: /* mtcr */
868 {
869 unsigned r;
870 r = IMM5;
871 if (r <= LAST_VALID_CREG)
872 cpu.cr[r] = cpu.gr[RD];
873 else
874 cpu.asregs.exception = SIGILL;
875
876 /* we might have changed register sets... */
877 if (SR_AF ())
878 cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
879 else
880 cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
881 }
882 break;
883
884 case 0x1A: /* asr */
885 /* We must not rely solely upon the native shift operations, since they
886 may not match the M*Core's behaviour on boundary conditions. */
887 if (cpu.gr[RS] > 30)
888 cpu.gr[RD] = ((long) cpu.gr[RD]) < 0 ? -1 : 0;
889 else
890 cpu.gr[RD] = (long) cpu.gr[RD] >> cpu.gr[RS];
891 break;
892
893 case 0x1B: /* lsl */
894 /* We must not rely solely upon the native shift operations, since they
895 may not match the M*Core's behaviour on boundary conditions. */
896 cpu.gr[RD] = cpu.gr[RS] > 31 ? 0 : cpu.gr[RD] << cpu.gr[RS];
897 break;
898
899 case 0x1C: /* addu */
900 cpu.gr[RD] += cpu.gr[RS];
901 break;
902
903 case 0x1D: /* ixh */
904 cpu.gr[RD] += cpu.gr[RS] << 1;
905 break;
906
907 case 0x1E: /* or */
908 cpu.gr[RD] |= cpu.gr[RS];
909 break;
910
911 case 0x1F: /* andn */
912 cpu.gr[RD] &= ~cpu.gr[RS];
913 break;
914 case 0x20: case 0x21: /* addi */
915 cpu.gr[RD] =
916 cpu.gr[RD] + (IMM5 + 1);
917 break;
918 case 0x22: case 0x23: /* cmplti */
919 {
920 int tmp = (IMM5 + 1);
921 if (cpu.gr[RD] < tmp)
922 {
923 SET_C();
924 }
925 else
926 {
927 CLR_C();
928 }
929 }
930 break;
931 case 0x24: case 0x25: /* subi */
932 cpu.gr[RD] =
933 cpu.gr[RD] - (IMM5 + 1);
934 break;
935 case 0x26: case 0x27: /* illegal */
936 cpu.asregs.exception = SIGILL;
937 break;
938 case 0x28: case 0x29: /* rsubi */
939 cpu.gr[RD] =
940 IMM5 - cpu.gr[RD];
941 break;
942 case 0x2A: case 0x2B: /* cmpnei */
943 if (cpu.gr[RD] != IMM5)
944 {
945 SET_C();
946 }
947 else
948 {
949 CLR_C();
950 }
951 break;
952
953 case 0x2C: case 0x2D: /* bmaski, divu */
954 {
955 unsigned imm = IMM5;
956
957 if (imm == 1)
958 {
959 int exe;
960 int rxnlz, r1nlz;
961 unsigned int rx, r1;
962
963 rx = cpu.gr[RD];
964 r1 = cpu.gr[1];
965 exe = 0;
966
967 /* unsigned divide */
968 cpu.gr[RD] = (word) ((unsigned int) cpu.gr[RD] / (unsigned int)cpu.gr[1] );
969
970 /* compute bonus_cycles for divu */
971 for (r1nlz = 0; ((r1 & 0x80000000) == 0) && (r1nlz < 32); r1nlz ++)
972 r1 = r1 << 1;
973
974 for (rxnlz = 0; ((rx & 0x80000000) == 0) && (rxnlz < 32); rxnlz ++)
975 rx = rx << 1;
976
977 if (r1nlz < rxnlz)
978 exe += 4;
979 else
980 exe += 5 + r1nlz - rxnlz;
981
982 if (exe >= (2 * memcycles - 1))
983 {
984 bonus_cycles += exe - (2 * memcycles) + 1;
985 }
986 }
987 else if (imm == 0 || imm >= 8)
988 {
989 /* bmaski */
990 if (imm == 0)
991 cpu.gr[RD] = -1;
992 else
993 cpu.gr[RD] = (1 << imm) - 1;
994 }
995 else
996 {
997 /* illegal */
998 cpu.asregs.exception = SIGILL;
999 }
1000 }
1001 break;
1002 case 0x2E: case 0x2F: /* andi */
1003 cpu.gr[RD] = cpu.gr[RD] & IMM5;
1004 break;
1005 case 0x30: case 0x31: /* bclri */
1006 cpu.gr[RD] = cpu.gr[RD] & ~(1<<IMM5);
1007 break;
1008 case 0x32: case 0x33: /* bgeni, divs */
1009 {
1010 unsigned imm = IMM5;
1011 if (imm == 1)
1012 {
1013 int exe,sc;
1014 int rxnlz, r1nlz;
1015 signed int rx, r1;
1016
1017 /* compute bonus_cycles for divs */
1018 rx = cpu.gr[RD];
1019 r1 = cpu.gr[1];
1020 exe = 0;
1021
1022 if (((rx < 0) && (r1 > 0)) || ((rx >= 0) && (r1 < 0)))
1023 sc = 1;
1024 else
1025 sc = 0;
1026
1027 rx = abs (rx);
1028 r1 = abs (r1);
1029
1030 /* signed divide, general registers are of type int, so / op is OK */
1031 cpu.gr[RD] = cpu.gr[RD] / cpu.gr[1];
1032
1033 for (r1nlz = 0; ((r1 & 0x80000000) == 0) && (r1nlz < 32) ; r1nlz ++ )
1034 r1 = r1 << 1;
1035
1036 for (rxnlz = 0; ((rx & 0x80000000) == 0) && (rxnlz < 32) ; rxnlz ++ )
1037 rx = rx << 1;
1038
1039 if (r1nlz < rxnlz)
1040 exe += 5;
1041 else
1042 exe += 6 + r1nlz - rxnlz + sc;
1043
1044 if (exe >= (2 * memcycles - 1))
1045 {
1046 bonus_cycles += exe - (2 * memcycles) + 1;
1047 }
1048 }
1049 else if (imm >= 7)
1050 {
1051 /* bgeni */
1052 cpu.gr[RD] = (1 << IMM5);
1053 }
1054 else
1055 {
1056 /* illegal */
1057 cpu.asregs.exception = SIGILL;
1058 }
1059 break;
1060 }
1061 case 0x34: case 0x35: /* bseti */
1062 cpu.gr[RD] = cpu.gr[RD] | (1 << IMM5);
1063 break;
1064 case 0x36: case 0x37: /* btsti */
1065 NEW_C (cpu.gr[RD] >> IMM5);
1066 break;
1067 case 0x38: case 0x39: /* xsr, rotli */
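/* imm == 0 selects xsr (rotate right one bit through the carry flag);
   otherwise rotli rotates left by imm. */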
1068 {
1069 unsigned imm = IMM5;
1070 unsigned long tmp = cpu.gr[RD];
1071 if (imm == 0)
1072 {
1073 word cbit;
1074 cbit = C_VALUE();
1075 NEW_C (tmp);
1076 cpu.gr[RD] = (cbit << 31) | (tmp >> 1);
1077 }
1078 else
1079 cpu.gr[RD] = (tmp << imm) | (tmp >> (32 - imm));
1080 }
1081 break;
1082 case 0x3A: case 0x3B: /* asrc, asri */
1083 {
1084 unsigned imm = IMM5;
1085 long tmp = cpu.gr[RD];
1086 if (imm == 0)
1087 {
1088 NEW_C (tmp);
1089 cpu.gr[RD] = tmp >> 1;
1090 }
1091 else
1092 cpu.gr[RD] = tmp >> imm;
1093 }
1094 break;
1095 case 0x3C: case 0x3D: /* lslc, lsli */
1096 {
1097 unsigned imm = IMM5;
1098 unsigned long tmp = cpu.gr[RD];
1099 if (imm == 0)
1100 {
1101 NEW_C (tmp >> 31);
1102 cpu.gr[RD] = tmp << 1;
1103 }
1104 else
1105 cpu.gr[RD] = tmp << imm;
1106 }
1107 break;
1108 case 0x3E: case 0x3F: /* lsrc, lsri */
1109 {
1110 unsigned imm = IMM5;
1111 unsigned long tmp = cpu.gr[RD];
1112 if (imm == 0)
1113 {
1114 NEW_C (tmp);
1115 cpu.gr[RD] = tmp >> 1;
1116 }
1117 else
1118 cpu.gr[RD] = tmp >> imm;
1119 }
1120 break;
1121 case 0x40: case 0x41: case 0x42: case 0x43:
1122 case 0x44: case 0x45: case 0x46: case 0x47:
1123 case 0x48: case 0x49: case 0x4A: case 0x4B:
1124 case 0x4C: case 0x4D: case 0x4E: case 0x4F:
1125 cpu.asregs.exception = SIGILL;
1126 break;
1127 case 0x50:
1128 util (sd, inst & 0xFF);
1129 break;
1130 case 0x51: case 0x52: case 0x53:
1131 case 0x54: case 0x55: case 0x56: case 0x57:
1132 case 0x58: case 0x59: case 0x5A: case 0x5B:
1133 case 0x5C: case 0x5D: case 0x5E: case 0x5F:
1134 cpu.asregs.exception = SIGILL;
1135 break;
1136 case 0x60: case 0x61: case 0x62: case 0x63: /* movi */
1137 case 0x64: case 0x65: case 0x66: case 0x67:
1138 cpu.gr[RD] = (inst >> 4) & 0x7F;
1139 break;
1140 case 0x68: case 0x69: case 0x6A: case 0x6B:
1141 case 0x6C: case 0x6D: case 0x6E: case 0x6F: /* illegal */
1142 cpu.asregs.exception = SIGILL;
1143 break;
1144 case 0x71: case 0x72: case 0x73:
1145 case 0x74: case 0x75: case 0x76: case 0x77:
1146 case 0x78: case 0x79: case 0x7A: case 0x7B:
1147 case 0x7C: case 0x7D: case 0x7E: /* lrw */
1148 cpu.gr[RX] = rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC);
1149 if (tracing)
1150 fprintf (stderr, "LRW of 0x%x from 0x%lx to reg %d",
1151 rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC),
1152 (pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC, RX);
1153 memops++;
1154 break;
1155 case 0x7F: /* jsri */
1156 cpu.gr[15] = pc;
1157 if (tracing)
1158 fprintf (stderr,
1159 "func call: r2 = %lx r3 = %lx r4 = %lx r5 = %lx r6 = %lx r7 = %lx\n",
1160 cpu.gr[2], cpu.gr[3], cpu.gr[4], cpu.gr[5], cpu.gr[6], cpu.gr[7]);
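/* Fall through to jmpi for the actual indirect jump. */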
1161 case 0x70: /* jmpi */
1162 pc = rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC);
1163 memops++;
1164 bonus_cycles++;
1165 needfetch = 1;
1166 break;
1167
1168 case 0x80: case 0x81: case 0x82: case 0x83:
1169 case 0x84: case 0x85: case 0x86: case 0x87:
1170 case 0x88: case 0x89: case 0x8A: case 0x8B:
1171 case 0x8C: case 0x8D: case 0x8E: case 0x8F: /* ld */
1172 cpu.gr[RX] = rlat (cpu.gr[RD] + ((inst >> 2) & 0x003C));
1173 if (tracing)
1174 fprintf (stderr, "load reg %d from 0x%lx with 0x%lx",
1175 RX,
1176 cpu.gr[RD] + ((inst >> 2) & 0x003C), cpu.gr[RX]);
1177 memops++;
1178 break;
1179 case 0x90: case 0x91: case 0x92: case 0x93:
1180 case 0x94: case 0x95: case 0x96: case 0x97:
1181 case 0x98: case 0x99: case 0x9A: case 0x9B:
1182 case 0x9C: case 0x9D: case 0x9E: case 0x9F: /* st */
1183 wlat (cpu.gr[RD] + ((inst >> 2) & 0x003C), cpu.gr[RX]);
1184 if (tracing)
1185 fprintf (stderr, "store reg %d (containing 0x%lx) to 0x%lx",
1186 RX, cpu.gr[RX],
1187 cpu.gr[RD] + ((inst >> 2) & 0x003C));
1188 memops++;
1189 break;
1190 case 0xA0: case 0xA1: case 0xA2: case 0xA3:
1191 case 0xA4: case 0xA5: case 0xA6: case 0xA7:
1192 case 0xA8: case 0xA9: case 0xAA: case 0xAB:
1193 case 0xAC: case 0xAD: case 0xAE: case 0xAF: /* ld.b */
1194 cpu.gr[RX] = rbat (cpu.gr[RD] + RS);
1195 memops++;
1196 break;
1197 case 0xB0: case 0xB1: case 0xB2: case 0xB3:
1198 case 0xB4: case 0xB5: case 0xB6: case 0xB7:
1199 case 0xB8: case 0xB9: case 0xBA: case 0xBB:
1200 case 0xBC: case 0xBD: case 0xBE: case 0xBF: /* st.b */
1201 wbat (cpu.gr[RD] + RS, cpu.gr[RX]);
1202 memops++;
1203 break;
1204 case 0xC0: case 0xC1: case 0xC2: case 0xC3:
1205 case 0xC4: case 0xC5: case 0xC6: case 0xC7:
1206 case 0xC8: case 0xC9: case 0xCA: case 0xCB:
1207 case 0xCC: case 0xCD: case 0xCE: case 0xCF: /* ld.h */
1208 cpu.gr[RX] = rhat (cpu.gr[RD] + ((inst >> 3) & 0x001E));
1209 memops++;
1210 break;
1211 case 0xD0: case 0xD1: case 0xD2: case 0xD3:
1212 case 0xD4: case 0xD5: case 0xD6: case 0xD7:
1213 case 0xD8: case 0xD9: case 0xDA: case 0xDB:
1214 case 0xDC: case 0xDD: case 0xDE: case 0xDF: /* st.h */
1215 what (cpu.gr[RD] + ((inst >> 3) & 0x001E), cpu.gr[RX]);
1216 memops++;
1217 break;
1218 case 0xE8: case 0xE9: case 0xEA: case 0xEB:
1219 case 0xEC: case 0xED: case 0xEE: case 0xEF: /* bf */
1220 if (C_OFF())
1221 {
1222 int disp;
1223 disp = inst & 0x03FF;
1224 if (inst & 0x0400)
1225 disp |= 0xFFFFFC00;
1226 pc += disp<<1;
1227 bonus_cycles++;
1228 needfetch = 1;
1229 }
1230 break;
1231 case 0xE0: case 0xE1: case 0xE2: case 0xE3:
1232 case 0xE4: case 0xE5: case 0xE6: case 0xE7: /* bt */
1233 if (C_ON())
1234 {
1235 int disp;
1236 disp = inst & 0x03FF;
1237 if (inst & 0x0400)
1238 disp |= 0xFFFFFC00;
1239 pc += disp<<1;
1240 bonus_cycles++;
1241 needfetch = 1;
1242 }
1243 break;
1244
1245 case 0xF8: case 0xF9: case 0xFA: case 0xFB:
1246 case 0xFC: case 0xFD: case 0xFE: case 0xFF: /* bsr */
1247 cpu.gr[15] = pc;
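/* Fall through to br for the branch itself. */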
1248 case 0xF0: case 0xF1: case 0xF2: case 0xF3:
1249 case 0xF4: case 0xF5: case 0xF6: case 0xF7: /* br */
1250 {
1251 int disp;
1252 disp = inst & 0x03FF;
1253 if (inst & 0x0400)
1254 disp |= 0xFFFFFC00;
1255 pc += disp<<1;
1256 bonus_cycles++;
1257 needfetch = 1;
1258 }
1259 break;
1260
1261 }
1262
1263 if (tracing)
1264 fprintf (stderr, "\n");
1265
1266 if (needfetch)
1267 {
1268 ibuf = rlat (pc & 0xFFFFFFFC);
1269 needfetch = 0;
1270 }
1271 }
1272 while (!cpu.asregs.exception);
1273
1274 /* Hide away the things we've cached while executing. */
1275 CPU_PC_SET (scpu, pc);
1276 cpu.asregs.insts += insts; /* instructions done ... */
1277 cpu.asregs.cycles += insts; /* and each takes a cycle */
1278 cpu.asregs.cycles += bonus_cycles; /* and extra cycles for branches */
1279 cpu.asregs.cycles += memops * memcycles; /* and memop cycle delays */
1280 }
1281
1282 int
1283 sim_store_register (SIM_DESC sd, int rn, unsigned char *memory, int length)
1284 {
1285 if (rn < NUM_MCORE_REGS && rn >= 0)
1286 {
1287 if (length == 4)
1288 {
1289 long ival;
1290
1291 /* misalignment safe */
1292 ival = mcore_extract_unsigned_integer (memory, 4);
1293 cpu.asints[rn] = ival;
1294 }
1295
1296 return 4;
1297 }
1298 else
1299 return 0;
1300 }
1301
1302 int
1303 sim_fetch_register (SIM_DESC sd, int rn, unsigned char *memory, int length)
1304 {
1305 if (rn < NUM_MCORE_REGS && rn >= 0)
1306 {
1307 if (length == 4)
1308 {
1309 long ival = cpu.asints[rn];
1310
1311 /* misalignment-safe */
1312 mcore_store_unsigned_integer (memory, 4, ival);
1313 }
1314
1315 return 4;
1316 }
1317 else
1318 return 0;
1319 }
1320
1321 void
1322 sim_stop_reason (SIM_DESC sd, enum sim_stop *reason, int *sigrc)
1323 {
1324 if (cpu.asregs.exception == SIGQUIT)
1325 {
1326 * reason = sim_exited;
1327 * sigrc = cpu.gr[PARM1];
1328 }
1329 else
1330 {
1331 * reason = sim_stopped;
1332 * sigrc = cpu.asregs.exception;
1333 }
1334 }
1335
1336 void
1337 sim_info (SIM_DESC sd, int verbose)
1338 {
1339 #ifdef WATCHFUNCTIONS
1340 int w, wcyc;
1341 #endif
1342 double virttime = cpu.asregs.cycles / 36.0e6;
1343 host_callback *callback = STATE_CALLBACK (sd);
1344
1345 callback->printf_filtered (callback, "\n\n# instructions executed %10d\n",
1346 cpu.asregs.insts);
1347 callback->printf_filtered (callback, "# cycles %10d\n",
1348 cpu.asregs.cycles);
1349 callback->printf_filtered (callback, "# pipeline stalls %10d\n",
1350 cpu.asregs.stalls);
1351 callback->printf_filtered (callback, "# virtual time taken %10.4f\n",
1352 virttime);
1353
1354 #ifdef WATCHFUNCTIONS
1355 callback->printf_filtered (callback, "\nNumber of watched functions: %d\n",
1356 ENDWL);
1357
1358 wcyc = 0;
1359
1360 for (w = 1; w <= ENDWL; w++)
1361 {
1362 callback->printf_filtered (callback, "WL = %s %8x\n",WLstr[w],WL[w]);
1363 callback->printf_filtered (callback, " calls = %d, cycles = %d\n",
1364 WLcnts[w],WLcyc[w]);
1365
1366 if (WLcnts[w] != 0)
1367 callback->printf_filtered (callback,
1368 " maxcpc = %d, mincpc = %d, avecpc = %d\n",
1369 WLmax[w],WLmin[w],WLcyc[w]/WLcnts[w]);
1370 wcyc += WLcyc[w];
1371 }
1372
1373 callback->printf_filtered (callback,
1374 "Total cycles for watched functions: %d\n",wcyc);
1375 #endif
1376 }
1377
1378 static sim_cia
1379 mcore_pc_get (sim_cpu *cpu)
1380 {
1381 return cpu->pc;
1382 }
1383
1384 static void
1385 mcore_pc_set (sim_cpu *cpu, sim_cia pc)
1386 {
1387 cpu->pc = pc;
1388 }
1389
1390 static void
1391 free_state (SIM_DESC sd)
1392 {
1393 if (STATE_MODULES (sd) != NULL)
1394 sim_module_uninstall (sd);
1395 sim_cpu_free_all (sd);
1396 sim_state_free (sd);
1397 }
1398
1399 SIM_DESC
1400 sim_open (SIM_OPEN_KIND kind, host_callback *cb, struct bfd *abfd, char **argv)
1401 {
1402 int i;
1403 SIM_DESC sd = sim_state_alloc (kind, cb);
1404 SIM_ASSERT (STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);
1405
1406 /* The cpu data is kept in a separately allocated chunk of memory. */
1407 if (sim_cpu_alloc_all (sd, 1, /*cgen_cpu_max_extra_bytes ()*/0) != SIM_RC_OK)
1408 {
1409 free_state (sd);
1410 return 0;
1411 }
1412
1413 if (sim_pre_argv_init (sd, argv[0]) != SIM_RC_OK)
1414 {
1415 free_state (sd);
1416 return 0;
1417 }
1418
1419 /* getopt will print the error message so we just have to exit if this fails.
1420 FIXME: Hmmm... in the case of gdb we need getopt to call
1421 print_filtered. */
1422 if (sim_parse_args (sd, argv) != SIM_RC_OK)
1423 {
1424 free_state (sd);
1425 return 0;
1426 }
1427
1428 /* Check for/establish a reference program image. */
1429 if (sim_analyze_program (sd,
1430 (STATE_PROG_ARGV (sd) != NULL
1431 ? *STATE_PROG_ARGV (sd)
1432 : NULL), abfd) != SIM_RC_OK)
1433 {
1434 free_state (sd);
1435 return 0;
1436 }
1437
1438 /* Configure/verify the target byte order and other runtime
1439 configuration options. */
1440 if (sim_config (sd) != SIM_RC_OK)
1441 {
1442 sim_module_uninstall (sd);
1443 return 0;
1444 }
1445
1446 if (sim_post_argv_init (sd) != SIM_RC_OK)
1447 {
1448 /* Uninstall the modules to avoid memory leaks,
1449 file descriptor leaks, etc. */
1450 sim_module_uninstall (sd);
1451 return 0;
1452 }
1453
1454 /* CPU specific initialization. */
1455 for (i = 0; i < MAX_NR_PROCESSORS; ++i)
1456 {
1457 SIM_CPU *cpu = STATE_CPU (sd, i);
1458
1459 CPU_PC_FETCH (cpu) = mcore_pc_get;
1460 CPU_PC_STORE (cpu) = mcore_pc_set;
1461
1462 set_initial_gprs (cpu); /* Reset the GPR registers. */
1463 }
1464
1465 /* Default to an 8 Mbyte (== 2^23) memory space. */
1466 sim_do_commandf (sd, "memory-size %#x", DEFAULT_MEMORY_SIZE);
1467
1468 return sd;
1469 }
1470
1471 void
1472 sim_close (SIM_DESC sd, int quitting)
1473 {
1474 /* nothing to do */
1475 }
1476
1477 SIM_RC
1478 sim_create_inferior (SIM_DESC sd, struct bfd *prog_bfd, char **argv, char **env)
1479 {
1480 SIM_CPU *scpu = STATE_CPU (sd, 0);
1481 char ** avp;
1482 int nargs = 0;
1483 int nenv = 0;
1484 int s_length;
1485 int l;
1486 unsigned long strings;
1487 unsigned long pointers;
1488 unsigned long hi_stack;
1489
1490
1491 /* Set the initial register set. */
1492 set_initial_gprs (scpu);
1493
1494 hi_stack = DEFAULT_MEMORY_SIZE - 4;
1495 CPU_PC_SET (scpu, bfd_get_start_address (prog_bfd));
1496
1497 /* Calculate the argument and environment strings. */
1498 s_length = 0;
1499 nargs = 0;
1500 avp = argv;
1501 while (avp && *avp)
1502 {
1503 l = strlen (*avp) + 1; /* include the null */
1504 s_length += (l + 3) & ~3; /* make it a 4 byte boundary */
1505 nargs++; avp++;
1506 }
1507
1508 nenv = 0;
1509 avp = env;
1510 while (avp && *avp)
1511 {
1512 l = strlen (*avp) + 1; /* include the null */
1513 s_length += (l + 3) & ~3; /* make it a 4 byte boundary */
1514 nenv++; avp++;
1515 }
1516
1517 /* Claim some memory for the pointers and strings. */
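/* Going down from hi_stack: the argv/envp pointer block, then the string
   data, then the initial stack pointer (gr[0]) rounded down to an 8-byte
   boundary. */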
1518 pointers = hi_stack - sizeof(word) * (nenv+1+nargs+1);
1519 pointers &= ~3; /* must be 4-byte aligned */
1520 cpu.gr[0] = pointers;
1521
1522 strings = cpu.gr[0] - s_length;
1523 strings &= ~3; /* want to make it 4-byte aligned */
1524 cpu.gr[0] = strings;
1525 /* dac fix, the stack address must be 8-byte aligned! */
1526 cpu.gr[0] = cpu.gr[0] - cpu.gr[0] % 8;
1527
1528 /* Loop through the arguments and fill them in. */
1529 cpu.gr[PARM1] = nargs;
1530 if (nargs == 0)
1531 {
1532 /* No strings to fill in. */
1533 cpu.gr[PARM2] = 0;
1534 }
1535 else
1536 {
1537 cpu.gr[PARM2] = pointers;
1538 avp = argv;
1539 while (avp && *avp)
1540 {
1541 /* Save where we're putting it. */
1542 wlat (pointers, strings);
1543
1544 /* Copy the string. */
1545 l = strlen (* avp) + 1;
1546 sim_core_write_buffer (sd, scpu, write_map, *avp, strings, l);
1547
1548 /* Bump the pointers. */
1549 avp++;
1550 pointers += 4;
1551 strings += l+1;
1552 }
1553
1554 /* A null to finish the list. */
1555 wlat (pointers, 0);
1556 pointers += 4;
1557 }
1558
1559 /* Now do the environment pointers. */
1560 if (nenv == 0)
1561 {
1562 /* No strings to fill in. */
1563 cpu.gr[PARM3] = 0;
1564 }
1565 else
1566 {
1567 cpu.gr[PARM3] = pointers;
1568 avp = env;
1569
1570 while (avp && *avp)
1571 {
1572 /* Save where we're putting it. */
1573 wlat (pointers, strings);
1574
1575 /* Copy the string. */
1576 l = strlen (* avp) + 1;
1577 sim_core_write_buffer (sd, scpu, write_map, *avp, strings, l);
1578
1579 /* Bump the pointers. */
1580 avp++;
1581 pointers += 4;
1582 strings += l+1;
1583 }
1584
1585 /* A null to finish the list. */
1586 wlat (pointers, 0);
1587 pointers += 4;
1588 }
1589
1590 return SIM_RC_OK;
1591 }