/* frv simulator support code
   Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004 Free Software
   Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "cgen-engine.h"
#include "cgen-par.h"
#include "bfd.h"
#include "gdb/sim-frv.h"
#include <math.h>

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;

/* The contents of BUF are in target byte order.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_GR (grn));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_FR (frn));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SETTSI (buf, GET_H_SPR (spr));
    }
  else
    {
      SETTSI (buf, 0xdeadbeef);
      return 0;
    }

  return len;
}

/* The contents of BUF are in target byte order.  */

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SET_H_GR (grn, GETTSI (buf));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SET_H_FR (frn, GETTSI (buf));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SET_H_SPR (spr, GETTSI (buf));
    }
  else
    return 0;

  return len;
}
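
/* Both functions above follow the register-access convention used by the
   sim framework: the return value is the number of bytes transferred
   (LEN) on success, and 0 when the register is unavailable on the current
   machine variant or not implemented.  */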
\f
/* Cover fns to access the general registers.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}

void
frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
{
  frv_check_gr_access (current_cpu, gr);

  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  CPU (h_gr[gr]) = newval;
}
\f
/* Cover fns to access the floating point registers.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}

void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}
\f
/* Cover fns to access the general registers as double words.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_register_exception_interrupt (current_cpu,
                                                  FRV_REC_UNALIGNED);
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

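/* Example: for a register-pair access ALIGN_MASK is 1, so an odd register
   number such as gr7 queues the interrupt selected above and is masked
   down to gr6; for a quad access ALIGN_MASK is 3, so gr7 would be masked
   down to gr4.  */
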
static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          {
            struct frv_fp_exception_info fp_info = {
              FSR_NO_EXCEPTION, FTT_INVALID_FR
            };
            frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
          }
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
          frv_queue_data_access_error_interrupt (current_cpu, address);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
          break;
        default:
          break;
        }

      address &= ~align_mask;
    }

  return address;
}

DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |= (USI) GET_H_GR (gr + 1);
  return value;
}

void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
}
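
/* In a register pair the even register supplies the most significant 32
   bits and the odd register the least significant 32 bits, consistent
   with FRV's big-endian byte order.  */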
\f
/* Cover fns to access the floating point register as double words.  */
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}

void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      SET_H_FR (fr    , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr    , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}
\f
/* Cover fns to access the floating point register as integer words.  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}

void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}
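
/* The unions above transfer the raw bit pattern between the SF and USI
   views of the register; a plain cast would instead perform a
   float-to-integer value conversion, which is not what these access
   paths want.  */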
\f
/* Cover fns to access the coprocessor registers as double words.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  value = GET_H_CPR (cpr);
  value <<= 32;
  value |= (USI) GET_H_CPR (cpr + 1);
  return value;
}

void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to write registers as quad words.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr    , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}

void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr    , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}

void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr    , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}
\f
/* Cover fns to access the special purpose registers.  */
USI
frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* Check access restrictions.  */
  frv_check_spr_read_access (current_cpu, spr);

  switch (spr)
    {
    case H_SPR_PSR:
      return spr_psr_get_handler (current_cpu);
    case H_SPR_TBR:
      return spr_tbr_get_handler (current_cpu);
    case H_SPR_BPSR:
      return spr_bpsr_get_handler (current_cpu);
    case H_SPR_CCR:
      return spr_ccr_get_handler (current_cpu);
    case H_SPR_CCCR:
      return spr_cccr_get_handler (current_cpu);
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      return spr_sr_get_handler (current_cpu, spr);
    default:
      return CPU (h_spr[spr]);
    }
  return 0;
}

void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}
\f
/* Cover fns to access the gr_hi and gr_lo registers.  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR (gr) >> 16) & 0xffff;
}

void
frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
  SET_H_GR (gr, value);
}

UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR (gr) & 0xffff;
}

void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}
\f
/* Cover fns to access the tbr bits.  */
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
            ((GET_H_TBR_TT  () & 0xff   ) <<  4);

  return tbr;
}

void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;

  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff);
  SET_H_TBR_TT  ((tbr >>  4) & 0xff);
}
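
/* TBR layout implied by the handlers above: TBA occupies bits 31:12 and
   TT bits 11:4; bits 3:0 are not represented and always read as zero.  */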
\f
/* Cover fns to access the bpsr bits.  */
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS  () & 0x1) << 12) |
             ((GET_H_BPSR_BET () & 0x1)      );

  return bpsr;
}

void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_BPSR_BS  ((newval >> 12) & 1);
  SET_H_BPSR_BET ((newval      ) & 1);
}
\f
/* Cover fns to access the psr bits.  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER   () & 0xf) << 24) |
            ((GET_H_PSR_ICE   () & 0x1) << 16) |
            ((GET_H_PSR_NEM   () & 0x1) << 14) |
            ((GET_H_PSR_CM    () & 0x1) << 13) |
            ((GET_H_PSR_BE    () & 0x1) << 12) |
            ((GET_H_PSR_ESR   () & 0x1) << 11) |
            ((GET_H_PSR_EF    () & 0x1) <<  8) |
            ((GET_H_PSR_EM    () & 0x1) <<  7) |
            ((GET_H_PSR_PIL   () & 0xf) <<  3) |
            ((GET_H_PSR_S     () & 0x1) <<  2) |
            ((GET_H_PSR_PS    () & 0x1) <<  1) |
            ((GET_H_PSR_ET    () & 0x1)      );

  return psr;
}

void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >>  2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER   ((newval >> 24) & 0xf);
  SET_H_PSR_ICE   ((newval >> 16) & 1);
  SET_H_PSR_NEM   ((newval >> 14) & 1);
  SET_H_PSR_CM    ((newval >> 13) & 1);
  SET_H_PSR_BE    ((newval >> 12) & 1);
  SET_H_PSR_ESR   ((newval >> 11) & 1);
  SET_H_PSR_EF    ((newval >>  8) & 1);
  SET_H_PSR_EM    ((newval >>  7) & 1);
  SET_H_PSR_PIL   ((newval >>  3) & 0xf);
  SET_H_PSR_PS    ((newval >>  1) & 1);
  SET_H_PSR_ET    ((newval      ) & 1);
}
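
/* PSR layout implied by the handlers above:
     bits 31:28 IMPLE   bits 27:24 VER   bit 16 ICE   bit 14 NEM
     bit  13    CM      bit  12    BE    bit 11 ESR   bit  8 EF
     bit   7    EM      bits  6:3  PIL   bit  2 S     bit  1 PS
     bit   0    ET.  */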

void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}
\f
/* Cover fns to access the ccr bits.  */
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
            ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
            ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
            ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
            ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
            ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) <<  8) |
            ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) <<  4) |
            ((GET_H_FCCR (H_FCCR_FCC0) & 0xf)      );

  return ccr;
}

void
spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
  SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
  SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
  SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
  SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
  SET_H_FCCR (H_FCCR_FCC2, (newval >>  8) & 0xf);
  SET_H_FCCR (H_FCCR_FCC1, (newval >>  4) & 0xf);
  SET_H_FCCR (H_FCCR_FCC0, (newval      ) & 0xf);
}
\f
QI
frvbf_set_icc_for_shift_right (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the C flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = (1 << shift) - 1;
  if ((value & mask) != 0)
    return icc | 0x1;

  return icc & 0xe;
}

QI
frvbf_set_icc_for_shift_left (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the V flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = ((1 << shift) - 1) << (32 - shift);
  if ((value & mask) != 0)
    return icc | 0x2;

  return icc & 0xd;
}
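
/* In the 4-bit ICC value, bit 0 is the carry flag and bit 1 is the
   overflow flag; bits 2 and 3 are left untouched by the two helpers
   above.  For example, shifting 0x0000000b right by 2 loses the set bits
   0x3, so the first helper returns ICC with the carry bit set.  */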
\f
/* Cover fns to access the cccr bits.  */
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
             ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
             ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
             ((GET_H_CCCR (H_CCCR_CC4) & 0x3) <<  8) |
             ((GET_H_CCCR (H_CCCR_CC3) & 0x3) <<  6) |
             ((GET_H_CCCR (H_CCCR_CC2) & 0x3) <<  4) |
             ((GET_H_CCCR (H_CCCR_CC1) & 0x3) <<  2) |
             ((GET_H_CCCR (H_CCCR_CC0) & 0x3)      );

  return cccr;
}

void
spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
  SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
  SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
  SET_H_CCCR (H_CCCR_CC4, (newval >>  8) & 0x3);
  SET_H_CCCR (H_CCCR_CC3, (newval >>  6) & 0x3);
  SET_H_CCCR (H_CCCR_CC2, (newval >>  4) & 0x3);
  SET_H_CCCR (H_CCCR_CC1, (newval >>  2) & 0x3);
  SET_H_CCCR (H_CCCR_CC0, (newval      ) & 0x3);
}
\f
/* Cover fns to access the sr bits.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}

void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}
\f
/* Switch SR0-SR3 with GR4-GR7 if PSR.ESR is set.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access the
         PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
        {
          int gr = i + 4;
          int spr = i + H_SPR_SR0;
          SI tmp = GET_H_SPR (spr);
          SET_H_SPR (spr, GET_H_GR (gr));
          SET_H_GR (gr, tmp);
        }
      CPU (h_psr_s) = save_psr_s;
    }
}
\f
/* Handle load/store of quad registers.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
        value[i] = 0;
      else
        value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
\f
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set;
         otherwise it may result in 0x7fffffff (sparc compatibility) or
         0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x7fffffff);
      else
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                           arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
                                    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non excepting instruction.  Clear the NE flag for the target
         register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}

void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
                              target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                             arg1 / arg2);
      if (non_excepting)
        {
          /* Non excepting instruction.  Clear the NE flag for the target
             register.  */
          SI NE_flags[2];
          GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
          CLEAR_NE_FLAG (NE_flags, target_index);
          SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
        }
    }
}
\f
/* Clear accumulators.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  int acc_num =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 8 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 8 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 4 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  ps->mclracc_acc = acc_ix;
  ps->mclracc_A = A;
  if (A == 0 || acc_ix != 0) /* Clear a single accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
         implemented.  */
      if (acc_ix < acc_num)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i < acc_num; ++i)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}
\f
/* Functions to aid insn semantics.  */

/* Compute the result of the SCAN and SCANI insns after the shift and xor.  */
SI
frvbf_scan_result (SIM_CPU *current_cpu, SI value)
{
  SI i;
  SI mask;

  if (value == 0)
    return 63;

  /* Find the position of the first non-zero bit.
     The loop will terminate since there is guaranteed to be at least one
     non-zero bit.  */
  mask = 1 << (sizeof (mask) * 8 - 1);
  for (i = 0; (value & mask) == 0; ++i)
    value <<= 1;

  return i;
}
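
/* The result is thus the number of leading zero bits in VALUE: 0 when
   bit 31 is set, 31 when VALUE is 1, and 63 by convention when VALUE is
   zero, i.e. when the two SCAN operands did not differ.  */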

/* Compute the result of the cut insns.  */
SI
frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
{
  SI result;
  if (cut_point < 32)
    {
      result = reg1 << cut_point;
      result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
    }
  else
    result = reg2 << (cut_point - 32);

  return result;
}
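
/* Conceptually, REG1:REG2 form a 64-bit value and the result is the
   32-bit field starting CUT_POINT bits from the top.  For example, with
   reg1 = 0x12345678, reg2 = 0x9abcdef0 and cut_point = 8, the result is
   0x3456789a.  */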

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}
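
/* The expression `cut_point << 26 >> 26' sign-extends the low 6 bits of
   CUT_POINT: 0x3f becomes -1 and 0x20 becomes -32, while 0 through 0x1f
   are unchanged.  */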

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
        {
          if (acc < 0)
            return 0x80000000;
          return 0x7fffffff;
        }
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}

/* Compute the result of int accumulator cut (SCUTSS).  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  DI lower, upper;

  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
     The top bit of the return value corresponds to bit (63 - CUT_POINT)
     of this 128-bit value.

     Since we can't deal with 128-bit values very easily, convert the
     operation into an equivalent 64-bit one.  */
  if (cut_point < 0)
    {
      /* Avoid an undefined shift operation.  */
      if (cut_point == -64)
        acc >>= 63;
      else
        acc >>= -cut_point;
      cut_point = 0;
    }

  /* Get the shifted but unsaturated result.  Set LOWER to the lowest
     32 bits of the result and UPPER to the result >> 31.  */
  if (cut_point < 32)
    {
      /* The cut loses the (32 - CUT_POINT) least significant bits.
         Round the result up if the most significant of these lost bits
         is 1.  */
      lower = acc >> (32 - cut_point);
      if (lower < 0x7fffffff)
        if (acc & LSBIT64 (32 - cut_point - 1))
          lower++;
      upper = lower >> 31;
    }
  else
    {
      lower = acc << (cut_point - 32);
      upper = acc >> (63 - cut_point);
    }

  /* Saturate the result.  */
  if (upper < -1)
    return ~0x7fffffff;
  else if (upper > 0)
    return 0x7fffffff;
  else
    return lower;
}

/* Compute the result of shift-left-arithmetic-with-saturation (SLASS).  */
SI
frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
{
  int neg_arg1;

  /* FIXME: what to do with negative shift amt?  */
  if (arg2 <= 0)
    return arg1;

  if (arg1 == 0)
    return 0;

  /* Signed shift by 31 or greater saturates by definition.  */
  if (arg2 >= 31)
    {
      if (arg1 > 0)
        return (SI) 0x7fffffff;
      else
        return (SI) 0x80000000;
    }

  /* OK, arg2 is between 1 and 31.  */
  neg_arg1 = (arg1 < 0);
  do {
    arg1 <<= 1;
    /* Check for sign bit change (saturation).  */
    if (neg_arg1 && (arg1 >= 0))
      return (SI) 0x80000000;
    else if (!neg_arg1 && (arg1 < 0))
      return (SI) 0x7fffffff;
  } while (--arg2 > 0);

  return arg1;
}
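
/* Example: shifting 0x40000000 left by 1 would flip the sign bit, so the
   function returns the saturated value 0x7fffffff rather than
   0x80000000.  */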

/* Simulate the media custom insns.  */
void
frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
{
  /* The semantics of the insn are a nop, since it is implementation defined.
     We do need to check whether it's implemented and set up for MTRAP
     if it's not.  */
  USI msr0 = GET_MSR (0);
  if (GET_MSR_EMCI (msr0) == 0)
    {
      /* No interrupt queued at this time.  */
      frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
    }
}

/* Simulate the media average (MAVEH) insn.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;
  int rounding_value;

  /* On fr400 and fr550, check the rounding mode.  On other machines
     rounding is always toward negative infinity and the result is
     already correctly rounded.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    /* Need to check rounding mode.  */
    case bfd_mach_fr400:
    case bfd_mach_fr550:
      /* Check whether rounding will be required.  Rounding will be required
         if the sum is an odd number.  */
      rounding_value = sum & 1;
      if (rounding_value)
        {
          USI msr0 = GET_MSR (0);
          /* Check MSR0.SRDAV to determine which bits control the rounding.  */
          if (GET_MSR_SRDAV (msr0))
            {
              /* MSR0.RD controls rounding.  */
              switch (GET_MSR_RD (msr0))
                {
                case 0:
                  /* Round to nearest.  */
                  if (result >= 0)
                    ++result;
                  break;
                case 1:
                  /* Round toward 0.  */
                  if (result < 0)
                    ++result;
                  break;
                case 2:
                  /* Round toward positive infinity.  */
                  ++result;
                  break;
                case 3:
                  /* Round toward negative infinity.  The result is already
                     correctly rounded.  */
                  break;
                default:
                  abort ();
                  break;
                }
            }
          else
            {
              /* MSR0.RDAV controls rounding.  If set, round toward positive
                 infinity.  Otherwise the result is already rounded correctly
                 toward negative infinity.  */
              if (GET_MSR_RDAV (msr0))
                ++result;
            }
        }
      break;
    default:
      break;
    }

  return result;
}
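
/* Rounding example: averaging 1 and 2 gives sum 3, so RESULT starts as 1
   (3 >> 1, rounded toward negative infinity).  With MSR0.SRDAV set and
   MSR0.RD = 2 (round toward positive infinity) the result becomes 2; with
   the default rounding it stays 1.  */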

SI
frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
{
  SI result;
  result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
  result &= 0xffff;
  result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
                              (reg2 >> 16) & 0xffff) << 16;
  return result;
}

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and
   CALL insns.  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  frvbf_write_next_vliw_addr_to_LR = value;
}

void
frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
{
  USI NE_flags[2];

  /* Save the target register so interrupt processing can set its NE flag
     in the event of an exception.  */
  frv_interrupt_state.ne_index = index;

  /* Clear the NE flag of the target register.  It will be reset if necessary
     in the event of an exception.  */
  GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
  CLEAR_NE_FLAG (NE_flags, index);
  SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
}

void
frvbf_force_update (SIM_CPU *current_cpu)
{
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q);
  if (ix > 0)
    {
      CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
      item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
    }
}
\f
/* Condition code logic.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};

UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}
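
/* The 2-bit CR operand encoding matches the enum above: 0 and 1 are
   undefined, 2 is false and 3 is true, so ARG1 and ARG2 can index the
   table directly.  */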
\f
/* Cache Manipulation.  */
void
frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for an icei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcef with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 1/* flush? */);
      else
        frv_cache_invalidate (cache, address, 1/* flush? */);
    }
}