/* frv simulator support code
   Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "cgen-engine.h"
#include "cgen-par.h"
#include "bfd.h"
#include <math.h>

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;

/* The contents of BUF are in target byte order.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (rn <= GR_REGNUM_MAX)
    SETTSI (buf, GET_H_GR (rn));
  else if (rn <= FR_REGNUM_MAX)
    SETTSI (buf, GET_H_FR (rn - GR_REGNUM_MAX - 1));
  else if (rn == PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (rn == LR_REGNUM)
    SETTSI (buf, GET_H_SPR (H_SPR_LR));
  else
    SETTSI (buf, 0xdeadbeef);

  return -1;
}

/* The contents of BUF are in target byte order.  */

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (rn <= GR_REGNUM_MAX)
    SET_H_GR (rn, GETTSI (buf));
  else if (rn <= FR_REGNUM_MAX)
    SET_H_FR (rn - GR_REGNUM_MAX - 1, GETTSI (buf));
  else if (rn == PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (rn == LR_REGNUM)
    SET_H_SPR (H_SPR_LR, GETTSI (buf));

  return -1;
}
\f
/* Cover fns to access the general registers.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}

void
frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
{
  frv_check_gr_access (current_cpu, gr);

  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  CPU (h_gr[gr]) = newval;
}
\f
/* Cover fns to access the floating point registers.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}

void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}
\f
/* Cover fns to access the general registers as double words.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
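  /* ALIGN_MASK holds the low-order index bits that must be zero: 1 for a
     double-word register pair, 3 for a quad-word group (see the callers
     below).  A misaligned index queues a machine-dependent interrupt and
     is then forced into alignment by clearing those bits.  */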
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_register_exception_interrupt (current_cpu,
                                                  FRV_REC_UNALIGNED);
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          {
            struct frv_fp_exception_info fp_info = {
              FSR_NO_EXCEPTION, FTT_INVALID_FR
            };
            frv_queue_fp_exception_interrupt (current_cpu, &fp_info);
          }
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
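  /* As above, but for a memory address: ALIGN_MASK is the set of
     byte-address bits that must be clear (0xf for the 16-byte quad
     accesses below).  */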
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
          frv_queue_data_access_error_interrupt (current_cpu, address);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
          break;
        default:
          break;
        }

      address &= ~align_mask;
    }

  return address;
}

DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |= (USI) GET_H_GR (gr + 1);
  return value;
}

void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to access the floating point register as double words.  */
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

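  /* Assemble the two 32-bit halves in host word order so that AS_DF
     reinterprets the pair as a single double.  */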
  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}

void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      SET_H_FR (fr    , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr    , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}
\f
/* Cover fns to access the floating point register as integer words.  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

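  /* Type-pun through the union: return the raw bit pattern of the SF
     value as a USI, with no numeric conversion.  */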
  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}

void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}
\f
/* Cover fns to access the coprocessor registers as double words.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  value = GET_H_CPR (cpr);
  value <<= 32;
  value |= (USI) GET_H_CPR (cpr + 1);
  return value;
}

void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to write registers as quad words.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr    , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}

void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr    , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}

void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr    , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}
\f
/* Cover fns to access the special purpose registers.  */
USI
frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* Check access restrictions.  */
  frv_check_spr_read_access (current_cpu, spr);

  switch (spr)
    {
    case H_SPR_PSR:
      return spr_psr_get_handler (current_cpu);
    case H_SPR_TBR:
      return spr_tbr_get_handler (current_cpu);
    case H_SPR_BPSR:
      return spr_bpsr_get_handler (current_cpu);
    case H_SPR_CCR:
      return spr_ccr_get_handler (current_cpu);
    case H_SPR_CCCR:
      return spr_cccr_get_handler (current_cpu);
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      return spr_sr_get_handler (current_cpu, spr);
    default:
      return CPU (h_spr[spr]);
    }
  return 0;
}

void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}
\f
/* Cover fns to access the gr_hi and gr_lo registers.  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR (gr) >> 16) & 0xffff;
}

void
frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
  SET_H_GR (gr, value);
}

UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR (gr) & 0xffff;
}

void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}
\f
/* Cover fns to access the tbr bits.  */
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
            ((GET_H_TBR_TT () & 0xff) << 4);

  return tbr;
}

void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;

  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff);
  SET_H_TBR_TT ((tbr >> 4) & 0xff);
}
\f
/* Cover fns to access the bpsr bits.  */
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS () & 0x1) << 12) |
             ((GET_H_BPSR_BET () & 0x1));

  return bpsr;
}

void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int bpsr = newval;

  SET_H_BPSR_BS ((bpsr >> 12) & 1);
  SET_H_BPSR_BET (bpsr & 1);
}
\f
/* Cover fns to access the psr bits.  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER () & 0xf) << 24) |
            ((GET_H_PSR_ICE () & 0x1) << 16) |
            ((GET_H_PSR_NEM () & 0x1) << 14) |
            ((GET_H_PSR_CM () & 0x1) << 13) |
            ((GET_H_PSR_BE () & 0x1) << 12) |
            ((GET_H_PSR_ESR () & 0x1) << 11) |
            ((GET_H_PSR_EF () & 0x1) << 8) |
            ((GET_H_PSR_EM () & 0x1) << 7) |
            ((GET_H_PSR_PIL () & 0xf) << 3) |
            ((GET_H_PSR_S () & 0x1) << 2) |
            ((GET_H_PSR_PS () & 0x1) << 1) |
            ((GET_H_PSR_ET () & 0x1));

  return psr;
}

void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >> 2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER ((newval >> 24) & 0xf);
  SET_H_PSR_ICE ((newval >> 16) & 1);
  SET_H_PSR_NEM ((newval >> 14) & 1);
  SET_H_PSR_CM ((newval >> 13) & 1);
  SET_H_PSR_BE ((newval >> 12) & 1);
  SET_H_PSR_ESR ((newval >> 11) & 1);
  SET_H_PSR_EF ((newval >> 8) & 1);
  SET_H_PSR_EM ((newval >> 7) & 1);
  SET_H_PSR_PIL ((newval >> 3) & 0xf);
  SET_H_PSR_PS ((newval >> 1) & 1);
  SET_H_PSR_ET (newval & 1);
}

void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}
\f
/* Cover fns to access the ccr bits.  */
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
            ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
            ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
            ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
            ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
            ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) << 8) |
            ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) << 4) |
            ((GET_H_FCCR (H_FCCR_FCC0) & 0xf));

  return ccr;
}

void
spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int ccr = newval;

  SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
  SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
  SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
  SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
  SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
  SET_H_FCCR (H_FCCR_FCC2, (newval >> 8) & 0xf);
  SET_H_FCCR (H_FCCR_FCC1, (newval >> 4) & 0xf);
  SET_H_FCCR (H_FCCR_FCC0, newval & 0xf);
}
\f
QI
frvbf_set_icc_for_shift_right (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
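  /* The 4-bit ICC field is laid out N,Z,V,C from bit 3 down to bit 0;
     this function updates only the carry bit, and the shift-left
     variant below updates only the overflow bit.  */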
  /* Set the C flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = (1 << shift) - 1;
  if ((value & mask) != 0)
    return icc | 0x1;

  return icc & 0xe;
}

QI
frvbf_set_icc_for_shift_left (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the V flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = ((1 << shift) - 1) << (32 - shift);
  if ((value & mask) != 0)
    return icc | 0x2;

  return icc & 0xd;
}
\f
/* Cover fns to access the cccr bits.  */
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
             ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
             ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
             ((GET_H_CCCR (H_CCCR_CC4) & 0x3) << 8) |
             ((GET_H_CCCR (H_CCCR_CC3) & 0x3) << 6) |
             ((GET_H_CCCR (H_CCCR_CC2) & 0x3) << 4) |
             ((GET_H_CCCR (H_CCCR_CC1) & 0x3) << 2) |
             ((GET_H_CCCR (H_CCCR_CC0) & 0x3));

  return cccr;
}

void
spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int cccr = newval;

  SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
  SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
  SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
  SET_H_CCCR (H_CCCR_CC4, (newval >> 8) & 0x3);
  SET_H_CCCR (H_CCCR_CC3, (newval >> 6) & 0x3);
  SET_H_CCCR (H_CCCR_CC2, (newval >> 4) & 0x3);
  SET_H_CCCR (H_CCCR_CC1, (newval >> 2) & 0x3);
  SET_H_CCCR (H_CCCR_CC0, newval & 0x3);
}
\f
/* Cover fns to access the sr bits.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7, which will be GR4-7;
     otherwise the correct mapping of UGR4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}

void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7, which will be GR4-7;
     otherwise the correct mapping of UGR4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}
\f
/* Switch SR0-SR3 with GR4-GR7 if PSR.ESR is set.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access the
         PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
        {
          int gr = i + 4;
          int spr = i + H_SPR_SR0;
          SI tmp = GET_H_SPR (spr);
          SET_H_SPR (spr, GET_H_GR (gr));
          SET_H_GR (gr, tmp);
        }
      CPU (h_psr_s) = save_psr_s;
    }
}
\f
/* Handle load/store of quad registers.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
        value[i] = 0;
      else
        value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
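  /* If the data cache is enabled (HSR0.DCE), queue the store through the
     cache-aware setter; otherwise queue a plain memory write.  */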
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
\f
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set;
         otherwise it may result in 0x7fffffff (sparc compatibility) or
         0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x7fffffff);
      else
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                           arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
                                    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non excepting instruction.  Clear the NE flag for the target
         register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}

void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
                              target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                             arg1 / arg2);
      if (non_excepting)
        {
          /* Non excepting instruction.  Clear the NE flag for the target
             register.  */
          SI NE_flags[2];
          GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
          CLEAR_NE_FLAG (NE_flags, target_index);
          SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
        }
    }
}
\f
/* Clear accumulators.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  int acc_num =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 8 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 4 :
    63;

  if (A == 0 || acc_ix != 0) /* Clear 1 accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
         implemented.  */
      if (acc_ix < acc_num)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i < acc_num; ++i)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}
\f
/* Functions to aid insn semantics.  */

/* Compute the result of the SCAN and SCANI insns after the shift and xor.  */
SI
frvbf_scan_result (SIM_CPU *current_cpu, SI value)
{
  SI i;
  SI mask;

  if (value == 0)
    return 63;

  /* Find the position of the first non-zero bit.
     The loop will terminate since there is guaranteed to be at least one
     non-zero bit.  */
  mask = 1 << (sizeof (mask) * 8 - 1);
  for (i = 0; (value & mask) == 0; ++i)
    value <<= 1;

  return i;
}

/* Compute the result of the cut insns.  */
SI
frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
{
  SI result;
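  /* Extract the 32 bits that begin CUT_POINT bits into the 64-bit
     REG1:REG2 concatenation.  */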
  if (cut_point < 32)
    {
      result = reg1 << cut_point;
      result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
    }
  else
    result = reg2 << (cut_point - 32);

  return result;
}

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
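  /* Shifting left by 26 then arithmetically right by 26 sign-extends the
     6-bit field to a full SI.  */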
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
        {
          if (acc < 0)
            return 0x80000000;
          return 0x7fffffff;
        }
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}

/* Simulate the media custom insns.  */
void
frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
{
  /* The semantics of the insn are a nop, since it is implementation defined.
     We do need to check whether it's implemented and set up for MTRAP
     if it's not.  */
  USI msr0 = GET_MSR (0);
  if (GET_MSR_EMCI (msr0) == 0)
    {
      /* no interrupt queued at this time.  */
      frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
    }
}

/* Simulate the media average (MAVEH) insn.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;
  int rounding_value;

  /* On fr400, check the rounding mode.  On other machines rounding is always
     toward negative infinity and the result is already correctly rounded.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    /* Need to check rounding mode.  */
    case bfd_mach_fr400:
      /* Check whether rounding will be required.  Rounding will be required
         if the sum is an odd number.  */
      rounding_value = sum & 1;
      if (rounding_value)
        {
          USI msr0 = GET_MSR (0);
          /* Check MSR0.SRDAV to determine which bits control the rounding.  */
          if (GET_MSR_SRDAV (msr0))
            {
              /* MSR0.RD controls rounding.  */
              switch (GET_MSR_RD (msr0))
                {
                case 0:
                  /* Round to nearest.  */
                  if (result >= 0)
                    ++result;
                  break;
                case 1:
                  /* Round toward 0.  */
                  if (result < 0)
                    ++result;
                  break;
                case 2:
                  /* Round toward positive infinity.  */
                  ++result;
                  break;
                case 3:
                  /* Round toward negative infinity.  The result is already
                     correctly rounded.  */
                  break;
                default:
                  abort ();
                  break;
                }
            }
          else
            {
              /* MSR0.RDAV controls rounding.  If set, round toward positive
                 infinity.  Otherwise the result is already rounded correctly
                 toward negative infinity.  */
              if (GET_MSR_RDAV (msr0))
                ++result;
            }
        }
      break;
    default:
      break;
    }

  return result;
}

SI
frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
{
  SI result;
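  /* Average the low and high 16-bit halves of REG1 and REG2 independently
     and repack the two results.  */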
  result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
  result &= 0xffff;
  result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
                              (reg2 >> 16) & 0xffff) << 16;
  return result;
}

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and CALL
   insns.  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  frvbf_write_next_vliw_addr_to_LR = value;
}

void
frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
{
  USI NE_flags[2];

  /* Save the target register so interrupt processing can set its NE flag
     in the event of an exception.  */
  frv_interrupt_state.ne_index = index;

  /* Clear the NE flag of the target register.  It will be reset if necessary
     in the event of an exception.  */
  GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
  CLEAR_NE_FLAG (NE_flags, index);
  SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
}

void
frvbf_force_update (SIM_CPU *current_cpu)
{
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q);
  if (ix > 0)
    {
      CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
      item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
    }
}
\f
/* Condition code logic.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

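/* Result of OP (ARG1, ARG2) for each CR operation above, indexed by the
   four possible states of each operand.  The two undefined encodings
   (cr_undefined and cr_undefined1) behave identically.  */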
static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};

UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}
\f
/* Cache Manipulation.  */
void
frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for an icei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcef with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 1/* flush? */);
      else
        frv_cache_invalidate (cache, address, 1/* flush? */);
    }
}