Commit | Line | Data |
---|---|---|
b34f6357 | 1 | # Simulator main loop for frv. -*- C -*- |
ecd75fc8 | 2 | # Copyright (C) 1998-2014 Free Software Foundation, Inc. |
b34f6357 DB |
3 | # Contributed by Red Hat. |
4 | # | |
5 | # This file is part of the GNU Simulators. | |
6 | # | |
7 | # This program is free software; you can redistribute it and/or modify | |
8 | # it under the terms of the GNU General Public License as published by | |
4744ac1b JB |
9 | # the Free Software Foundation; either version 3 of the License, or |
10 | # (at your option) any later version. | |
b34f6357 DB |
11 | # |
12 | # This program is distributed in the hope that it will be useful, | |
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | # GNU General Public License for more details. | |
16 | # | |
4744ac1b JB |
17 | # You should have received a copy of the GNU General Public License |
18 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
b34f6357 DB |
19 | |
20 | # Syntax: | |
21 | # /bin/sh mainloop.in command | |
22 | # | |
23 | # Command is one of: | |
24 | # | |
25 | # init | |
26 | # support | |
27 | # extract-{simple,scache,pbb} | |
28 | # {full,fast}-exec-{simple,scache,pbb} | |
29 | # | |
30 | # A target need only provide a "full" version of one of simple,scache,pbb. | |
31 | # If the target wants it can also provide a fast version of same. | |
32 | # It can't provide more than this. | |
33 | ||
34 | # ??? After a few more ports are done, revisit. | |
35 | # Will eventually need to machine generate a lot of this. | |
36 | ||
37 | case "x$1" in | |
38 | ||
39 | xsupport) | |
40 | ||
41 | cat <<EOF | |
42 | ||
/* Decode the insn INSN found at PC into *ABUF and return its IDESC.
   When FAST_P is clear, additionally record in ABUF whether PC falls
   inside the configured trace and profile address ranges so the slow
   executor can test those flags cheaply.  */
static INLINE const IDESC *
extract (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, ARGBUF *abuf,
         int fast_p)
{
  const IDESC *id = @cpu@_decode (current_cpu, pc, insn, insn, abuf);
  @cpu@_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  if (! fast_p)
    {
      int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
      int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
      @cpu@_fill_argbuf_tp (current_cpu, abuf, trace_p, profile_p);
    }
  return id;
}
57 | ||
/* Execute the insn described by SC.  FAST_P selects the fast semantic
   routine (no tracing, profiling or timing model); otherwise the full
   routine is used and the timing model, when cycle counting is enabled,
   is run in two passes around the semantics.  Returns the vpc of the
   next insn to execute.  */
static INLINE SEM_PC
execute (SIM_CPU *current_cpu, SCACHE *sc, int fast_p)
{
  SEM_PC vpc;

  /* Force gr0 to zero before every insn.  */
  @cpu@_h_gr_set (current_cpu, 0, 0);

  if (fast_p)
    {
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, sc);
    }
  else
    {
      ARGBUF *abuf = &sc->argbuf;
      const IDESC *idesc = abuf->idesc;
      /* Virtual insns (PBB bookkeeping) are not traced/profiled/modeled.  */
#if WITH_SCACHE_PBB
      int virtual_p = CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_VIRTUAL);
#else
      int virtual_p = 0;
#endif

      if (! virtual_p)
        {
          /* FIXME: call x-before */
          if (ARGBUF_PROFILE_P (abuf))
            PROFILE_COUNT_INSN (current_cpu, abuf->addr, idesc->num);
          /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
          if (FRV_COUNT_CYCLES (current_cpu, ARGBUF_PROFILE_P (abuf)))
            {
              /* Timing-model pass 1: run the insn's model function before
                 the semantics execute.  */
              @cpu@_model_insn_before (current_cpu, sc->first_insn_p);
              model_insn = FRV_INSN_MODEL_PASS_1;
              if (idesc->timing->model_fn != NULL)
                (*idesc->timing->model_fn) (current_cpu, sc);
            }
          else
            model_insn = FRV_INSN_NO_MODELING;
          TRACE_INSN_INIT (current_cpu, abuf, 1);
          TRACE_INSN (current_cpu, idesc->idata,
                      (const struct argbuf *) abuf, abuf->addr);
        }
      /* Run the full semantic routine; its argument type depends on
         whether the scache is in use.  */
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, abuf);
#endif
      if (! virtual_p)
        {
          /* FIXME: call x-after */
          if (FRV_COUNT_CYCLES (current_cpu, ARGBUF_PROFILE_P (abuf)))
            {
              /* Timing-model pass 2: run the model function again after
                 the semantics to obtain the cycle count.  */
              int cycles;
              if (idesc->timing->model_fn != NULL)
                {
                  model_insn = FRV_INSN_MODEL_PASS_2;
                  cycles = (*idesc->timing->model_fn) (current_cpu, sc);
                }
              else
                cycles = 1;
              @cpu@_model_insn_after (current_cpu, sc->last_insn_p, cycles);
            }
          TRACE_INSN_FINI (current_cpu, abuf, 1);
        }
    }

  return vpc;
}
125 | ||
126 | static void | |
127 | @cpu@_parallel_write_init (SIM_CPU *current_cpu) | |
128 | { | |
129 | CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu); | |
130 | CGEN_WRITE_QUEUE_CLEAR (q); | |
131 | previous_vliw_pc = CPU_PC_GET(current_cpu); | |
132 | frv_interrupt_state.f_ne_flags[0] = 0; | |
133 | frv_interrupt_state.f_ne_flags[1] = 0; | |
134 | frv_interrupt_state.imprecise_interrupt = NULL; | |
135 | } | |
136 | ||
137 | static void | |
138 | @cpu@_parallel_write_queued (SIM_CPU *current_cpu) | |
139 | { | |
140 | int i; | |
141 | ||
142 | FRV_VLIW *vliw = CPU_VLIW (current_cpu); | |
143 | CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu); | |
144 | ||
145 | /* Loop over the queued writes, executing them. Set the pc to the address | |
146 | of the insn which queued each write for the proper context in case an | |
147 | interrupt is caused. Restore the proper pc after the writes are | |
148 | completed. */ | |
149 | IADDR save_pc = CPU_PC_GET (current_cpu); | |
150 | IADDR new_pc = save_pc; | |
151 | int branch_taken = 0; | |
152 | int limit = CGEN_WRITE_QUEUE_INDEX (q); | |
153 | frv_interrupt_state.data_written.length = 0; | |
154 | ||
155 | for (i = 0; i < limit; ++i) | |
156 | { | |
157 | CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, i); | |
158 | ||
159 | /* If an imprecise interrupt was generated, then, check whether the | |
160 | result should still be written. */ | |
161 | if (frv_interrupt_state.imprecise_interrupt != NULL) | |
162 | { | |
163 | /* Only check writes by the insn causing the exception. */ | |
164 | if (CGEN_WRITE_QUEUE_ELEMENT_IADDR (item) | |
165 | == frv_interrupt_state.imprecise_interrupt->vpc) | |
166 | { | |
167 | /* Execute writes of floating point operations resulting in | |
168 | overflow, underflow or inexact. */ | |
169 | if (frv_interrupt_state.imprecise_interrupt->kind | |
170 | == FRV_FP_EXCEPTION) | |
171 | { | |
172 | if ((frv_interrupt_state.imprecise_interrupt | |
173 | ->u.fp_info.fsr_mask | |
174 | & ~(FSR_INEXACT | FSR_OVERFLOW | FSR_UNDERFLOW))) | |
175 | continue; /* Don't execute */ | |
176 | } | |
177 | /* Execute writes marked as 'forced'. */ | |
178 | else if (! (CGEN_WRITE_QUEUE_ELEMENT_FLAGS (item) | |
179 | & FRV_WRITE_QUEUE_FORCE_WRITE)) | |
180 | continue; /* Don't execute */ | |
181 | } | |
182 | } | |
183 | ||
184 | /* Only execute the first branch on the queue. */ | |
185 | if (CGEN_WRITE_QUEUE_ELEMENT_KIND (item) == CGEN_PC_WRITE | |
186 | || CGEN_WRITE_QUEUE_ELEMENT_KIND (item) == CGEN_FN_PC_WRITE) | |
187 | { | |
188 | if (branch_taken) | |
189 | continue; | |
190 | branch_taken = 1; | |
191 | if (CGEN_WRITE_QUEUE_ELEMENT_KIND (item) == CGEN_PC_WRITE) | |
192 | new_pc = item->kinds.pc_write.value; | |
193 | else | |
194 | new_pc = item->kinds.fn_pc_write.value; | |
195 | } | |
196 | ||
197 | CPU_PC_SET (current_cpu, CGEN_WRITE_QUEUE_ELEMENT_IADDR (item)); | |
198 | frv_save_data_written_for_interrupts (current_cpu, item); | |
199 | cgen_write_queue_element_execute (current_cpu, item); | |
200 | } | |
201 | ||
202 | /* Update the LR with the address of the next insn if the flag is set. | |
203 | This flag gets set in frvbf_set_write_next_vliw_to_LR by the JMPL, | |
204 | JMPIL and CALL insns. */ | |
205 | if (frvbf_write_next_vliw_addr_to_LR) | |
206 | { | |
207 | frvbf_h_spr_set_handler (current_cpu, H_SPR_LR, save_pc); | |
208 | frvbf_write_next_vliw_addr_to_LR = 0; | |
209 | } | |
210 | ||
211 | CPU_PC_SET (current_cpu, new_pc); | |
212 | CGEN_WRITE_QUEUE_CLEAR (q); | |
213 | } | |
214 | ||
/* Flush all queued parallel writes to the machine state.  External
   entry point (non-static) wrapping @cpu@_parallel_write_queued, used
   when writeback must happen outside the main loop (e.g. interrupt
   processing).  */
void
@cpu@_perform_writeback (SIM_CPU *current_cpu)
{
  @cpu@_parallel_write_queued (current_cpu);
}
220 | ||
/* Id assigned to each cache load request; incremented per request.  */
static unsigned cache_reqno = 0x80000000; /* Start value is for debugging.  */

#if 0 /* experimental */
/* FR400 has single prefetch.  Simulate a single insn fetch buffer:
   request a cache load when the buffer does not already hold the fetch
   block containing VPC, then either stall (when counting cycles) until
   the load completes or read the insns directly from the cache.
   NOTE(review): disabled experimental code, kept for reference.  */
static void
fr400_simulate_insn_prefetch (SIM_CPU *current_cpu, IADDR vpc)
{
  int cur_ix;
  FRV_CACHE *cache;

/* The cpu receives 8 bytes worth of insn data for each fetch aligned
   on 8 byte boundary.  */
#define FR400_FETCH_SIZE 8

  cur_ix = LS;
  /* Align vpc to the start of its fetch block.  */
  vpc &= ~(FR400_FETCH_SIZE - 1);
  cache = CPU_INSN_CACHE (current_cpu);

  /* Request a load of the current address buffer, if necessary.  */
  if (frv_insn_fetch_buffer[cur_ix].address != vpc)
    {
      frv_insn_fetch_buffer[cur_ix].address = vpc;
      frv_insn_fetch_buffer[cur_ix].reqno = cache_reqno++;
      if (FRV_COUNT_CYCLES (current_cpu, 1))
        frv_cache_request_load (cache, frv_insn_fetch_buffer[cur_ix].reqno,
                                frv_insn_fetch_buffer[cur_ix].address,
                                UNIT_I0 + cur_ix);
    }

  /* Wait for the current address buffer to be loaded, if necessary.  */
  if (FRV_COUNT_CYCLES (current_cpu, 1))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      int wait;

      /* Account for any branch penalty.  */
      if (ps->branch_penalty > 0 && ! ps->past_first_p)
        {
          frv_model_advance_cycles (current_cpu, ps->branch_penalty);
          frv_model_trace_wait_cycles (current_cpu, ps->branch_penalty,
                                       "Branch penalty:");
          ps->branch_penalty = 0;
        }

      /* Account for insn fetch latency: advance one cycle at a time
         until the pending request completes.  */
      wait = 0;
      while (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
        {
          frv_model_advance_cycles (current_cpu, 1);
          ++wait;
        }
      frv_model_trace_wait_cycles (current_cpu, wait, "Insn fetch:");
      return;
    }

  /* Otherwise just load the insns directly from the cache.  */
  if (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
    {
      frv_cache_read (cache, cur_ix, vpc);
      frv_insn_fetch_buffer[cur_ix].reqno = NO_REQNO;
    }
}
#endif /* experimental */
285 | ||
/* FR500 has dual prefetch.  Simulate two insn fetch buffers: one holding
   the fetch block containing VPC (the "current" buffer) and one holding
   the next sequential fetch block.  FETCH_SIZE is the number of bytes per
   fetch; the alignment mask below assumes it is a power of two.  */
static void
simulate_dual_insn_prefetch (SIM_CPU *current_cpu, IADDR vpc, int fetch_size)
{
  int i;
  int cur_ix, pre_ix;
  SI pre_address;
  FRV_CACHE *cache;

  /* See if the pc is within the addresses specified by either of the
     fetch buffers.  If so, that will be the current buffer.  Otherwise,
     arbitrarily select the LD buffer as the current one since it gets
     priority in the case of interfering load requests.  */
  cur_ix = LD;
  /* Align vpc to the start of its fetch block.  */
  vpc &= ~(fetch_size - 1);
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      if (frv_insn_fetch_buffer[i].address == vpc)
        {
          cur_ix = i;
          break;
        }
    }
  cache = CPU_INSN_CACHE (current_cpu);

  /* Request a load of the current address buffer, if necessary.  */
  if (frv_insn_fetch_buffer[cur_ix].address != vpc)
    {
      frv_insn_fetch_buffer[cur_ix].address = vpc;
      frv_insn_fetch_buffer[cur_ix].reqno = cache_reqno++;
      if (FRV_COUNT_CYCLES (current_cpu, 1))
        frv_cache_request_load (cache, frv_insn_fetch_buffer[cur_ix].reqno,
                                frv_insn_fetch_buffer[cur_ix].address,
                                UNIT_I0 + cur_ix);
    }

  /* If the prefetch buffer does not represent the next sequential address, then
     request a load of the next sequential address.  */
  pre_ix = (cur_ix + 1) % FRV_CACHE_PIPELINES;
  pre_address = vpc + fetch_size;
  if (frv_insn_fetch_buffer[pre_ix].address != pre_address)
    {
      frv_insn_fetch_buffer[pre_ix].address = pre_address;
      frv_insn_fetch_buffer[pre_ix].reqno = cache_reqno++;
      if (FRV_COUNT_CYCLES (current_cpu, 1))
        frv_cache_request_load (cache, frv_insn_fetch_buffer[pre_ix].reqno,
                                frv_insn_fetch_buffer[pre_ix].address,
                                UNIT_I0 + pre_ix);
    }

  /* If counting cycles, account for any branch penalty and/or insn fetch
     latency here.  */
  if (FRV_COUNT_CYCLES (current_cpu, 1))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      int wait;

      /* Account for any branch penalty.  */
      if (ps->branch_penalty > 0 && ! ps->past_first_p)
        {
          frv_model_advance_cycles (current_cpu, ps->branch_penalty);
          frv_model_trace_wait_cycles (current_cpu, ps->branch_penalty,
                                       "Branch penalty:");
          ps->branch_penalty = 0;
        }

      /* Account for insn fetch latency: advance one cycle at a time
         until the current buffer's pending request completes.  */
      wait = 0;
      while (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
        {
          frv_model_advance_cycles (current_cpu, 1);
          ++wait;
        }
      frv_model_trace_wait_cycles (current_cpu, wait, "Insn fetch:");
      return;
    }

  /* Otherwise just load the insns directly from the cache.  */
  if (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
    {
      frv_cache_read (cache, cur_ix, vpc);
      frv_insn_fetch_buffer[cur_ix].reqno = NO_REQNO;
    }
  if (frv_insn_fetch_buffer[pre_ix].reqno != NO_REQNO)
    {
      frv_cache_read (cache, pre_ix, pre_address);
      frv_insn_fetch_buffer[pre_ix].reqno = NO_REQNO;
    }
}
376 | ||
/* Simulate insn prefetch of the insn at VPC for the selected machine
   variant.  A no-op unless the insn cache is enabled (HSR0.ICE) or
   cycles are being counted.  */
static void
@cpu@_simulate_insn_prefetch (SIM_CPU *current_cpu, IADDR vpc)
{
  SI hsr0;
  SIM_DESC sd;

  /* Nothing to do if not counting cycles and the cache is not enabled.  */
  hsr0 = GET_HSR0 ();
  if (! GET_HSR0_ICE (hsr0) && ! FRV_COUNT_CYCLES (current_cpu, 1))
    return;

  /* Different machines handle prefetch differently.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      /* fr400/fr450: dual prefetch, 8 bytes per fetch.  */
      simulate_dual_insn_prefetch (current_cpu, vpc, 8);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_fr550:
    case bfd_mach_frv:
      /* Other variants: dual prefetch, 16 bytes per fetch.  */
      simulate_dual_insn_prefetch (current_cpu, vpc, 16);
      break;
    default:
      /* Unknown machine: no prefetch modeling.  */
      break;
    }
}
406 | ||
/* Original setting of model profiling, saved by the init section so the
   exec sections can turn timer-forced model profiling back off.  */
int frv_save_profile_model_p;
408 | EOF | |
409 | ||
410 | ;; | |
411 | ||
xinit)

# Emitted once at the top of the generated main-loop function.
cat <<EOF
/*xxxinit*/
  /* If the timer is enabled, then we will enable model profiling during
     execution.  This is because the timer needs accurate cycles counts to
     work properly.  Save the original setting of model profiling.  */
  if (frv_interrupt_state.timer.enabled)
    frv_save_profile_model_p = PROFILE_MODEL_P (current_cpu);
EOF

;;
424 | ||
xextract-simple | xextract-scache)

# Inputs:  current_cpu, vpc, sc, FAST_P
# Outputs: sc filled in
#          SET_LAST_INSN_P(last_p) called to indicate whether insn is last one

cat <<EOF
{
  CGEN_INSN_INT insn = frvbf_read_imem_USI (current_cpu, vpc);
  extract (current_cpu, vpc, insn, SEM_ARGBUF (sc), FAST_P);
  /* The MSB of the insn word marks the last insn of a VLIW packet.  */
  SET_LAST_INSN_P ((insn & 0x80000000) != 0);
}
EOF

;;
440 | ||
xfull-exec-* | xfast-exec-*)

# Inputs: current_cpu, vpc, FAST_P
# Outputs:
#   vpc contains the address of the next insn to execute
#   pc of current_cpu must be up to date (=vpc) upon exit
#   CPU_INSN_COUNT (current_cpu) must be updated by number of insns executed
#
# Unlike the non-parallel case, this version is responsible for doing the
# scache lookup.

cat <<EOF
{
  FRV_VLIW *vliw;
  int first_insn_p = 1;
  int last_insn_p = 0;
  int ninsns;
  CGEN_ATTR_VALUE_ENUM_TYPE slot;

  /* If the timer is enabled, then enable model profiling.  This is because
     the timer needs accurate cycles counts to work properly.  */
  if (frv_interrupt_state.timer.enabled && ! frv_save_profile_model_p)
    sim_profile_set_option (current_state, "-model", PROFILE_MODEL_IDX, "1");

  /* Init parallel-write queue and vliw.  */
  @cpu@_parallel_write_init (current_cpu);
  vliw = CPU_VLIW (current_cpu);
  frv_vliw_reset (vliw, STATE_ARCHITECTURE (CPU_STATE (current_cpu))->mach,
                  CPU_ELF_FLAGS (current_cpu));
  frv_current_fm_slot = UNIT_NIL;

  /* Execute one VLIW packet: up to FRV_VLIW_SIZE insns, stopping after
     the insn marked as last in the packet.  */
  for (ninsns = 0; ! last_insn_p && ninsns < FRV_VLIW_SIZE; ++ninsns)
    {
      SCACHE *sc;
      const CGEN_INSN *insn;
      int error;
      /* Go through the motions of finding the insns in the cache.  */
      @cpu@_simulate_insn_prefetch (current_cpu, vpc);

      sc = @cpu@_scache_lookup (current_cpu, vpc, scache, hash_mask, FAST_P);
      sc->first_insn_p = first_insn_p;
      last_insn_p = sc->last_insn_p;

      /* Add the insn to the vliw and set up the interrupt state.  */
      insn = sc->argbuf.idesc->idata;
      error = frv_vliw_add_insn (vliw, insn);
      if (! error)
        frv_vliw_setup_insn (current_cpu, insn);
      frv_detect_insn_access_interrupts (current_cpu, sc);
      /* Record the FM slot, if any, that this insn was assigned to.  */
      slot = (*vliw->current_vliw)[vliw->next_slot - 1];
      if (slot >= UNIT_FM0 && slot <= UNIT_FM3)
        frv_current_fm_slot = slot;

      vpc = execute (current_cpu, sc, FAST_P);

      SET_H_PC (vpc); /* needed for interrupt handling */
      first_insn_p = 0;
    }

  /* If the timer is enabled, and model profiling was not originally enabled,
     then turn it off again.  This is the only place we can currently gain
     control to do this.  */
  if (frv_interrupt_state.timer.enabled && ! frv_save_profile_model_p)
    sim_profile_set_option (current_state, "-model", PROFILE_MODEL_IDX, "0");

  /* Check for interrupts.  Also handles writeback if necessary.  */
  frv_process_interrupts (current_cpu);

  CPU_INSN_COUNT (current_cpu) += ninsns;
}
EOF

;;
514 | ||
*)
  # Unknown command: report on stderr and fail so the build notices.
  echo "Invalid argument to mainloop.in: $1" >&2
  exit 1
  ;;
519 | ||
520 | esac |