/* frv cache model.
   Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"

void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (cache->sets == 0)
        cache->sets = 128;
      if (cache->ways == 0)
        cache->ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->sets == 0)
        cache->sets = 64;
      if (cache->ways == 0)
        cache->ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }
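
  /* With these defaults the fr400 cache holds 128 * 2 * 32 = 8KB of line
     data and the generic cache holds 64 * 4 * 64 = 16KB.  */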

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}

void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return 1; /* non-cache access */

  return 0; /* cache-access */
}

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);
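  /* For example, with the default 64-byte lines and 64 sets this amounts
     to set = (address >> 6) & 63.  */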

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}

/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
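  /* The line's base address has now been rebuilt by reversing the set-index
     computation from get_tag and merging in the stored tag bits.  */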
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
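
/* Together these two routines maintain tag->lru as a recency rank within a
   set: larger values are more recently used, with cache->ways marking the
   most recent and 0 the least.  get_tag picks the unlocked tag with the
   smallest rank as the replacement candidate.  */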

/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were in use and
         locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}
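
/* A minimal usage sketch for the functional read interface below (not part
   of the original code): after frv_cache_read returns non-zero, the whole
   aligned line containing the address sits in the pipeline's return buffer,
   e.g. for the LS pipeline:

     if (frv_cache_read (cache, LS, address))
       {
         char *line = cache->pipeline[LS].status.return_buffer.data;
         char byte = line[address & (cache->line_size - 1)];
         ...
       }
*/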

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (hsr0);
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}

/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   ------------------------------------------------------------------------- */
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}
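
/* Request structures are never returned to the system allocator; they cycle
   between the pipelines and the free chains above, and the pool grows in
   blocks of FREE_CHAIN_SIZE as needed.  */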

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request with a buffer of the
   right size.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}

/* Requeue the request from the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return a priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
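  /* Example: a lowest of 7 yields 9 for the new request; a WAR request it
     spawns later takes 9 - 1 = 8, slotting between the two.  */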
  return lowest + 2;
}

static void
add_WAR_request (FRV_CACHE_PIPELINE* pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
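
/* Requests thus enter at FIRST_STAGE and are acted upon only once they reach
   LAST_STAGE (see arbitrate_requests below); the intervening stages model the
   pipelined latency of a cache access.  */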

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL && other_req->kind == req_WAR)
                return 1;
            }
        }
    }

  /* Check for a collision with load requests waiting in BARS or NARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}
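
/* BARS and NARS act as overflow slots for requests waiting on memory when
   both of a pipeline's WAR registers are busy: BARS holds the first waiting
   request and NARS the next, and move_ARS_to_WAR below promotes them back
   into a WAR register as one frees up.  */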

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
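
/* Once lodged in a WAR register a request waits memory_latency + 1 cycles
   (counted down in decrease_latencies below) and is then resubmitted to the
   pipeline as a req_WAR request.  */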

static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
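
/* A minimal sketch of driving the cache in cycle-accurate mode (not part of
   the original code): queue a load, then run single cycles until the data
   lands in the pipeline's return buffer.

     frv_cache_request_load (cache, reqno, address, UNIT_I0);
     while (! frv_cache_data_in_buffer (cache, LS, address, reqno))
       frv_cache_run (cache, 1);
*/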

int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  offset &= ~3;
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}
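
/* Note that this passive read deliberately leaves the cache state untouched:
   the statistics are saved and restored around get_tag and the LRU ranks are
   not updated, so callers can probe the cache without perturbing the
   simulation.  */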

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE* cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE* cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}