/* frv cache model.
   Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"

void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
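
/* A minimal initialization sketch (an illustration based on the defaulting
   logic above, not code taken from the simulator):

     FRV_CACHE cache;
     memset (&cache, 0, sizeof (cache));   -- zeroed fields take defaults
     cache.configured_ways = 2;            -- explicit fields are kept
     frv_cache_init (current_cpu, &cache);
     ...
     frv_cache_term (&cache);  */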

void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
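
/* Each way thus covers way_size == 0x2000 bytes of the 0x8000-byte window,
   and each unit of CWE removes one way's worth of the window from the top.
   For example, for the insn cache with IHSR8.ICWE == 1, the computation
   above gives end == 0xfe008000 - 0x2000, i.e. RAM access only for
   addresses in [0xfe000000, 0xfe006000).  */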

/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);
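
  /* For example, with the default 64-byte lines and 128 sets, this reduces
     to set = (address >> 6) & 127, so address 0x1234 falls into set
     (0x1234 >> 6) & 127 == 72.  */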

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}

/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}
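
/* write_line_to_memory reconstructs the line's base address by undoing the
   indexing done in get_tag: the set number is shifted back over the
   line-offset bits and OR-ed into the stored tag.  With 64-byte lines,
   set 72 contributes 72 << 6 == 0x1200 to the address.  */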

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}
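
/* The lru fields of a set thus form a ranking in which larger means more
   recently used: the promoted tag gets cache->ways and every tag that was
   above it slides down by one, so the least recently used tag sinks toward
   0, which is what get_tag looks for when choosing a victim line.  */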

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}

/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were used and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* No line was available: all ways in the set were locked.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to preload the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* Preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to invalidate the line.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}

/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   ------------------------------------------------------------------------- */
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}

/* Return the next free request from the free chain, allocating more if
   necessary.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request with a buffer of the
   right size.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}

/* Requeue the request in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return a priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
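
/* Since 0 is the highest priority, handing out lowest + 2 leaves a gap:
   a request issued at priority p can later park in a wait register at
   priority p - 1 (see wait_in_WAR below) without colliding with any
   priority already in use.  */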

static void
add_WAR_request (FRV_CACHE_PIPELINE* pipeline, FRV_CACHE_WAR *war)
{
  /* Build a WAR request and add it to the given pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
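
/* Each call to advance_pipelines models one clock tick: the request in
   LAST_STAGE was handled by arbitrate_requests on the previous cycle and is
   freed, every other request moves down one stage, and the highest priority
   queued request enters FIRST_STAGE.  frv_cache_run below performs this
   once per simulated cycle.  */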

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same
         pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in ARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
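
/* The per-pipeline WAR wait registers model outstanding line fetches: a
   request that misses parks here for memory_latency cycles, after which
   decrease_latencies resubmits it to the pipeline as a req_WAR request.
   When both WARs of a pipeline are busy, the request overflows into the
   cache-wide BARS register, then NARS, and failing that it is simply
   requeued until a register frees up.  */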

static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* Preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
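
/* A minimal driver for the cycle-accurate interface, as a sketch (the slot
   and loop are illustrative, not taken from the simulator sources):

     frv_cache_request_load (cache, reqno, address, UNIT_I0);
     while (! frv_cache_data_in_buffer (cache, LS, address, reqno))
       frv_cache_run (cache, 1);

   A hit becomes visible in the return buffer once the request has traversed
   the pipeline stages; a miss first waits out memory_latency cycles in a
   WAR register.  */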

int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate not found.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE* cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE* cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}