/* This file is part of the program psim.
- Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>
+ Copyright (C) 1994-1997, Andrew Cagney <cagney@highland.com.au>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
+ the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _VM_C_
#define _VM_C_
+#if 0
#include "basics.h"
#include "registers.h"
#include "device.h"
#include "vm.h"
#include "interrupts.h"
#include "mon.h"
+#endif
+
+#include "cpu.h"
/* OEA vs VEA
/* Cached per-segment translation state (one entry per segment register /
   segment table entry).  NOTE(review): interior fields elided in this view. */
typedef struct _om_segment_tlb_entry {
int key[nr_om_modes];
om_access_types invalid_access; /* set to instruction if no_execute bit */
- unsigned_word masked_virtual_segment_id; /* aligned ready for pte addr */
+ unsigned_word masked_virtual_segment_id; /* aligned ready for pte group addr */
#if (WITH_TARGET_WORD_BITSIZE == 64)
int is_valid;
unsigned_word masked_effective_segment_id;
nr_om_page_tlb_constants
};
-enum {
- invalid_tlb_vsid = MASK(0, 63),
-};
-
/* Cached per-page translation state; an all-ones masked VSID (MASK(0, 63))
   is used elsewhere in this file to mark an entry invalid. */
typedef struct _om_page_tlb_entry {
int protection;
int changed;
/* OEA Support procedures */
/* Reduce effective address EA to its segment-TLB slot number by extracting
   the segment-select bits (exact bit range elided from this view). */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_segment_tlb_index(unsigned_word ea)
{
unsigned_word index = EXTRACTED(ea,
return index;
}
/* Reduce effective address EA to its page-TLB slot number by extracting
   the page-index bits (exact bit range elided from this view). */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_page_tlb_index(unsigned_word ea)
{
unsigned_word index = EXTRACTED(ea,
return index;
}
/* Compute the hashed-page-table hash for a page: the pre-aligned masked
   VSID xor'd with the page-index bits of EA (bits 36..51), positioned per
   the target word size so the result is ready for PTE-group addressing. */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_hash_page(unsigned_word masked_vsid,
unsigned_word ea)
{
unsigned_word extracted_ea = EXTRACTED(ea, 36, 51);
#if (WITH_TARGET_WORD_BITSIZE == 32)
- return masked_vsid ^ INSERTED32(extracted_ea, 7, 31-6);
+ unsigned_word masked_ea = INSERTED32(extracted_ea, 7, 31-6);
+ unsigned_word hash = masked_vsid ^ masked_ea;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
- return masked_vsid ^ INSERTED64(extracted_ea, 18, 63-7);
+ unsigned_word masked_ea = INSERTED64(extracted_ea, 18, 63-7);
+ unsigned_word hash = masked_vsid ^ masked_ea;
#endif
+ TRACE(trace_vm, ("ea=0x%lx - masked-vsid=0x%lx masked-ea=0x%lx hash=0x%lx\n",
+ (unsigned long)ea,
+ (unsigned long)masked_vsid,
+ (unsigned long)masked_ea,
+ (unsigned long)hash));
+ return hash;
}
/* Extract the abbreviated page index (API) field from PTE word 0 --
   presumably mirrors om_ea_api; body elided in this view, per word size. */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_pte_0_api(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
#endif
}
/* NOTE(review): by name, extracts the hash-function-select (H) bit of PTE
   word 0; body elided in this view -- confirm against the full source. */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_pte_0_hash(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
#endif
}
/* NOTE(review): by name, non-zero when the valid (V) bit of PTE word 0 is
   set; body elided in this view -- confirm against the full source. */
-int STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(int)
om_pte_0_valid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
#endif
}
/* Keep only the page-index bits (36..51) of EA, in place. */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_ea_masked_page(unsigned_word ea)
{
return MASKED(ea, 36, 51);
}
/* Keep only the byte-within-page offset bits (52..63) of EA, in place. */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_ea_masked_byte(unsigned_word ea)
{
return MASKED(ea, 52, 63);
}
-unsigned_word STATIC_INLINE_VM
+/* return the VSID aligned for pte group addr */
+STATIC_INLINE_VM\
+(unsigned_word)
om_pte_0_masked_vsid(unsigned_word pte_0)
{
- return INSERTED32(EXTRACTED32(pte_0, 1, 24), 7-5, 31-6);
+#if (WITH_TARGET_WORD_BITSIZE == 32)
+ /* 24-bit VSID is in PTE word 0 bits 1..24; shift it into hash position */
+ return INSERTED32(EXTRACTED32(pte_0, 1, 24), 31-6-24+1, 31-6);
+#endif
+#if (WITH_TARGET_WORD_BITSIZE == 64)
+ /* 52-bit VSID occupies bits 0..51 on 64-bit targets */
+ return INSERTED64(EXTRACTED64(pte_0, 0, 51), 63-7-52+1, 63-7);
+#endif
}
/* Page-protection (PP) bits of PTE word 1 (bits 62..63), left in place. */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_pte_1_pp(unsigned_word pte_1)
{
return MASKED(pte_1, 62, 63); /*PP*/
}
/* Referenced (R) bit of PTE word 1 (bit 55), as 0 or 1. */
-int STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(int)
om_pte_1_referenced(unsigned_word pte_1)
{
return EXTRACTED(pte_1, 55, 55);
}
/* Changed (C) bit of PTE word 1 (bit 56), as 0 or 1. */
-int STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(int)
om_pte_1_changed(unsigned_word pte_1)
{
return EXTRACTED(pte_1, 56, 56);
}
/* Real page number (RPN) bits of PTE word 1 (bits 0..51), left in place.
   Must return unsigned_word, not int: the masked value spans up to 52 bits
   on 64-bit targets and would be truncated (and sign-mangled) by an int
   return -- all other masked-value accessors here return unsigned_word. */
-int STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_pte_1_masked_rpn(unsigned_word pte_1)
{
return MASKED(pte_1, 0, 51); /*RPN*/
}
/* Abbreviated page index (API) field of EA, bits 36..41. */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_ea_api(unsigned_word ea)
{
return EXTRACTED(ea, 36, 41);
/* Page and Segment table read/write operators, these need to still
account for the PPC's XOR operation */
/* Read the word at real address RA through the map's physical core
   (RA must already have any PPC XOR adjustment applied -- see above). */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_read_word(om_map *map,
unsigned_word ra,
cpu *processor,
return core_map_read_word(map->physical, ra, processor, cia);
}
/* Write VAL to real address RA through the map's physical core
   (body elided in this view). */
-void STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(void)
om_write_word(om_map *map,
unsigned_word ra,
unsigned_word val,
/* Bring things into existence */
/* Allocate and initialize a new vm object bound to the PHYSICAL core,
   first sanity-checking that the TLB sizing enums are self-consistent. */
-vm INLINE_VM *
+INLINE_VM\
+(vm *)
vm_create(core *physical)
{
vm *virtual;
/* internal consistency: table sizes must match their index bit ranges */
if (nr_om_segment_tlb_entries
!= (1 << (om_segment_tlb_index_stop_bit
- om_segment_tlb_index_start_bit + 1)))
- error("new_vm() - internal error with om_segment constants\n");
+ error("internal error - vm_create - problem with om_segment constants\n");
if (nr_om_page_tlb_entries
!= (1 << (om_page_tlb_index_stop_bit
- om_page_tlb_index_start_bit + 1)))
- error("new_vm() - internal error with om_page constants\n");
+ error("internal error - vm_create - problem with om_page constants\n");
/* create the new vm register file */
virtual = ZALLOC(vm);
}
/* Search the cached BAT registers for one covering EA -- presumably
   returns NULL when none match (body elided in this view). */
-om_bat STATIC_INLINE_VM *
+STATIC_INLINE_VM\
+(om_bat *)
om_effective_to_bat(om_map *map,
unsigned_word ea)
{
}
/* Translate EA to its segment description, returning the segment TLB
   entry (on 64-bit targets the entry is filled from the segment table;
   much of the lookup is elided in this view). */
-om_segment_tlb_entry STATIC_INLINE_VM *
+STATIC_INLINE_VM\
+(om_segment_tlb_entry *)
om_effective_to_virtual(om_map *map,
unsigned_word ea,
cpu *processor,
+ om_segment_tlb_index(ea));
#if (WITH_TARGET_WORD_BITSIZE == 32)
+ TRACE(trace_vm, ("ea=0x%lx - sr[%ld] - masked-vsid=0x%lx va=0x%lx%07lx\n",
+ (unsigned long)ea,
+ (long)om_segment_tlb_index(ea),
+ (unsigned long)segment_tlb_entry->masked_virtual_segment_id,
+ (unsigned long)EXTRACTED32(segment_tlb_entry->masked_virtual_segment_id, 31-6-24+1, 31-6),
+ (unsigned long)EXTRACTED32(ea, 4, 31)));
return segment_tlb_entry;
#endif
: om_access_any);
segment_tlb_entry->masked_virtual_segment_id =
INSERTED64(EXTRACTED64(segment_table_entry_dword_1, 0, 51),
- 18-13, 63-7); /* align ready for pte addr */
+ 18-13, 63-7); /* aligned ready for pte group addr */
return segment_tlb_entry;
}
}
/* Search the hashed page table for the PTE mapping EA; on a hit, fill in
   and return the page TLB entry (setting the referenced bit when needed).
   Much of the loop body is elided in this view. */
-om_page_tlb_entry STATIC_INLINE_VM *
+STATIC_INLINE_VM\
+(om_page_tlb_entry *)
om_virtual_to_real(om_map *map,
unsigned_word ea,
om_segment_tlb_entry *segment_tlb_entry,
| (page_hash & map->page_table_hash_mask));
unsigned_word real_address_of_pte_0;
TRACE(trace_vm,
- ("ea=0x%lx - htab search - pteg=0x%lx htab=0x%lx mask=0x%lx hash=0x%lx\n",
- (long)ea, (long)real_address_of_pte_group,
+ ("ea=0x%lx - htab search %d - htab=0x%lx hash=0x%lx mask=0x%lx pteg=0x%lx\n",
+ (long)ea, current_hash,
map->real_address_of_page_table,
+ page_hash,
map->page_table_hash_mask,
- page_hash));
+ (long)real_address_of_pte_group));
/* scan each PTE in the selected group */
for (real_address_of_pte_0 = real_address_of_pte_group;
real_address_of_pte_0 < (real_address_of_pte_group
+ sizeof_pte_group);
processor, cia);
TRACE(trace_vm,
("ea=0x%lx - htab hit - set ref - tlb=0x%lx &pte1=0x%lx\n",
- (long)ea, page_tlb_entry, (long)real_address_of_pte_1));
+ (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
}
else {
TRACE(trace_vm,
("ea=0x%lx - htab hit - tlb=0x%lx &pte1=0x%lx\n",
- (long)ea, page_tlb_entry, (long)real_address_of_pte_1));
+ (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
}
return page_tlb_entry;
}
}
}
/* Deliver the storage interrupt corresponding to the failed ACCESS
   (instruction vs data dispatch partially elided in this view). */
-void STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(void)
om_interrupt(cpu *processor,
unsigned_word cia,
unsigned_word ea,
instruction_storage_interrupt(processor, cia, reason);
break;
default:
- error("om_interrupt - unexpected access type %d, cia=0x%x, ea=0x%x\n",
- access, cia, ea);
+ error("internal error - om_interrupt - unexpected access type %d", access);
}
}
/* Full OEA translation of EA to a real address for the given ACCESS;
   when relocation is disabled the mapping is the identity. */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
om_translate_effective_to_real(om_map *map,
unsigned_word ea,
om_access_types access,
if (!map->is_relocate) {
ra = ea;
- TRACE(trace_vm, ("ea=0x%lx - direct map - ra=0x%lx", (long)ea, (long)ra));
+ TRACE(trace_vm, ("ea=0x%lx - direct map - ra=0x%lx\n",
+ (long)ea, (long)ra));
return ra;
}
/* rebuild all the relevant bat information */
/* Decode one BAT register pair (UBAT/LBAT) into the cached BAT form
   (body elided in this view). */
-void STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(void)
om_unpack_bat(om_bat *bat,
spreg ubat,
spreg lbat)
/* rebuild the given bat table */
/* Walk RAW_BATS in UBAT/LBAT pairs, caching each pair whose valid bit for
   the current privilege state is set (Vp when in problem state, else Vs). */
-void STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(void)
om_unpack_bats(om_bats *bats,
spreg *raw_bats,
msreg msr)
spreg ubat = raw_bats[i];
spreg lbat = raw_bats[i+1];
if ((msr & msr_problem_state)
- ? EXTRACTED(ubat, 62, 62)
- : EXTRACTED(ubat, 63, 63)) {
+ ? EXTRACTED(ubat, 63, 63)
+ : EXTRACTED(ubat, 62, 62)) {
om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
ubat, lbat);
bats->nr_valid_bat_registers += 1;
#if (WITH_TARGET_WORD_BITSIZE == 32)
/* Rebuild one segment register's TLB entry from SRS[WHICH_SR].  Direct-
   store segments (T bit set) are not supported and report a cpu error. */
-void STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(void)
om_unpack_sr(vm *virtual,
sreg *srs,
- int which_sr)
+ int which_sr,
+ cpu *processor,
+ unsigned_word cia)
{
om_segment_tlb_entry *segment_tlb_entry = 0;
sreg new_sr_value = 0;
/* check register in range */
- if (which_sr < 0 || which_sr > nr_om_segment_tlb_entries)
- error("om_set_sr: segment register out of bounds\n");
+ ASSERT(which_sr >= 0 && which_sr < nr_om_segment_tlb_entries);
/* get the working values */
segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
/* do we support this */
if (MASKED32(new_sr_value, 0, 0))
- error("om_ser_sr(): unsupported value of T in segment register %d\n",
- which_sr);
+ cpu_error(processor, cia, "unsupported value of T in segment register %d",
+ which_sr);
/* update info */
segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
: om_access_any);
segment_tlb_entry->masked_virtual_segment_id =
INSERTED32(EXTRACTED32(new_sr_value, 8, 31),
- 7-5, 31-6); /* align ready for pte address */
+ 31-6-24+1, 31-6); /* aligned ready for pte group addr */
}
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
/* Rebuild every segment register's TLB entry from SRS. */
-void STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(void)
om_unpack_srs(vm *virtual,
- sreg *srs)
+ sreg *srs,
+ cpu *processor,
+ unsigned_word cia)
{
int which_sr;
for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
- om_unpack_sr(virtual, srs, which_sr);
+ om_unpack_sr(virtual, srs, which_sr,
+ processor, cia);
}
}
#endif
-/* Rebuild all the data structures for the new context as specifed by
+/* Rebuild all the data structures for the new context as specified by
the passed registers */
-void INLINE_VM
+INLINE_VM\
+(void)
vm_synchronize_context(vm *virtual,
spreg *sprs,
sreg *srs,
- msreg msr,
+ msreg msr,
+ /**/
+ cpu *processor,
+ unsigned_word cia)
{
/* enable/disable translation */
/* unpack the segment tlb registers */
#if (WITH_TARGET_WORD_BITSIZE == 32)
- om_unpack_srs(virtual, srs);
+ om_unpack_srs(virtual, srs,
+ processor, cia);
#endif
/* set up the XOR registers if the current endian mode conflicts
else {
/* don't allow the processor to change endian modes */
/* NOTE(review): without XOR-endian support, flipping the byte order at
   run time cannot be honoured, so it is reported as a cpu error */
if ((little_endian && CURRENT_TARGET_BYTE_ORDER != LITTLE_ENDIAN)
- || (!little_endian && CURRENT_TARGET_BYTE_ORDER != LITTLE_ENDIAN))
- error("vm_synchronize_context() - unsuported change of byte order\n");
+ || (!little_endian && CURRENT_TARGET_BYTE_ORDER != BIG_ENDIAN))
+ cpu_error(processor, cia, "attempt to change hardwired byte order");
}
}
+/* update vm data structures due to a TLB operation */
-vm_data_map INLINE_VM *
/* Invalidate the instruction and data page-TLB entries covering EA by
   storing an impossible masked VSID (all ones -- MASK(0, 63)). */
+INLINE_VM\
+(void)
+vm_page_tlb_invalidate_entry(vm *memory,
+ unsigned_word ea)
+{
+ int i = om_page_tlb_index(ea);
+ memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
+ memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
+ TRACE(trace_vm, ("ea=0x%lx - tlb invalidate entry\n", (long)ea));
+}
+
/* Invalidate every instruction and data page-TLB entry (see
   vm_page_tlb_invalidate_entry for the invalid-VSID convention). */
+INLINE_VM\
+(void)
+vm_page_tlb_invalidate_all(vm *memory)
+{
+ int i;
+ for (i = 0; i < nr_om_page_tlb_entries; i++) {
+ memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
+ memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
+ }
+ TRACE(trace_vm, ("tlb invalidate all\n"));
+}
+
+
+
/* Hand out the vm's embedded data map ("create" allocates nothing). */
+INLINE_VM\
+(vm_data_map *)
vm_create_data_map(vm *memory)
{
return &memory->data_map;
}
/* Hand out the vm's embedded instruction map ("create" allocates nothing). */
-vm_instruction_map INLINE_VM *
+INLINE_VM\
+(vm_instruction_map *)
vm_create_instruction_map(vm *memory)
{
return &memory->instruction_map;
}
/* Dispatch EA translation according to the configured environment
   (switch arms elided in this view); an unknown environment is an
   internal error. */
-unsigned_word STATIC_INLINE_VM
+STATIC_INLINE_VM\
+(unsigned_word)
vm_translate(om_map *map,
unsigned_word ea,
om_access_types access,
processor, cia,
abort);
default:
- error("vm_translate() - unknown environment\n");
+ error("internal error - vm_translate - bad switch");
return 0;
}
}
/* Translate a data effective address to a real address
   (body elided in this view). */
-unsigned_word INLINE_VM
+INLINE_VM\
+(unsigned_word)
vm_real_data_addr(vm_data_map *map,
unsigned_word ea,
int is_read,
}
/* Translate the current instruction address CIA to a real address;
   a translation failure aborts (note the trailing abort argument). */
-unsigned_word INLINE_VM
+INLINE_VM\
+(unsigned_word)
vm_real_instruction_addr(vm_instruction_map *map,
cpu *processor,
unsigned_word cia)
1); /*abort*/
}
/* Fetch the instruction word at CIA (body elided in this view). */
-instruction_word INLINE_VM
+INLINE_VM\
+(instruction_word)
vm_instruction_map_read(vm_instruction_map *map,
cpu *processor,
unsigned_word cia)
}
/* Copy NR_BYTES from virtual ADDR into TARGET one byte at a time,
   translating each address.  When PROCESSOR is NULL a translation failure
   (ra == MASK(0, 63)) just stops the copy; otherwise it aborts.
   Presumably returns the number of bytes copied -- tail elided. */
-int INLINE_VM
+INLINE_VM\
+(int)
vm_data_map_read_buffer(vm_data_map *map,
void *target,
unsigned_word addr,
- unsigned nr_bytes)
+ unsigned nr_bytes,
+ cpu *processor,
+ unsigned_word cia)
{
unsigned count;
for (count = 0; count < nr_bytes; count++) {
unsigned_word ea = addr + count;
unsigned_word ra = vm_translate(&map->translation,
ea, om_data_read,
- NULL, /*processor*/
- 0, /*cia*/
- 0); /*dont-abort*/
+ processor, /*processor*/
+ cia, /*cia*/
+ processor != NULL); /*abort?*/
if (ra == MASK(0, 63))
break;
if (WITH_XOR_ENDIAN)
}
-int INLINE_VM
+INLINE_VM\
+(int)
vm_data_map_write_buffer(vm_data_map *map,
const void *source,
unsigned_word addr,
unsigned nr_bytes,
- int violate_read_only_section)
+ int violate_read_only_section,
+ cpu *processor,
+ unsigned_word cia)
{
unsigned count;
unsigned_1 byte;
unsigned_word ea = addr + count;
unsigned_word ra = vm_translate(&map->translation,
ea, om_data_write,
- NULL/*processor*/,
- 0, /*cia*/
- 0); /*dont-abort*/
+ processor,
+ cia,
+ processor != NULL); /*abort?*/
if (ra == MASK(0, 63))
break;
if (WITH_XOR_ENDIAN)