/* This file is part of the program psim.

   Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
26 #include "registers.h"
30 #include "interrupts.h"
/* VM overview:

   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_map's for the instruction
   and data address spaces.  This being so that writes to instruction
   space or execution of a data space is prevented.

   For the OEA model things are more complex.  The reason for separate
   instruction and data models becomes crucial.  The OEA model is
   built out of three parts.  An instruction map, a data map and an
   underlying structure that provides access to the VM data kept in
   main memory.  */
/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple efficient model of the VM to be implemented.

   Consistency between OEA registers and this model's internal data
   structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for BAT registers are rebuilt
   whenever the processor moves between problem and system states.

   Unpacked values are stored in the OEA so that they correctly align
   to where they will be needed by the PTE address.  */
65 Matrix of processor state, type of access and validity */
74 om_data_read
, om_data_write
,
75 om_instruction_read
, om_access_any
,
79 static int om_valid_access
[2][4][nr_om_access_types
] = {
80 /* read, write, instruction, any */
83 { 1, 1, 1, 1 }, /* 00 */
84 { 1, 1, 1, 1 }, /* 01 */
85 { 1, 1, 1, 1 }, /* 10 */
86 { 1, 0, 1, 1 }, /* 11 */
88 /* K bit == 1 or P bit valid */
90 { 0, 0, 0, 0 }, /* 00 */
91 { 1, 0, 1, 1 }, /* 01 */
92 { 1, 1, 1, 1 }, /* 10 */
93 { 1, 0, 1, 1 }, /* 11 */
100 The bat data structure only contains information on valid BAT
101 translations for the current processor mode and type of access. */
103 typedef struct _om_bat
{
104 unsigned_word block_effective_page_index
;
105 unsigned_word block_effective_page_index_mask
;
106 unsigned_word block_length_mask
;
107 unsigned_word block_real_page_number
;
111 enum _nr_om_bat_registers
{
112 nr_om_bat_registers
= 4
115 typedef struct _om_bats
{
116 int nr_valid_bat_registers
;
117 om_bat bat
[nr_om_bat_registers
];
123 In this model the 32 and 64 bit segment tables are treated in very
124 similar ways. The 32bit segment registers are treated as a
125 simplification of the 64bit segment tlb */
127 enum _om_segment_tlb_constants
{
128 #if (WITH_TARGET_WORD_BITSIZE == 64)
129 sizeof_segment_table_entry_group
= 128,
130 sizeof_segment_table_entry
= 16,
132 om_segment_tlb_index_start_bit
= 32,
133 om_segment_tlb_index_stop_bit
= 35,
134 nr_om_segment_tlb_entries
= 16,
135 nr_om_segment_tlb_constants
138 typedef struct _om_segment_tlb_entry
{
139 int key
[nr_om_modes
];
140 om_access_types invalid_access
; /* set to instruction if no_execute bit */
141 unsigned_word masked_virtual_segment_id
; /* aligned ready for pte addr */
142 #if (WITH_TARGET_WORD_BITSIZE == 64)
144 unsigned_word masked_effective_segment_id
;
146 } om_segment_tlb_entry
;
148 typedef struct _om_segment_tlb
{
149 om_segment_tlb_entry entry
[nr_om_segment_tlb_entries
];
155 This OEA model includes a small direct map Page TLB. The tlb is to
156 cut down on the need for the OEA to perform walks of the page hash
159 enum _om_page_tlb_constants
{
160 om_page_tlb_index_start_bit
= 46,
161 om_page_tlb_index_stop_bit
= 51,
162 nr_om_page_tlb_entries
= 64,
163 #if (WITH_TARGET_WORD_BITSIZE == 64)
164 sizeof_pte_group
= 128,
167 #if (WITH_TARGET_WORD_BITSIZE == 32)
168 sizeof_pte_group
= 64,
171 nr_om_page_tlb_constants
175 invalid_tlb_vsid
= MASK(0, 63),
178 typedef struct _om_page_tlb_entry
{
181 unsigned_word real_address_of_pte_1
;
182 unsigned_word masked_virtual_segment_id
;
183 unsigned_word masked_page
;
184 unsigned_word masked_real_page_number
;
187 typedef struct _om_page_tlb
{
188 om_page_tlb_entry entry
[nr_om_page_tlb_entries
];
192 /* memory translation:
194 OEA memory translation possibly involves BAT, SR, TLB and HTAB
197 typedef struct _om_map
{
199 /* local cache of register values */
201 int is_problem_state
;
203 /* block address translation */
204 om_bats
*bat_registers
;
206 /* failing that, translate ea to va using segment tlb */
207 #if (WITH_TARGET_WORD_BITSIZE == 64)
208 unsigned_word real_address_of_segment_table
;
210 om_segment_tlb
*segment_tlb
;
212 /* then va to ra using hashed page table and tlb */
213 unsigned_word real_address_of_page_table
;
214 unsigned_word page_table_hash_mask
;
215 om_page_tlb
*page_tlb
;
217 /* physical memory for fetching page table entries */
220 /* address xor for PPC endian */
221 unsigned xor[WITH_XOR_ENDIAN
];
228 External objects defined by vm.h */
230 struct _vm_instruction_map
{
231 /* real memory for last part */
233 /* translate effective to real */
237 struct _vm_data_map
{
238 /* translate effective to real */
240 /* real memory for translated address */
248 Underlying memory object. For the VEA this is just the
249 core_map. For OEA it is the instruction and data memory
254 /* OEA: base address registers */
258 /* OEA: segment registers */
259 om_segment_tlb segment_tlb
;
261 /* OEA: translation lookaside buffers */
262 om_page_tlb instruction_tlb
;
263 om_page_tlb data_tlb
;
269 vm_instruction_map instruction_map
;
270 vm_data_map data_map
;
275 /* OEA Support procedures */
278 unsigned_word STATIC_INLINE_VM
279 om_segment_tlb_index(unsigned_word ea
)
281 unsigned_word index
= EXTRACTED(ea
,
282 om_segment_tlb_index_start_bit
,
283 om_segment_tlb_index_stop_bit
);
287 unsigned_word STATIC_INLINE_VM
288 om_page_tlb_index(unsigned_word ea
)
290 unsigned_word index
= EXTRACTED(ea
,
291 om_page_tlb_index_start_bit
,
292 om_page_tlb_index_stop_bit
);
296 unsigned_word STATIC_INLINE_VM
297 om_hash_page(unsigned_word masked_vsid
,
300 unsigned_word extracted_ea
= EXTRACTED(ea
, 36, 51);
301 #if (WITH_TARGET_WORD_BITSIZE == 32)
302 return masked_vsid
^ INSERTED32(extracted_ea
, 7, 31-6);
304 #if (WITH_TARGET_WORD_BITSIZE == 64)
305 return masked_vsid
^ INSERTED64(extracted_ea
, 18, 63-7);
309 unsigned_word STATIC_INLINE_VM
310 om_pte_0_api(unsigned_word pte_0
)
312 #if (WITH_TARGET_WORD_BITSIZE == 32)
313 return EXTRACTED32(pte_0
, 26, 31);
315 #if (WITH_TARGET_WORD_BITSIZE == 64)
316 return EXTRACTED64(pte_0
, 52, 56);
320 unsigned_word STATIC_INLINE_VM
321 om_pte_0_hash(unsigned_word pte_0
)
323 #if (WITH_TARGET_WORD_BITSIZE == 32)
324 return EXTRACTED32(pte_0
, 25, 25);
326 #if (WITH_TARGET_WORD_BITSIZE == 64)
327 return EXTRACTED64(pte_0
, 62, 62);
332 om_pte_0_valid(unsigned_word pte_0
)
334 #if (WITH_TARGET_WORD_BITSIZE == 32)
335 return MASKED32(pte_0
, 0, 0) != 0;
337 #if (WITH_TARGET_WORD_BITSIZE == 64)
338 return MASKED64(pte_0
, 63, 63) != 0;
342 unsigned_word STATIC_INLINE_VM
343 om_ea_masked_page(unsigned_word ea
)
345 return MASKED(ea
, 36, 51);
348 unsigned_word STATIC_INLINE_VM
349 om_ea_masked_byte(unsigned_word ea
)
351 return MASKED(ea
, 52, 63);
354 unsigned_word STATIC_INLINE_VM
355 om_pte_0_masked_vsid(unsigned_word pte_0
)
357 return INSERTED32(EXTRACTED32(pte_0
, 1, 24), 7-5, 31-6);
360 unsigned_word STATIC_INLINE_VM
361 om_pte_1_pp(unsigned_word pte_1
)
363 return MASKED(pte_1
, 62, 63); /*PP*/
367 om_pte_1_referenced(unsigned_word pte_1
)
369 return EXTRACTED(pte_1
, 55, 55);
373 om_pte_1_changed(unsigned_word pte_1
)
375 return EXTRACTED(pte_1
, 56, 56);
379 om_pte_1_masked_rpn(unsigned_word pte_1
)
381 return MASKED(pte_1
, 0, 51); /*RPN*/
384 unsigned_word STATIC_INLINE_VM
385 om_ea_api(unsigned_word ea
)
387 return EXTRACTED(ea
, 36, 41);
391 /* Page and Segment table read/write operators, these need to still
392 account for the PPC's XOR operation */
394 unsigned_word STATIC_INLINE_VM
395 om_read_word(om_map
*map
,
401 ra
^= map
->xor[sizeof(instruction_word
) - 1];
402 return core_map_read_word(map
->physical
, ra
, processor
, cia
);
405 void STATIC_INLINE_VM
406 om_write_word(om_map
*map
,
413 ra
^= map
->xor[sizeof(instruction_word
) - 1];
414 core_map_write_word(map
->physical
, ra
, val
, processor
, cia
);
418 /* Bring things into existance */
421 vm_create(core
*physical
)
425 /* internal checks */
426 if (nr_om_segment_tlb_entries
427 != (1 << (om_segment_tlb_index_stop_bit
428 - om_segment_tlb_index_start_bit
+ 1)))
429 error("new_vm() - internal error with om_segment constants\n");
430 if (nr_om_page_tlb_entries
431 != (1 << (om_page_tlb_index_stop_bit
432 - om_page_tlb_index_start_bit
+ 1)))
433 error("new_vm() - internal error with om_page constants\n");
435 /* create the new vm register file */
436 virtual = ZALLOC(vm
);
439 virtual->physical
= physical
;
441 /* set up the address decoders */
442 virtual->instruction_map
.translation
.bat_registers
= &virtual->ibats
;
443 virtual->instruction_map
.translation
.segment_tlb
= &virtual->segment_tlb
;
444 virtual->instruction_map
.translation
.page_tlb
= &virtual->instruction_tlb
;
445 virtual->instruction_map
.translation
.is_relocate
= 0;
446 virtual->instruction_map
.translation
.is_problem_state
= 0;
447 virtual->instruction_map
.translation
.physical
= core_readable(physical
);
448 virtual->instruction_map
.code
= core_readable(physical
);
450 virtual->data_map
.translation
.bat_registers
= &virtual->dbats
;
451 virtual->data_map
.translation
.segment_tlb
= &virtual->segment_tlb
;
452 virtual->data_map
.translation
.page_tlb
= &virtual->data_tlb
;
453 virtual->data_map
.translation
.is_relocate
= 0;
454 virtual->data_map
.translation
.is_problem_state
= 0;
455 virtual->data_map
.translation
.physical
= core_readable(physical
);
456 virtual->data_map
.read
= core_readable(physical
);
457 virtual->data_map
.write
= core_writeable(physical
);
463 om_bat STATIC_INLINE_VM
*
464 om_effective_to_bat(om_map
*map
,
468 om_bats
*bats
= map
->bat_registers
;
469 int nr_bats
= bats
->nr_valid_bat_registers
;
471 for (curr_bat
= 0; curr_bat
< nr_bats
; curr_bat
++) {
472 om_bat
*bat
= bats
->bat
+ curr_bat
;
473 if ((ea
& bat
->block_effective_page_index_mask
)
474 != bat
->block_effective_page_index
)
483 om_segment_tlb_entry STATIC_INLINE_VM
*
484 om_effective_to_virtual(om_map
*map
,
489 /* first try the segment tlb */
490 om_segment_tlb_entry
*segment_tlb_entry
= (map
->segment_tlb
->entry
491 + om_segment_tlb_index(ea
));
493 #if (WITH_TARGET_WORD_BITSIZE == 32)
494 return segment_tlb_entry
;
497 #if (WITH_TARGET_WORD_BITSIZE == 64)
498 if (segment_tlb_entry
->is_valid
499 && (segment_tlb_entry
->masked_effective_segment_id
== MASKED(ea
, 0, 35))) {
500 error("fixme - is there a need to update any bits\n");
501 return segment_tlb_entry
;
504 /* drats, segment tlb missed */
506 unsigned_word segment_id_hash
= ea
;
507 int current_hash
= 0;
508 for (current_hash
= 0; current_hash
< 2; current_hash
+= 1) {
509 unsigned_word segment_table_entry_group
=
510 (map
->real_address_of_segment_table
511 | (MASKED64(segment_id_hash
, 31, 35) >> (56-35)));
512 unsigned_word segment_table_entry
;
513 for (segment_table_entry
= segment_table_entry_group
;
514 segment_table_entry
< (segment_table_entry_group
515 + sizeof_segment_table_entry_group
);
516 segment_table_entry
+= sizeof_segment_table_entry
) {
518 unsigned_word segment_table_entry_dword_0
=
519 om_read_word(map
->physical
, segment_table_entry
, processor
, cia
);
520 unsigned_word segment_table_entry_dword_1
=
521 om_read_word(map
->physical
, segment_table_entry
+ 8,
523 int is_valid
= MASKED64(segment_table_entry_dword_0
, 56, 56) != 0;
524 unsigned_word masked_effective_segment_id
=
525 MASKED64(segment_table_entry_dword_0
, 0, 35);
526 if (is_valid
&& masked_effective_segment_id
== MASKED64(ea
, 0, 35)) {
527 /* don't permit some things */
528 if (MASKED64(segment_table_entry_dword_0
, 57, 57))
529 error("om_effective_to_virtual() - T=1 in STE not supported\n");
530 /* update segment tlb */
531 segment_tlb_entry
->is_valid
= is_valid
;
532 segment_tlb_entry
->masked_effective_segment_id
=
533 masked_effective_segment_id
;
534 segment_tlb_entry
->key
[om_supervisor_state
] =
535 EXTRACTED64(segment_table_entry_dword_0
, 58, 58);
536 segment_tlb_entry
->key
[om_problem_state
] =
537 EXTRACTED64(segment_table_entry_dword_0
, 59, 59);
538 segment_tlb_entry
->invalid_access
=
539 (MASKED64(segment_table_entry_dword_0
, 60, 60)
540 ? om_instruction_read
542 segment_tlb_entry
->masked_virtual_segment_id
=
543 INSERTED64(EXTRACTED64(segment_table_entry_dword_1
, 0, 51),
544 18-13, 63-7); /* align ready for pte addr */
545 return segment_tlb_entry
;
548 segment_id_hash
= ~segment_id_hash
;
557 om_page_tlb_entry STATIC_INLINE_VM
*
558 om_virtual_to_real(om_map
*map
,
560 om_segment_tlb_entry
*segment_tlb_entry
,
561 om_access_types access
,
565 om_page_tlb_entry
*page_tlb_entry
= (map
->page_tlb
->entry
566 + om_page_tlb_index(ea
));
568 /* is it a tlb hit? */
569 if ((page_tlb_entry
->masked_virtual_segment_id
570 == segment_tlb_entry
->masked_virtual_segment_id
)
571 && (page_tlb_entry
->masked_page
572 == om_ea_masked_page(ea
))) {
573 TRACE(trace_vm
, ("ea=0x%lx - tlb hit - tlb=0x%lx\n",
574 (long)ea
, (long)page_tlb_entry
));
575 return page_tlb_entry
;
578 /* drats, it is a tlb miss */
580 unsigned_word page_hash
=
581 om_hash_page(segment_tlb_entry
->masked_virtual_segment_id
, ea
);
583 for (current_hash
= 0; current_hash
< 2; current_hash
+= 1) {
584 unsigned_word real_address_of_pte_group
=
585 (map
->real_address_of_page_table
586 | (page_hash
& map
->page_table_hash_mask
));
587 unsigned_word real_address_of_pte_0
;
589 ("ea=0x%lx - htab search - pteg=0x%lx htab=0x%lx mask=0x%lx hash=0x%lx\n",
590 (long)ea
, (long)real_address_of_pte_group
,
591 map
->real_address_of_page_table
,
592 map
->page_table_hash_mask
,
594 for (real_address_of_pte_0
= real_address_of_pte_group
;
595 real_address_of_pte_0
< (real_address_of_pte_group
597 real_address_of_pte_0
+= sizeof_pte
) {
598 unsigned_word pte_0
= om_read_word(map
,
599 real_address_of_pte_0
,
602 if (om_pte_0_valid(pte_0
)
603 && (current_hash
== om_pte_0_hash(pte_0
))
604 && (segment_tlb_entry
->masked_virtual_segment_id
605 == om_pte_0_masked_vsid(pte_0
))
606 && (om_ea_api(ea
) == om_pte_0_api(pte_0
))) {
607 unsigned_word real_address_of_pte_1
= (real_address_of_pte_0
609 unsigned_word pte_1
= om_read_word(map
,
610 real_address_of_pte_1
,
612 page_tlb_entry
->protection
= om_pte_1_pp(pte_1
);
613 page_tlb_entry
->changed
= om_pte_1_changed(pte_1
);
614 page_tlb_entry
->masked_virtual_segment_id
= segment_tlb_entry
->masked_virtual_segment_id
;
615 page_tlb_entry
->masked_page
= om_ea_masked_page(ea
);
616 page_tlb_entry
->masked_real_page_number
= om_pte_1_masked_rpn(pte_1
);
617 page_tlb_entry
->real_address_of_pte_1
= real_address_of_pte_1
;
618 if (!om_pte_1_referenced(pte_1
)) {
620 real_address_of_pte_1
,
624 ("ea=0x%lx - htab hit - set ref - tlb=0x%lx &pte1=0x%lx\n",
625 (long)ea
, page_tlb_entry
, (long)real_address_of_pte_1
));
629 ("ea=0x%lx - htab hit - tlb=0x%lx &pte1=0x%lx\n",
630 (long)ea
, page_tlb_entry
, (long)real_address_of_pte_1
));
632 return page_tlb_entry
;
635 page_hash
= ~page_hash
; /*???*/
642 void STATIC_INLINE_VM
643 om_interrupt(cpu
*processor
,
646 om_access_types access
,
647 storage_interrupt_reasons reason
)
651 data_storage_interrupt(processor
, cia
, ea
, reason
, 0/*!is_store*/);
654 data_storage_interrupt(processor
, cia
, ea
, reason
, 1/*is_store*/);
656 case om_instruction_read
:
657 instruction_storage_interrupt(processor
, cia
, reason
);
660 error("om_interrupt - unexpected access type %d, cia=0x%x, ea=0x%x\n",
666 unsigned_word STATIC_INLINE_VM
667 om_translate_effective_to_real(om_map
*map
,
669 om_access_types access
,
675 om_segment_tlb_entry
*segment_tlb_entry
= NULL
;
676 om_page_tlb_entry
*page_tlb_entry
= NULL
;
679 if (!map
->is_relocate
) {
681 TRACE(trace_vm
, ("ea=0x%lx - direct map - ra=0x%lx", (long)ea
, (long)ra
));
685 /* match with BAT? */
686 bat
= om_effective_to_bat(map
, ea
);
688 if (!om_valid_access
[1][bat
->protection_bits
][access
]) {
689 TRACE(trace_vm
, ("ea=0x%lx - bat access violation\n", (long)ea
));
691 om_interrupt(processor
, cia
, ea
, access
,
692 protection_violation_storage_interrupt
);
697 ra
= ((ea
& bat
->block_length_mask
) | bat
->block_real_page_number
);
698 TRACE(trace_vm
, ("ea=0x%lx - bat translation - ra=0x%lx\n",
699 (long)ea
, (long)ra
));
703 /* translate ea to va using segment map */
704 segment_tlb_entry
= om_effective_to_virtual(map
, ea
, processor
, cia
);
705 #if (WITH_TARGET_WORD_BITSIZE == 64)
706 if (segment_tlb_entry
== NULL
) {
707 TRACE(trace_vm
, ("ea=0x%lx - segment tlb miss\n", (long)ea
));
709 om_interrupt(processor
, cia
, ea
, access
,
710 segment_table_miss_storage_interrupt
);
715 /* check for invalid segment access type */
716 if (segment_tlb_entry
->invalid_access
== access
) {
717 TRACE(trace_vm
, ("ea=0x%lx - segment access invalid\n", (long)ea
));
719 om_interrupt(processor
, cia
, ea
, access
,
720 protection_violation_storage_interrupt
);
726 page_tlb_entry
= om_virtual_to_real(map
, ea
, segment_tlb_entry
,
729 if (page_tlb_entry
== NULL
) {
730 TRACE(trace_vm
, ("ea=0x%lx - page tlb miss\n", (long)ea
));
732 om_interrupt(processor
, cia
, ea
, access
,
733 hash_table_miss_storage_interrupt
);
737 if (!(om_valid_access
738 [segment_tlb_entry
->key
[map
->is_problem_state
]]
739 [page_tlb_entry
->protection
]
741 TRACE(trace_vm
, ("ea=0x%lx - page tlb access violation\n", (long)ea
));
743 om_interrupt(processor
, cia
, ea
, access
,
744 protection_violation_storage_interrupt
);
749 /* update change bit as needed */
750 if (access
== om_data_write
&&!page_tlb_entry
->changed
) {
751 unsigned_word pte_1
= om_read_word(map
,
752 page_tlb_entry
->real_address_of_pte_1
,
755 page_tlb_entry
->real_address_of_pte_1
,
758 TRACE(trace_vm
, ("ea=0x%lx - set change bit - tlb=0x%lx &pte1=0x%lx\n",
759 (long)ea
, (long)page_tlb_entry
,
760 (long)page_tlb_entry
->real_address_of_pte_1
));
763 ra
= (page_tlb_entry
->masked_real_page_number
| om_ea_masked_byte(ea
));
764 TRACE(trace_vm
, ("ea=0x%lx - page translation - ra=0x%lx\n",
765 (long)ea
, (long)ra
));
771 * Definition of operations for memory management
775 /* rebuild all the relevant bat information */
776 void STATIC_INLINE_VM
777 om_unpack_bat(om_bat
*bat
,
781 /* for extracting out the offset within a page */
782 bat
->block_length_mask
= ((MASKED(ubat
, 51, 61) << (17-2))
783 | MASK(63-17+1, 63));
785 /* for checking the effective page index */
786 bat
->block_effective_page_index
= MASKED(ubat
, 0, 46);
787 bat
->block_effective_page_index_mask
= ~bat
->block_length_mask
;
789 /* protection information */
790 bat
->protection_bits
= EXTRACTED(lbat
, 62, 63);
791 bat
->block_real_page_number
= MASKED(lbat
, 0, 46);
795 /* rebuild the given bat table */
796 void STATIC_INLINE_VM
797 om_unpack_bats(om_bats
*bats
,
802 bats
->nr_valid_bat_registers
= 0;
803 for (i
= 0; i
< nr_om_bat_registers
*2; i
+= 2) {
804 spreg ubat
= raw_bats
[i
];
805 spreg lbat
= raw_bats
[i
+1];
806 if ((msr
& msr_problem_state
)
807 ? EXTRACTED(ubat
, 62, 62)
808 : EXTRACTED(ubat
, 63, 63)) {
809 om_unpack_bat(&bats
->bat
[bats
->nr_valid_bat_registers
],
811 bats
->nr_valid_bat_registers
+= 1;
817 #if (WITH_TARGET_WORD_BITSIZE == 32)
818 void STATIC_INLINE_VM
819 om_unpack_sr(vm
*virtual,
823 om_segment_tlb_entry
*segment_tlb_entry
= 0;
824 sreg new_sr_value
= 0;
826 /* check register in range */
827 if (which_sr
< 0 || which_sr
> nr_om_segment_tlb_entries
)
828 error("om_set_sr: segment register out of bounds\n");
830 /* get the working values */
831 segment_tlb_entry
= &virtual->segment_tlb
.entry
[which_sr
];
832 new_sr_value
= srs
[which_sr
];
834 /* do we support this */
835 if (MASKED32(new_sr_value
, 0, 0))
836 error("om_ser_sr(): unsupported value of T in segment register %d\n",
840 segment_tlb_entry
->key
[om_supervisor_state
] = EXTRACTED32(new_sr_value
, 1, 1);
841 segment_tlb_entry
->key
[om_problem_state
] = EXTRACTED32(new_sr_value
, 2, 2);
842 segment_tlb_entry
->invalid_access
= (MASKED32(new_sr_value
, 3, 3)
843 ? om_instruction_read
845 segment_tlb_entry
->masked_virtual_segment_id
=
846 INSERTED32(EXTRACTED32(new_sr_value
, 8, 31),
847 7-5, 31-6); /* align ready for pte address */
852 #if (WITH_TARGET_WORD_BITSIZE == 32)
853 void STATIC_INLINE_VM
854 om_unpack_srs(vm
*virtual,
858 for (which_sr
= 0; which_sr
< nr_om_segment_tlb_entries
; which_sr
++) {
859 om_unpack_sr(virtual, srs
, which_sr
);
865 /* Rebuild all the data structures for the new context as specifed by
866 the passed registers */
868 vm_synchronize_context(vm
*virtual,
874 /* enable/disable translation */
875 int problem_state
= (msr
& msr_problem_state
) != 0;
876 int data_relocate
= (msr
& msr_data_relocate
) != 0;
877 int instruction_relocate
= (msr
& msr_instruction_relocate
) != 0;
878 int little_endian
= (msr
& msr_little_endian_mode
) != 0;
880 unsigned_word page_table_hash_mask
;
881 unsigned_word real_address_of_page_table
;
883 /* update current processor mode */
884 virtual->instruction_map
.translation
.is_relocate
= instruction_relocate
;
885 virtual->instruction_map
.translation
.is_problem_state
= problem_state
;
886 virtual->data_map
.translation
.is_relocate
= data_relocate
;
887 virtual->data_map
.translation
.is_problem_state
= problem_state
;
889 /* update bat registers for the new context */
890 om_unpack_bats(&virtual->ibats
, &sprs
[spr_ibat0u
], msr
);
891 om_unpack_bats(&virtual->dbats
, &sprs
[spr_dbat0u
], msr
);
893 /* unpack SDR1 - the storage description register 1 */
894 #if (WITH_TARGET_WORD_BITSIZE == 64)
895 real_address_of_page_table
= MASKED64(sprs
[spr_sdr1
], 0, 45);
896 page_table_hash_mask
= MASK64(18+28-EXTRACTED64(sprs
[spr_sdr1
], 59, 63),
899 #if (WITH_TARGET_WORD_BITSIZE == 32)
900 real_address_of_page_table
= MASKED32(sprs
[spr_sdr1
], 0, 15);
901 page_table_hash_mask
= (INSERTED32(EXTRACTED32(sprs
[spr_sdr1
], 23, 31),
903 | MASK32(7+9, 31-6));
905 virtual->instruction_map
.translation
.real_address_of_page_table
= real_address_of_page_table
;
906 virtual->instruction_map
.translation
.page_table_hash_mask
= page_table_hash_mask
;
907 virtual->data_map
.translation
.real_address_of_page_table
= real_address_of_page_table
;
908 virtual->data_map
.translation
.page_table_hash_mask
= page_table_hash_mask
;
911 /* unpack the segment tlb registers */
912 #if (WITH_TARGET_WORD_BITSIZE == 32)
913 om_unpack_srs(virtual, srs
);
916 /* set up the XOR registers if the current endian mode conflicts
917 with what is in the MSR */
918 if (WITH_XOR_ENDIAN
) {
921 if ((little_endian
&& CURRENT_TARGET_BYTE_ORDER
== LITTLE_ENDIAN
)
922 || (!little_endian
&& CURRENT_TARGET_BYTE_ORDER
== BIG_ENDIAN
))
925 mask
= WITH_XOR_ENDIAN
- 1;
926 while (i
- 1 < WITH_XOR_ENDIAN
) {
927 virtual->instruction_map
.translation
.xor[i
-1] = mask
;
928 virtual->data_map
.translation
.xor[i
-1] = mask
;
929 mask
= (mask
<< 1) & (WITH_XOR_ENDIAN
- 1);
934 /* don't allow the processor to change endian modes */
935 if ((little_endian
&& CURRENT_TARGET_BYTE_ORDER
!= LITTLE_ENDIAN
)
936 || (!little_endian
&& CURRENT_TARGET_BYTE_ORDER
!= LITTLE_ENDIAN
))
937 error("vm_synchronize_context() - unsuported change of byte order\n");
942 vm_data_map INLINE_VM
*
943 vm_create_data_map(vm
*memory
)
945 return &memory
->data_map
;
949 vm_instruction_map INLINE_VM
*
950 vm_create_instruction_map(vm
*memory
)
952 return &memory
->instruction_map
;
956 unsigned_word STATIC_INLINE_VM
957 vm_translate(om_map
*map
,
959 om_access_types access
,
964 switch (CURRENT_ENVIRONMENT
) {
965 case USER_ENVIRONMENT
:
966 case VIRTUAL_ENVIRONMENT
:
968 case OPERATING_ENVIRONMENT
:
969 return om_translate_effective_to_real(map
, ea
, access
,
973 error("vm_translate() - unknown environment\n");
979 unsigned_word INLINE_VM
980 vm_real_data_addr(vm_data_map
*map
,
986 return vm_translate(&map
->translation
,
988 is_read
? om_data_read
: om_data_write
,
995 unsigned_word INLINE_VM
996 vm_real_instruction_addr(vm_instruction_map
*map
,
1000 return vm_translate(&map
->translation
,
1002 om_instruction_read
,
1008 instruction_word INLINE_VM
1009 vm_instruction_map_read(vm_instruction_map
*map
,
1013 unsigned_word ra
= vm_real_instruction_addr(map
, processor
, cia
);
1014 ASSERT((cia
& 0x3) == 0); /* always aligned */
1015 if (WITH_XOR_ENDIAN
)
1016 ra
^= map
->translation
.xor[sizeof(instruction_word
) - 1];
1017 return core_map_read_4(map
->code
, ra
, processor
, cia
);
1022 vm_data_map_read_buffer(vm_data_map
*map
,
1028 for (count
= 0; count
< nr_bytes
; count
++) {
1030 unsigned_word ea
= addr
+ count
;
1031 unsigned_word ra
= vm_translate(&map
->translation
,
1036 if (ra
== MASK(0, 63))
1038 if (WITH_XOR_ENDIAN
)
1039 ra
^= map
->translation
.xor[0];
1040 if (core_map_read_buffer(map
->read
, &byte
, ra
, sizeof(byte
))
1043 ((unsigned_1
*)target
)[count
] = T2H_1(byte
);
1050 vm_data_map_write_buffer(vm_data_map
*map
,
1054 int violate_read_only_section
)
1058 for (count
= 0; count
< nr_bytes
; count
++) {
1059 unsigned_word ea
= addr
+ count
;
1060 unsigned_word ra
= vm_translate(&map
->translation
,
1065 if (ra
== MASK(0, 63))
1067 if (WITH_XOR_ENDIAN
)
1068 ra
^= map
->translation
.xor[0];
1069 byte
= T2H_1(((unsigned_1
*)source
)[count
]);
1070 if (core_map_write_buffer((violate_read_only_section
1073 &byte
, ra
, sizeof(byte
)) != sizeof(byte
))
1080 /* define the read/write 1/2/4/8/word functions */