/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>

#include "44x_tlb.h"

#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
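
/* Next shadow TLB slot to evict; kvmppc_mmu_map() advances this in simple
 * round-robin fashion. */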
static unsigned int kvmppc_tlb_44x_pos;

static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* Mask off reserved bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}
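
	/* The shift above assumes the usual 44x TLB word 2 layout from
	 * asm/mmu-44x.h, in which each U* permission bit sits three bits
	 * above its S* counterpart, so shifting the supervisor bits left by
	 * three yields the matching user bits. */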

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	return attrib;
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;
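
		/* A TID of zero is treated as a wildcard below and matches any
		 * guest PID (the 440 uses TID 0 for globally shared pages). */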
		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}
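
/* MSR[IS] selects the address space used for instruction fetches and MSR[DS]
 * the one used for data accesses, so the two lookups below differ only in
 * which MSR bit supplies the AS value. */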

struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
	unsigned int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;
	return &vcpu->arch.guest_tlb[index];
}

struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
	unsigned int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;
	return &vcpu->arch.guest_tlb[index];
}

static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}

/* Must be called with mmap_sem locked for writing. */
static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                      unsigned int index)
{
	struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
	struct page *page = vcpu->arch.shadow_pages[index];

	kunmap(vcpu->arch.shadow_pages[index]);

	if (get_tlb_v(stlbe)) {
		/* Pages that were mapped writable may have been dirtied by the
		 * guest, so their dirty state must be preserved on release. */
		if (kvmppc_44x_tlbe_is_writable(stlbe))
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
                    u32 flags)
{
	struct page *new_page;
	struct tlbe *stlbe;
	hpa_t hpaddr;
	unsigned int victim;

	/* Future optimization: don't overwrite the TLB entry containing the
	 * current PC (or stack?). */
	victim = kvmppc_tlb_44x_pos++;
	if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
		kvmppc_tlb_44x_pos = 0;
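
	/* Victim selection is plain round robin over the shadow TLB slots;
	 * tlb_44x_hwater appears to mark the last slot the host leaves
	 * available for replacement, with the entries above it reserved for
	 * pinned kernel mappings. */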
	stlbe = &vcpu->arch.shadow_tlb[victim];

	/* Get reference to new page. */
	down_write(&current->mm->mmap_sem);
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page!\n");
		kvm_release_page_clean(new_page);
		up_write(&current->mm->mmap_sem);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_44x_shadow_release(vcpu, victim);
	up_write(&current->mm->mmap_sem);

	vcpu->arch.shadow_pages[victim] = new_page;

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

	stlbe->tid = asid & 0xff;

	/* Force TS=1 for all guest mappings. */
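	/* Guest translations live in address space 1 while the host runs in
	 * address space 0, so guest and host mappings can never collide in
	 * the hardware TLB. */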
	/* For now we hardcode 4KB mappings, but it will be important to
	 * use host large pages in the future. */
	stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
	               | PPC44x_TLB_4K;
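
	/* Word 1 holds the real page number in its upper bits plus, in its
	 * low four bits, the ERPN: bits 32-35 of the 440's 36-bit physical
	 * address. */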
	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                            vcpu->arch.msr & MSR_PR);
}

void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
{
	unsigned int pid = asid & 0xff;
	int i;

	/* XXX Replace loop with fancy data structures. */
	down_write(&current->mm->mmap_sem);
	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eaddr < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_44x_shadow_release(vcpu, i);
		stlbe->word0 = 0;
	}
	up_write(&current->mm->mmap_sem);
}
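
/* Shadow permission bits depend on the guest privilege level (see
 * kvmppc_44x_tlb_shadow_attrib()), which is why a privilege switch must throw
 * away every shadow mapping. */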

/* Invalidate all mappings, so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	down_write(&current->mm->mmap_sem);
	for (i = 0; i <= tlb_44x_hwater; i++) {
		kvmppc_44x_shadow_release(vcpu, i);
		vcpu->arch.shadow_tlb[i].word0 = 0;
	}
	up_write(&current->mm->mmap_sem);
}