KVM: switch to get_user_pages_fast
arch/powerpc/kvm/44x_tlb.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>

#include "44x_tlb.h"

#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

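/* Next shadow TLB slot to evict; advanced round-robin by kvmppc_mmu_map(). */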
static unsigned int kvmppc_tlb_44x_pos;

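/* Derive the attribute and permission bits for a shadow (host) TLB entry from
 * a guest entry's attribute word. */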
static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
        /* Mask off reserved bits. */
        attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;

        if (!usermode) {
                /* Guest is in supervisor mode, so we need to translate guest
                 * supervisor permissions into user permissions. */
                attrib &= ~PPC44x_TLB_USER_PERM_MASK;
                attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
        }

        /* Make sure host can always access this memory. */
        attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

        return attrib;
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
        int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < PPC44x_TLB_SIZE; i++) {
                struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as)
                        continue;

                return i;
        }

        return -1;
}

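/* Find the guest TLB entry covering an instruction fetch from eaddr, using the
 * current guest PID and the MSR[IS] address space. */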
struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_IS);
        unsigned int index;

        index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
        if (index == -1)
                return NULL;
        return &vcpu->arch.guest_tlb[index];
}

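/* As above, but for data accesses: the address space comes from MSR[DS]. */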
struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_DS);
        unsigned int index;

        index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
        if (index == -1)
                return NULL;
        return &vcpu->arch.guest_tlb[index];
}

static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
{
        return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}

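/* Drop the host page reference held by a valid shadow TLB entry, marking the
 * page dirty if the mapping allowed writes. */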
static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                      unsigned int index)
{
        struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
        struct page *page = vcpu->arch.shadow_pages[index];

        if (get_tlb_v(stlbe)) {
                if (kvmppc_44x_tlbe_is_writable(stlbe))
                        kvm_release_page_dirty(page);
                else
                        kvm_release_page_clean(page);
        }
}

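/* Mark a shadow TLB entry as modified so the exit/entry code knows it must be
 * rewritten into the hardware TLB. */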
void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
{
        vcpu->arch.shadow_tlb_mod[i] = 1;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
                    u32 flags)
{
        struct page *new_page;
        struct tlbe *stlbe;
        hpa_t hpaddr;
        unsigned int victim;

        /* Future optimization: don't overwrite the TLB entry containing the
         * current PC (or stack?). */
        victim = kvmppc_tlb_44x_pos++;
        if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
                kvmppc_tlb_44x_pos = 0;
        stlbe = &vcpu->arch.shadow_tlb[victim];

        /* Get reference to new page. */
        new_page = gfn_to_page(vcpu->kvm, gfn);
        if (is_error_page(new_page)) {
                printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
                kvm_release_page_clean(new_page);
                return;
        }
        hpaddr = page_to_phys(new_page);

        /* Drop reference to old page. */
        kvmppc_44x_shadow_release(vcpu, victim);

        vcpu->arch.shadow_pages[victim] = new_page;

        /* XXX Make sure (va, size) doesn't overlap any other
         * entries. 440x6 user manual says the result would be
         * "undefined." */

        /* XXX what about AS? */

        stlbe->tid = !(asid & 0xff);

        /* Force TS=1 for all guest mappings. */
        /* For now we hardcode 4KB mappings, but it will be important to
         * use host large pages in the future. */
        stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
                       | PPC44x_TLB_4K;
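        /* word1 holds the real page number: host physical address bits 31:10
         * in the RPN field, plus bits 35:32 in the 4-bit ERPN field, allowing
         * 36-bit physical addresses. */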
        stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
        stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
                                                    vcpu->arch.msr & MSR_PR);
        kvmppc_tlbe_set_modified(vcpu, victim);

        KVMTRACE_5D(STLB_WRITE, vcpu, victim,
                    stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
                    handler);
}

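/* Invalidate any shadow TLB entries that overlap the guest virtual range
 * [eaddr, eend] in the given address-space ID. */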
void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                           gva_t eend, u32 asid)
{
        unsigned int pid = !(asid & 0xff);
        int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i <= tlb_44x_hwater; i++) {
                struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
                unsigned int tid;

                if (!get_tlb_v(stlbe))
                        continue;

                if (eend < get_tlb_eaddr(stlbe))
                        continue;

                if (eaddr > get_tlb_end(stlbe))
                        continue;

                tid = get_tlb_tid(stlbe);
                if (tid && (tid != pid))
                        continue;

                kvmppc_44x_shadow_release(vcpu, i);
                stlbe->word0 = 0;
                kvmppc_tlbe_set_modified(vcpu, i);
                KVMTRACE_5D(STLB_INVAL, vcpu, i,
                            stlbe->tid, stlbe->word0, stlbe->word1,
                            stlbe->word2, handler);
        }
}

/* Invalidate all mappings on the privilege switch after PID has been changed.
 * The guest always runs with PID=1, so we must clear the entire TLB when
 * switching address spaces. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
        int i;

        if (vcpu->arch.swap_pid) {
                /* XXX Replace loop with fancy data structures. */
                for (i = 0; i <= tlb_44x_hwater; i++) {
                        struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];

                        /* Future optimization: clear only userspace mappings. */
                        kvmppc_44x_shadow_release(vcpu, i);
                        stlbe->word0 = 0;
                        kvmppc_tlbe_set_modified(vcpu, i);
                        KVMTRACE_5D(STLB_INVAL, vcpu, i,
                                    stlbe->tid, stlbe->word0, stlbe->word1,
                                    stlbe->word2, handler);
                }
                vcpu->arch.swap_pid = 0;
        }

        vcpu->arch.shadow_pid = !usermode;
}