/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

long kvmppc_alloc_hpt(struct kvm *kvm)
{
	unsigned long hpt;
	long lpid;
	struct revmap_entry *rev;
	struct kvmppc_linear_info *li;

	/* Allocate guest's hashed page table */
	li = kvm_alloc_hpt();
	if (li) {
		/* using preallocated memory */
		hpt = (ulong)li->base_virt;
		kvm->arch.hpt_li = li;
	} else {
		/* using dynamic memory */
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
	}

	if (!hpt) {
		pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
		return -ENOMEM;
	}
	kvm->arch.hpt_virt = hpt;

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;

	lpid = kvmppc_alloc_lpid();
	if (lpid < 0)
		goto out_freeboth;

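	/*
	 * SDR1 takes the HPT's real address in its upper bits and the
	 * HPT size, encoded as log2(size in bytes) - 18, in its
	 * low-order bits.
	 */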
	kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
	kvm->arch.lpid = lpid;

	pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
	return 0;

 out_freeboth:
	vfree(rev);
 out_freehpt:
	free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
	return -ENOMEM;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

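/*
 * Set up bolted HPTEs for the guest's virtual real-mode area (VRMA),
 * one HPTE per HPTEG, mapping guest real addresses from 0 upwards
 * with pages of size 1ul << porder.
 */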
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	long ret;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > HPT_NPTEG)
		npages = HPT_NPTEG;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the kvm->arch.slot_phys[][] arrays.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
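	/*
	 * Each slot_phys entry packs the page's real address together
	 * with the KVMPPC_GOT_PAGE flag (recorded on the first page
	 * only, so the reference is dropped exactly once), the
	 * cache-inhibit bits and the log2 page order in its low bits.
	 */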
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

/*
 * We come here on an H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	vcpu->arch.pgdir = current->mm->pgd;
	ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;
}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
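	/* each HPTE occupies two doublewords (16 bytes) in the HPT */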
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/*
	 * We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

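/*
 * Handle a guest data access fault that real-mode code could not
 * resolve: pin the backing page (or find the PFN of an I/O mapping)
 * and make the HPTE valid, or hand the access to MMIO emulation if
 * there is no memslot for it.
 */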
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gfn = hpte_rpn(r, psize);
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	}

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, NULL);
			if (ptep && pte_present(*ptep)) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
		pfn = page_to_pfn(page);
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/* Set the HPTE to point to pfn */
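	/*
	 * HPTE_R_PP0 is the top bit of the second doubleword, so
	 * HPTE_R_PP0 - pte_size is a mask covering just the address
	 * bits above the page offset; clearing those bits drops the
	 * old RPN before the new pfn is inserted.
	 */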
	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page) {
		/*
		 * We drop pages[0] here, not page because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	preempt_enable();
	goto out_put;
}

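/*
 * Invoke the given handler on the rmap entry for each memslot page
 * that maps the given host virtual address.
 */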
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

			ret = handler(kvm, &memslot->rmap[gfn_offset],
				      memslot->base_gfn + gfn_offset);
			retval |= ret;
		}
	}

	return retval;
}

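/*
 * The rmap entry for a page holds the index of one HPTE that maps it;
 * all HPTEs mapping the page are linked into a circular doubly-linked
 * list through the forw and back fields of their revmap entries.
 */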
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			rev[i].guest_rpte = ptel | rcbits;
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

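/*
 * mmu notifier "age" support: report, and clear, whether any HPTE
 * mapping the page has its reference bit set.
 */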
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			rev[i].guest_rpte |= HPTE_R_R;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

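/*
 * Returns 1 if the page has been modified: either the saved changed
 * bit in the rmap entry is set, or some valid HPTE mapping the page
 * has its C bit set.  Clears what it finds, so each call reports only
 * changes since the last call.
 */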
static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		if (!(hptep[1] & HPTE_R_C))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
			/* need to make it temporarily absent to clear C */
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			hptep[1] &= ~HPTE_R_C;
			eieio();
			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
			rev[i].guest_rpte |= HPTE_R_C;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	unsigned long i;
	unsigned long *rmapp, *map;

	preempt_disable();
	rmapp = memslot->rmap;
	map = memslot->dirty_bitmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp))
			__set_bit_le(i, map);
		++rmapp;
	}
	preempt_enable();
	return 0;
}

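/*
 * Pin the guest page containing gpa and return a kernel-virtual
 * pointer to gpa's offset within it; *nb_ret is set to the number of
 * bytes usable from that offset.
 */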
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, psize, offset;
	unsigned long pa;
	unsigned long *physp;

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return NULL;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
		if (!physp)
			return NULL;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				return NULL;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			return NULL;
		page = pages[0];
	}
	psize = PAGE_SIZE;
	if (PageHuge(page)) {
		page = compound_head(page);
		psize <<= compound_order(page);
	}
	offset = gpa & (psize - 1);
	if (nb_ret)
		*nb_ret = psize - offset;
	return page_address(page) + offset;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
	struct page *page = virt_to_page(va);

	put_page(page);
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}