/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
						      gfn_t gfn)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		    gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	return NULL;
}

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

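/*
 * Reverse-map bookkeeping, as used by the two helpers below: each HPTE
 * index has a struct revmap_entry whose forw/back fields hold HPTE
 * indices, linking all HPTEs that map the same guest real page into a
 * circular doubly-linked list.  The page's rmap word stores the index
 * of the list head in KVMPPC_RMAP_INDEX, together with the lock,
 * present and referenced flag bits.
 */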
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
static void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
				    unsigned long *rmap, long pte_index,
				    int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		i = pte_index;
	}
	smp_wmb();
	*rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
}

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				unsigned long hpte_v)
{
	struct revmap_entry *rev, *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	ptel = rev->guest_rpte;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return;

	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	unlock_rmap(rmap);
}

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel = ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return H_PARAMETER;

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->rmap[slot_fn];

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return H_PARAMETER;
	physp += slot_fn;
	if (realmode)
		physp = real_vmalloc_addr(physp);
	pa = *physp;
	if (!pa)
		return H_TOO_HARD;
	is_io = pa & (HPTE_R_I | HPTE_R_W);
	pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
	pa &= PAGE_MASK;

	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	/* Check WIMG */
	if (!hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W | HPTE_R_I | HPTE_R_G);
		ptel |= HPTE_R_M;
	}
	pteh &= ~0x60UL;
	pteh |= HPTE_V_VALID;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if ((*hpte & HPTE_V_VALID) == 0)
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & HPTE_V_VALID) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev)
		rev->guest_rpte = g_ptel;

	/* Link HPTE into reverse-map chain */
	if (realmode)
		rmap = real_vmalloc_addr(rmap);
	lock_rmap(rmap);
	kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index, realmode);

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	vcpu->arch.gpr[4] = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);

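/*
 * Global invalidations use the tlbie instruction, which has to be
 * serialized machine-wide; kvm->arch.tlbie_lock provides that
 * serialization.  The lock word is claimed with this CPU's paca lock
 * token via the lwarx/stwcx. sequence below.
 */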
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	remove_revmap_chain(kvm, pte_index, v);
	smp_wmb();
	hpte[0] = 0;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)
			break;
		if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		remove_revmap_chain(kvm, pte_index, hp[0]);
		smp_wmb();
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = v & ~HPTE_V_VALID;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

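/*
 * H_READ: return the contents of one HPTE (or four consecutive HPTEs
 * with H_READ_4) in GPR4 onwards.  With H_R_XLATE the guest's original
 * second doubleword, saved in the revmap at H_ENTER time, is returned
 * instead of the translated one.
 */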
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	if (flags & H_R_XLATE)
		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if (hpte[0] & HPTE_V_VALID) {
			if (rev)
				r = rev[i].guest_rpte;
			else
				r = hpte[1] | HPTE_R_RPN;
		}
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}