/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
                                                       gfn_t gfn)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        return NULL;
}

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        p = find_linux_pte(swapper_pg_dir, addr);
        if (!p || !pte_present(*p))
                return NULL;
        /* assume we don't have huge pages in vmalloc space... */
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}
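
/*
 * The hcall handlers below can be entered in real (MMU off) mode, in which
 * case vmalloc'd structures such as the reverse-map array have to be
 * accessed through their linear-map alias via real_vmalloc_addr().
 */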
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel = ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp, pte_size;
        unsigned long is_io;
        bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = builtin_gfn_to_memslot(kvm, gfn);
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)))
                return H_PARAMETER;

        /* Check if the requested page fits entirely in the memslot. */
        if (!slot_is_aligned(memslot, psize))
                return H_PARAMETER;
        slot_fn = gfn - memslot->base_gfn;

        physp = kvm->arch.slot_phys[memslot->id];
        if (!physp)
                return H_PARAMETER;
        physp += slot_fn;
        if (realmode)
                physp = real_vmalloc_addr(physp);
        pa = *physp;
        is_io = pa & (HPTE_R_I | HPTE_R_W);
        pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
        pa &= PAGE_MASK;

        if (pte_size < psize)
                return H_PARAMETER;
        if (pa && pte_size > psize)
                pa |= gpa & (pte_size - 1);

        ptel &= ~(HPTE_R_PP0 - psize);
        ptel |= pa;

        /* Check WIMG bits */
        if (!hpte_cache_flags_ok(ptel, is_io)) {
                if (is_io)
                        return H_PARAMETER;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
                ptel |= HPTE_R_M;
        }
        pteh |= HPTE_V_VALID;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                /* No exact slot requested: search the PTE group for a free slot */
                pte_index &= ~7UL;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((*hpte & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it.  Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                if ((*hpte & HPTE_V_VALID) == 0)
                                        break;
                                *hpte &= ~HPTE_V_HVLOCK;
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
                        /* Lock the slot and check again */
                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        if (*hpte & HPTE_V_VALID) {
                                *hpte &= ~HPTE_V_HVLOCK;
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev)
                rev->guest_rpte = g_ptel;

        /* Write the second dword first, then make the entry valid and unlocked */
        hpte[1] = ptel;
        asm volatile("eieio" : : : "memory");
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");
        vcpu->arch.gpr[4] = pte_index;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);
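
/*
 * kvm->arch.tlbie_lock serializes global TLB invalidations across vcpus:
 * callers spin on try_lock_tlbie() until it succeeds, issue the
 * tlbie/eieio/tlbsync sequence bracketed by ptesync, then store 0 to unlock.
 */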
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx   %1,0,%2\n"
                     "  cmpwi   cr0,%1,0\n"
                     "  bne     2f\n"
                     "  stwcx.  %3,0,%2\n"
                     "  bne-    1b\n"
                     "  isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn,
                     unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        unsigned long v, r, rb;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }
        /* With a single vcpu online, a local tlbiel is sufficient */
        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
        vcpu->arch.gpr[5] = r = hpte[1];
        rb = compute_tlbie_rb(v, r, pte_index);
        hpte[0] = 0;
        if (!(flags & H_LOCAL)) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                             : : "r" (rb), "r" (kvm->arch.lpid));
                asm volatile("ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile("tlbiel %0" : : "r" (rb));
                asm volatile("ptesync" : : : "memory");
        }
        return H_SUCCESS;
}
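
/*
 * H_BULK_REMOVE: up to four remove requests are packed into gpr[4..11],
 * two doublewords per request.  The top byte of the first doubleword of
 * each request carries the request type and flags; on return it is
 * rewritten with a status code (0x80 = removed, with the R and C bits
 * folded in, 0x90 = not found, 0xa0 = parameter error), while the low
 * 56 bits keep the PTE index.
 */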
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        unsigned long *hp, tlbrb[4];
        long int i, found;
        long int n_inval = 0;
        unsigned long flags, req, pte_index;
        long int local = 0;
        long int ret = H_SUCCESS;

        if (atomic_read(&kvm->online_vcpus) == 1)
                local = 1;
        for (i = 0; i < 4; ++i) {
                pte_index = args[i * 2];
                flags = pte_index >> 56;
                pte_index &= ((1ul << 56) - 1);
                req = flags >> 6;
                flags &= 3;
                if (req == 3)           /* end of request list */
                        break;
                if (req != 1 || flags == 3 ||
                    pte_index >= HPT_NPTE) {
                        /* parameter error */
                        args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
                        ret = H_PARAMETER;
                        break;
                }
                hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                        cpu_relax();
                found = 0;
                if (hp[0] & HPTE_V_VALID) {
                        switch (flags & 3) {
                        case 0:         /* absolute */
                                found = 1;
                                break;
                        case 1:         /* andcond */
                                if (!(hp[0] & args[i * 2 + 1]))
                                        found = 1;
                                break;
                        case 2:         /* AVPN */
                                if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
                                        found = 1;
                                break;
                        }
                }
                if (!found) {
                        hp[0] &= ~HPTE_V_HVLOCK;
                        args[i * 2] = ((0x90 | flags) << 56) + pte_index;
                        continue;
                }
                /* insert R and C bits from PTE */
                flags |= (hp[1] >> 5) & 0x0c;
                args[i * 2] = ((0x80 | flags) << 56) + pte_index;
                tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
                hp[0] = 0;
        }
        if (n_inval == 0)
                return ret;

        if (!local) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile(PPC_TLBIE(%1,%0)
                                     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
                asm volatile("ptesync" : : : "memory");
        }
        return ret;
}
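
/*
 * H_PROTECT updates only the protection-related bits of an existing HPTE:
 * PP0, PP, N and the key bits are taken from the flags argument, the entry
 * is temporarily invalidated around the TLB invalidation, then rewritten.
 */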
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }
        /* With a single vcpu online, a local tlbiel is sufficient */
        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;

        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
        }
        r = (hpte[1] & ~mask) | bits;

        /* Update the HPTE itself, invalidating it around the tlbie */
        v = hpte[0];
        rb = compute_tlbie_rb(v, r, pte_index);
        hpte[0] = v & ~HPTE_V_VALID;
        if (!(flags & H_LOCAL)) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                             : : "r" (rb), "r" (kvm->arch.lpid));
                asm volatile("ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile("tlbiel %0" : : "r" (rb));
                asm volatile("ptesync" : : : "memory");
        }
        hpte[1] = r;
        asm volatile("eieio" : : : "memory");
        hpte[0] = v & ~HPTE_V_HVLOCK;
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}
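
/*
 * H_READ returns the two doublewords of the requested HPTE in gpr[4]/gpr[5];
 * with H_READ_4 set, four consecutive entries are returned in gpr[4..11].
 * With H_R_XLATE set, the guest's view of the second doubleword (kept in
 * the reverse map) is returned instead of the raw HPTE contents.
 */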
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        if (flags & H_R_XLATE)
                rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                r = hpte[1];
                if (hpte[0] & HPTE_V_VALID) {
                        if (rev)
                                r = rev[i].guest_rpte;
                        else
                                r = hpte[1] | HPTE_R_RPN;
                }
                vcpu->arch.gpr[4 + i * 2] = hpte[0];
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}