/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

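/*
 * Accessors for the shadow vcpu kept in the PACA (PR KVM).  Preemption
 * is disabled from svcpu_get() until svcpu_put(), so the PACA pointer
 * stays valid while the caller holds the shadow vcpu.
 */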
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

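/* Each TCE maps one 4kB page of guest DMA space */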
#define SPAPR_TCE_SHIFT		12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
extern unsigned long kvm_rma_pages;
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

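/*
 * Try to lock an HPTE by atomically setting HPTE_V_HVLOCK in its first
 * doubleword, failing if any bit in @bits is already set.  Returns
 * true if the lock was acquired.  A typical caller spins, e.g.:
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *
 * and unlocks by clearing HPTE_V_HVLOCK again.
 */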
static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.  If
	 * we byte swap all data we apply to the PTE we are implicitly
	 * correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

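/*
 * Given the 8-bit LP field of an HPTE and a candidate base page size,
 * return the MMU_PAGE_* index of the actual page size encoded there,
 * or -1 if the LP value matches no penc for that base page size.
 */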
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>= 8KB
		 *    rrrr rrzz		>= 16KB
		 *    rrrr rzzz		>= 32KB
		 *    rrrr zzzz		>= 64KB
		 *    .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

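/*
 * Compute the RB operand for a tlbie instruction from HPTE dword 0
 * (@v), dword 1 (@r) and the HPTE's index in the hash table.  The AVA,
 * page size and segment size fields are reconstructed by reversing the
 * hash computation, as the comments below describe.
 */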
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[b_psize].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, b_psize);
			if (a_psize != -1)
				break;
		}
	}
	/*
	 * Ignore the top 14 bits of the VA.
	 * v has its top two bits covering segment size, so shift it left
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need VA bits 14..65 (so we must
	 * collect an extra 11 bits); for other sizes we need bits
	 * 14..14+i.
	 */
	/* This covers bits 14..54 of the VA */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */
	/*
	 * The AVA in v has its lower 23 bits cleared; we need to derive
	 * those from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the VPN bits from va_low by reversing the hashing.
	 * In v we have the VA with its lower 23 bits dropped and then
	 * shifted left by HPTE_V_AVPN_SHIFT (7) bits.  To recover the
	 * VSID we therefore shift right by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
		rb |= sllp << 5;	/* AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * The remaining 7 bits of the AVA/LP fields; these also
		 * contain the "r" bits of the LP encoding.
		 */
		rb |= (va_low & 0x7f) << 16;
		/*
		 * Clear the LP bits that are not needed, based on the
		 * actual page size.
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * The AVAL field holds bits 58..(77 - base page shift)
		 * of the VA; we have room for bits 58..64, and the
		 * missing bits must be zero-filled.  The +1 takes care
		 * of the L bit shift.
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;	/* LP field */
		break;
	}
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}

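/*
 * Return the page size in bytes encoded by HPTE dwords @h and @l,
 * or 0 if the large-page encoding is not recognized.
 */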
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	int size, a_psize;
	/* Look at the 8 bit LP value */
	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	/* a non-large page is always 4kB */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;
	else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {
			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, size);
			if (a_psize != -1)
				return 1ul << mmu_psize_defs[a_psize].shift;
		}

	}
	return 0;
}

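/*
 * Return the real page frame number (in PAGE_SIZE units) from HPTE
 * dword 1, aligned down to the actual page size of the mapping.
 */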
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

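/*
 * PP bit helpers: test whether an HPTE's PP bits grant write access,
 * and rewrite them to the nearest read-only encoding.
 */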
static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

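/*
 * Check that the WIMG bits of an HPTE are compatible with the host
 * mapping: normal memory must be mapped M (the SAO encoding W|I|M is
 * also accepted on POWER7 and later), and I/O mappings must match the
 * requested @io_type bits.
 */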
static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}

/*
 * If the PTE is present, atomically set the referenced bit (and the
 * dirty bit too, if we are writing to a writable PTE) and return the
 * PTE; otherwise return 0.  If we find a transparent hugepage that is
 * marked as splitting, we also return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
						 unsigned int hugepage)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		old_pte = pte_val(*ptep);
		/*
		 * wait until _PAGE_BUSY is clear then set it atomically
		 */
		if (unlikely(old_pte & _PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		/* If hugepage and it is trans splitting, return None */
		if (unlikely(hugepage &&
			     pmd_trans_splitting(pte_pmd(old_pte))))
			return __pte(0);
#endif
		/* If pte is not present, return None */
		if (unlikely(!(old_pte & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
					     new_pte))
			break;
	}
	return new_pte;
}

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

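/*
 * Test whether the PP bits permit read or write access, given whether
 * the storage key bit (Ks or Kp) selected for the access is set.
 */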
static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return 1;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

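/*
 * Extract the storage key from an HPTE and return the corresponding
 * two-bit access-denied field from the AMR.
 */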
static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

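/*
 * Lock and unlock the rmap chain for a guest page: a simple
 * test-and-set spinlock on KVMPPC_RMAP_LOCK_BIT in the rmap entry.
 */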
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

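/*
 * Check that a memslot's base gfn and size are both multiples of the
 * given page size, so the slot can be mapped with pages of that size.
 */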
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return 1;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

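/*
 * Test whether an HPTE maps the VRMA (the 1TB segment used for guest
 * real-mode accesses) by checking its segment size and VSID fields.
 */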
static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots);
}

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */