/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
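
/*
 * Illustrative sketch, not part of the original header: svcpu_get()
 * disables preemption and svcpu_put() re-enables it, so the two must
 * be strictly paired around any access to the shadow vcpu.  The helper
 * name below is hypothetical.
 */
static inline ulong example_read_shadow_gpr3(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong val = svcpu->gpr[3];	/* safe: preemption is disabled */

	svcpu_put(svcpu);
	return val;
}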
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte swap all data we apply on the PTE we're implicitly correct
	 * again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}

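/*
 * Illustrative sketch, not part of the original header: the expected
 * pairing of try_lock_hpte() and unlock_hpte().  A caller spins until
 * it owns HPTE_V_HVLOCK, updates the entry, then releases the lock
 * using the pre-lock value of dword 0.  The helper name and the new_r
 * argument are hypothetical.
 */
static inline void example_hpte_set_dword1(__be64 *hptep, unsigned long new_r)
{
	unsigned long v;

	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	hptep[1] = cpu_to_be64(new_r);	/* modify while the HPTE is locked */
	unlock_hpte(hptep, v);		/* release barrier, then clear HVLOCK */
}
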
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz	     >=8KB
		 *    rrrr rrzz	     >=16KB
		 *    rrrr rzzz	     >=32KB
		 *    rrrr zzzz	     >=64KB
		 *    .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

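/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper that extracts the LP field from HPTE dword 1 and decodes the
 * actual page size, assuming a segment with a 64K base page size.
 */
static inline int example_actual_psize_64k(unsigned long hpte_r)
{
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	/* returns an MMU_PAGE_* index, or -1 if the penc doesn't match */
	return __hpte_actual_psize(lp, MMU_PAGE_64K);
}
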
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[b_psize].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, b_psize);
			if (a_psize != -1)
				break;
		}
	}
	/*
	 * Ignore the top 14 bits of va.
	 * v has its top two bits covering segment size, hence shift
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 (so we have to
	 * collect 11 extra bits); for others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/*  B field */
	/*
	 * The AVA in v has its lower 23 bits cleared; we need to
	 * derive those from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low by reversing the hashing.
	 * In v we have the va with 23 bits dropped and then left
	 * shifted by HPTE_V_AVPN_SHIFT (7) bits.  To find the vsid
	 * we right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = get_sllp_encoding(a_psize);
		rb |= sllp << 5;	/*  AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * The remaining bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
		/*
		 * Now clear the LP bits that aren't needed, based on
		 * the actual psize.
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * AVAL field 58..77 - base_page_shift bits of va.
		 * We have space for bits 58..64; missing bits should
		 * be zero filled.  The +1 takes care of the L bit shift.
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;	/* LP field */
		break;
	}
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}

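/*
 * Illustrative sketch, not part of the original header: how the RB
 * value from compute_tlbie_rb() is typically consumed.  This assumes
 * PPC_TLBIE() from <asm/ppc-opcode.h> is visible here; the function
 * name and the lpid argument are hypothetical.
 */
static inline void example_tlbie_guest_hpte(unsigned long v, unsigned long r,
					    unsigned long pte_index,
					    unsigned long lpid)
{
	unsigned long rb = compute_tlbie_rb(v, r, pte_index);

	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE(%1,%0) : : "r" (rb), "r" (lpid) : "memory");
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
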
static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	int size, a_psize;
	/* Look at the 8 bit LP value */
	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;
	else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {
			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, size);
			if (a_psize != -1) {
				if (is_base_size)
					return 1ul << mmu_psize_defs[size].shift;
				return 1ul << mmu_psize_defs[a_psize].shift;
			}
		}
	}
	return 0;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, false);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, true);
}

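/*
 * Illustrative sketch, not part of the original header: for an MPSS
 * mapping (e.g. a 64K base page size with an actual 16M page),
 * hpte_base_page_size() returns 64K while hpte_page_size() returns
 * 16M.  The helper below is a hypothetical use of the two, where h
 * and l are the two HPTE dwords.
 */
static inline bool example_hpte_backed_by_hugepage(unsigned long h,
						   unsigned long l)
{
	return hpte_page_size(h, l) > PAGE_SIZE;
}
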
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure hptel is
	 * cache-inhibited as well.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If the PTE is present, atomically set the referenced bit (and the
 * dirty bit, for a write access to a writable page) and return the
 * PTE; otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * wait until H_PAGE_BUSY is clear then set it atomically
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* if the pte is not present, return an empty pte */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

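/*
 * Illustrative sketch, not part of the original header: a typical use
 * of kvmppc_read_update_linux_pte() when translating a guest address
 * through the Linux page tables.  The caller is assumed to hold the
 * appropriate lock so the page table can't go away underneath us; the
 * helper name and the -1 sentinel are hypothetical.
 */
static inline unsigned long example_pte_to_pfn(pte_t *ptep, int writing)
{
	pte_t pte = kvmppc_read_update_linux_pte(ptep, writing);

	if (!pte_present(pte))
		return -1ul;	/* not present (returned as __pte(0)) */
	/* referenced bit is now set; dirty too, if writing a writable page */
	return pte_pfn(pte);
}
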
static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

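/*
 * Illustrative sketch, not part of the original header: in the 2-bit
 * field returned by hpte_get_skey_perm(), bit 0 denies reads and
 * bit 1 denies writes (mirroring how the HV MMU code interprets the
 * AMR).  A hypothetical read-permission check:
 */
static inline bool example_skey_allows_read(unsigned long hpte_r,
					    unsigned long amr)
{
	return !(hpte_get_skey_perm(hpte_r, amr) & 1);
}
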
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

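/*
 * Illustrative sketch, not part of the original header: lock_rmap()
 * and unlock_rmap() bracket any walk or update of an rmap chain.  The
 * helper and its flag argument are hypothetical.
 */
static inline void example_set_rmap_flag(unsigned long *rmap,
					 unsigned long flag)
{
	lock_rmap(rmap);	/* spins until KVMPPC_RMAP_LOCK_BIT is owned */
	*rmap |= flag;
	unlock_rmap(rmap);	/* clears the lock bit with release semantics */
}
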
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

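/*
 * Illustrative values, not part of the original header, following
 * directly from the code above:
 *
 *	slb_pgsize_encoding(0x1000)    == 0				(4K)
 *	slb_pgsize_encoding(0x10000)   == SLB_VSID_L | SLB_VSID_LP_01	(64K)
 *	slb_pgsize_encoding(0x1000000) == SLB_VSID_L			(16M)
 */
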
static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */