/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	/* The shadow vcpu lives in this CPU's PACA, so pin the task here */
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

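/*
 * Illustrative pairing (a sketch, not part of the original header):
 * callers bracket their use of the shadow vcpu so the task cannot
 * migrate off this CPU while the PACA pointer is live:
 *
 *	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 *	(read or write svcpu fields)
 *	svcpu_put(svcpu);
 */
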
#define SPAPR_TCE_SHIFT		12

#ifdef CONFIG_KVM_BOOK3S_64_HV
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
extern int kvm_hpt_order;		/* order of preallocated HPTs */
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	/*
	 * ldarx/stdcx. sequence: fail (old != 0) if any of the given bits
	 * are already set or the store-conditional loses the reservation,
	 * otherwise atomically set HPTE_V_HVLOCK.
	 */
	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	ori	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	li	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}

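/*
 * Illustrative locking pattern (a sketch, assuming a caller-side hptep
 * pointer to dword 0 of the HPTE; not part of the original header):
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	(inspect or update the HPTE)
 *	hptep[0] &= ~HPTE_V_HVLOCK;
 */
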
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	unsigned long rb, va_low;

	rb = (v & ~0x7fUL) << 16;		/* AVA field */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/* xor vsid from AVA */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> 12;
	else
		va_low ^= v >> 24;
	va_low &= 0x7ff;
	if (v & HPTE_V_LARGE) {
		rb |= 1;			/* L field */
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (r & 0xff000)) {
			/* non-16MB large page, must be 64k */
			/* (masks depend on page size) */
			rb |= 0x1000;		/* page encoding in LP field */
			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
		}
	} else {
		/* 4kB page */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}

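/*
 * Illustrative use (a sketch; the real invalidation sequences live in
 * the HV MMU code, e.g. book3s_hv_rm_mmu.c): the returned value is the
 * RB operand for a tlbie targeting this HPTE:
 *
 *	rb = compute_tlbie_rb(hpte_v, hpte_r, pte_index);
 *	(issue ptesync; tlbie rb; eieio; tlbsync; ptesync)
 */
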
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;		/* 4k page */
	if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
		return 1ul << 16;		/* 64k page */
	if ((l & 0xff000) == 0)
		return 1ul << 24;		/* 16M page */
	return 0;				/* error */
}

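/*
 * Worked example (illustrative): a POWER7 64k HPTE has HPTE_V_LARGE set
 * in h and LP bits 0x1000 in l, giving 1ul << 16; a 16M HPTE has no LP
 * bits set, giving 1ul << 24.  A return of 0 flags an encoding this
 * helper does not handle.
 */
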
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

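/*
 * Worked example (illustrative): a pp value of PP_RWRW is writable, so
 * hpte_is_writable() returns 1; hpte_make_readonly() ORs in PP_RXRX
 * (or substitutes PP_RXXX when PP0 is involved), after which
 * hpte_is_writable() returns 0.
 */
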
static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}

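/*
 * Illustrative use (assumed calling convention): for ordinary RAM a
 * caller passes io_type == 0 and only a fully cacheable, coherent
 * mapping (wimg == HPTE_R_M) is accepted; for MMIO it passes the
 * expected W/I combination, e.g. io_type == HPTE_R_I.
 */
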
/*
 * Lock and read a linux PTE.  If it's present and writable, atomically
 * set dirty and referenced bits and return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
{
	pte_t pte, tmp;

	/* wait until _PAGE_BUSY is clear then set it atomically */
	__asm__ __volatile__ (
		"1:	ldarx	%0,0,%3\n"
		"	andi.	%1,%0,%4\n"
		"	bne-	1b\n"
		"	ori	%1,%0,%4\n"
		"	stdcx.	%1,0,%3\n"
		"	bne-	1b"
		: "=&r" (pte), "=&r" (tmp), "=m" (*p)
		: "r" (p), "i" (_PAGE_BUSY)
		: "cc");

	if (pte_present(pte)) {
		pte = pte_mkyoung(pte);
		if (writing && pte_write(pte))
			pte = pte_mkdirty(pte);
	}

	*p = pte;	/* clears _PAGE_BUSY */

	return pte;
}

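/*
 * Illustrative caller sketch (hypothetical, not from this header):
 *
 *	pte = kvmppc_read_update_linux_pte(ptep, writing);
 *	if (pte_present(pte))
 *		pfn = pte_pfn(pte);	(then translate further)
 */
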
/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return 1;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

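/*
 * Worked example (illustrative): for storage key 5 the shift is
 * 62 - 2*5 = 52, so the function returns AMR bits 52..53; under the
 * architected AMR layout one bit of the pair denies reads and the
 * other denies writes for that key.
 */
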
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

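/*
 * Illustrative usage (a sketch): the rmap lock bit protects a guest
 * page's reverse-mapping chain while it is walked or edited:
 *
 *	lock_rmap(rmap);
 *	(walk or modify the rmap chain)
 *	unlock_rmap(rmap);
 *
 * lock_rmap() spins test-then-test-and-set, so waiters mostly read the
 * cache line instead of bouncing it between CPUs.
 */
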
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return 1;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

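/*
 * Worked example (illustrative): with 4k host pages (PAGE_SHIFT == 12)
 * and pagesize == 16MB, mask == (1ul << 24 >> 12) - 1 == 0xfff, so the
 * memslot qualifies only if base_gfn and npages are both multiples of
 * 4096 small pages.
 */
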
#endif /* __ASM_KVM_BOOK3S_64_H__ */