KVM: PPC: Book3S HV: Allow DTL to be set to address 0, length 0
arch/powerpc/include/asm/kvm_book3s_64.h
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR
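/*
 * In PR KVM the shadow vcpu lives in the per-CPU PACA.  svcpu_get()
 * disables preemption so the returned pointer stays valid until the
 * caller hands it back via svcpu_put().
 */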
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        preempt_enable();
}
#endif

#define SPAPR_TCE_SHIFT         12

#ifdef CONFIG_KVM_BOOK3S_64_HV
#define KVM_DEFAULT_HPT_ORDER   24      /* 16MB HPT by default */
extern int kvm_hpt_order;               /* order of preallocated HPTs */
#endif

#define VRMA_VSID       0x1ffffffUL     /* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK   0x40UL
#define HPTE_V_ABSENT   0x20UL

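/*
 * Atomically set HPTE_V_HVLOCK in the first doubleword of an HPTE,
 * provided none of the bits in @bits are already set.  Returns 1 if
 * the lock was acquired, 0 otherwise (there is no internal retry).
 */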
static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
{
        unsigned long tmp, old;

        asm volatile("  ldarx   %0,0,%2\n"
                     "  and.    %1,%0,%3\n"
                     "  bne     2f\n"
                     "  ori     %0,%0,%4\n"
                     "  stdcx.  %0,0,%2\n"
                     "  beq+    2f\n"
                     "  mr      %1,%3\n"    /* stdcx. failed: flag failure in old */
                     "2: isync"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
                     : "cc", "memory");
        return old == 0;
}

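/*
 * Compute the RB operand for a tlbie instruction from the two HPTE
 * doublewords (v, r) and the HPTE's index in the hash table.  The
 * low-order virtual address bits that are not stored in the HPTE are
 * reconstructed from the hash bucket index, and the page size (L/LP)
 * and segment size (B) fields are filled in from the HPTE itself.
 */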
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
                                             unsigned long pte_index)
{
        unsigned long rb, va_low;

        rb = (v & ~0x7fUL) << 16;               /* AVA field */
        va_low = pte_index >> 3;
        if (v & HPTE_V_SECONDARY)
                va_low = ~va_low;
        /* xor vsid from AVA */
        if (!(v & HPTE_V_1TB_SEG))
                va_low ^= v >> 12;
        else
                va_low ^= v >> 24;
        va_low &= 0x7ff;
        if (v & HPTE_V_LARGE) {
                rb |= 1;                        /* L field */
                if (cpu_has_feature(CPU_FTR_ARCH_206) &&
                    (r & 0xff000)) {
                        /* non-16MB large page, must be 64k */
                        /* (masks depend on page size) */
                        rb |= 0x1000;           /* page encoding in LP field */
                        rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
                        rb |= (va_low & 0xfe);  /* AVAL field (P7 doesn't seem to care) */
                }
        } else {
                /* 4kB page */
                rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of VA */
        }
        rb |= (v >> 54) & 0x300;                /* B field */
        return rb;
}

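/*
 * Decode the base page size of an HPTE from its first (h) and second (l)
 * doublewords.  Returns the size in bytes, or 0 for an encoding we do
 * not handle.
 */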
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
        /* only handle 4k, 64k and 16M pages for now */
        if (!(h & HPTE_V_LARGE))
                return 1ul << 12;               /* 4k page */
        if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
                return 1ul << 16;               /* 64k page */
        if ((l & 0xff000) == 0)
                return 1ul << 24;               /* 16M page */
        return 0;                               /* error */
}

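/*
 * Page frame number (in PAGE_SIZE units) of the start of the page
 * mapped by an HPTE, rounded down to the natural page-size boundary.
 */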
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

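/* True if the PP bits allow write access under at least one key class. */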
static inline int hpte_is_writable(unsigned long ptel)
{
        unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

        return pp != PP_RXRX && pp != PP_RXXX;
}

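/* Downgrade the PP bits so the page can no longer be written. */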
static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
        if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
                ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
        else
                ptel |= PP_RXRX;
        return ptel;
}

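/*
 * Check that the guest-supplied WIMG bits are compatible with the type of
 * backing memory: ordinary RAM (io_type == 0) must be cacheable and
 * coherent (M), while I/O mappings must match the host's W/I attributes.
 * An SAO mapping (W|I|M) on ISA 2.06 (POWER7) CPUs is treated as normal
 * cacheable memory.
 */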
static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
        unsigned int wimg = ptel & HPTE_R_WIMG;

        /* Handle SAO */
        if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
            cpu_has_feature(CPU_FTR_ARCH_206))
                wimg = HPTE_R_M;

        if (!io_type)
                return wimg == HPTE_R_M;

        return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}

/*
 * Lock and read a linux PTE.  If the PTE is present, atomically set the
 * referenced bit (and the dirty bit too, if we are writing and the PTE
 * is writable), then write it back and return it.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
{
        pte_t pte, tmp;

        /* wait until _PAGE_BUSY is clear then set it atomically */
        __asm__ __volatile__ (
                "1:     ldarx   %0,0,%3\n"
                "       andi.   %1,%0,%4\n"
                "       bne-    1b\n"
                "       ori     %1,%0,%4\n"
                "       stdcx.  %1,0,%3\n"
                "       bne-    1b"
                : "=&r" (pte), "=&r" (tmp), "=m" (*p)
                : "r" (p), "i" (_PAGE_BUSY)
                : "cc");

        if (pte_present(pte)) {
                pte = pte_mkyoung(pte);
                if (writing && pte_write(pte))
                        pte = pte_mkdirty(pte);
        }

        *p = pte;       /* clears _PAGE_BUSY */

        return pte;
}

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
        return pte_val & (HPTE_R_W | HPTE_R_I);
#else
        return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
                ((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

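/*
 * Test whether the HPTE PP bits grant read/write access for the given
 * key class: key == 0 uses the less restrictive column of the PP
 * encoding, key != 0 the more restrictive one.
 */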
static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return PP_RWRX <= pp && pp <= PP_RXRX;
        return 1;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return pp == PP_RWRW;
        return pp <= PP_RWRW;
}

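/*
 * Extract the storage key number from the HPTE second doubleword and
 * return the matching 2-bit access-control field from the AMR.
 */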
static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
        unsigned long skey;

        skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
                ((hpte_r & HPTE_R_KEY_LO) >> 9);
        return (amr >> (62 - 2 * skey)) & 3;
}

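/*
 * lock_rmap() spins until it can set the lock bit in an rmap entry;
 * unlock_rmap() releases it.
 */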
static inline void lock_rmap(unsigned long *rmap)
{
        do {
                while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
                        cpu_relax();
        } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
        __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

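/*
 * A memslot can be backed by large pages of the given size only if both
 * its starting gfn and its length are multiples of that page size.
 */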
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
                                   unsigned long pagesize)
{
        unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

        if (pagesize <= PAGE_SIZE)
                return 1;
        return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

#endif /* __ASM_KVM_BOOK3S_64_H__ */