/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

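/*
 * kvm_tlb_flush_vmid_ipa - invalidate any stage-2 TLB entry for @ipa
 * tagged with the VM's VMID, by calling into HYP mode.
 */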
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	/*
	 * This function also gets called when dealing with HYP page
	 * tables. As HYP doesn't have an associated struct kvm (and
	 * the HYP page tables are fairly static), we don't do
	 * anything there.
	 */
	if (kvm)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

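/*
 * The mmu_memory_cache helpers below pre-allocate page-table pages
 * while it is still safe to sleep, so that stage2_set_pte() can later
 * take pages off the cache without allocating under the mmu_lock
 * spinlock (see user_mem_abort() and kvm_phys_addr_ioremap()).
 */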
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

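/*
 * Page-table pages are reference-counted: the page backing a table
 * starts out with a count of 1, and every entry written into it takes
 * an extra reference (see the get_page() calls in the mapping paths
 * below). When the count drops back to 1, the table holds no entries
 * and can be freed; pte_empty() and pmd_empty() test exactly that.
 */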
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	pud_clear(pud);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
	struct page *pmd_page = virt_to_page(pmd);
	return page_count(pmd_page) == 1;
}

static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	}
}

static bool pte_empty(pte_t *pte)
{
	struct page *pte_page = virt_to_page(pte);
	return page_count(pte_page) == 1;
}

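/*
 * unmap_range - clear every mapping in [start, start + size) from the
 * given page tables, releasing intermediate tables as they become
 * empty. @kvm is NULL when tearing down HYP (rather than stage-2)
 * mappings, in which case no TLB maintenance by IPA is performed.
 */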
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 range;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(kvm, pte, addr);
		range = PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (pte_empty(pte)) {
			clear_pmd_entry(kvm, pmd, addr);
			range = PMD_SIZE;
			if (pmd_empty(pmd)) {
				clear_pud_entry(kvm, pud, addr);
				range = PUD_SIZE;
			}
		}

		addr += range;
	}
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		kfree(boot_hyp_pgd);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	kfree(init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		kfree(hyp_pgd);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

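/*
 * create_hyp_pte_mappings - populate the last-level ptes covering
 * [start, end) with consecutive pages starting at @pfn. Each new entry
 * takes a reference on the pte page and is cleaned to the PoC so that
 * HYP mode, which starts out with caches disabled, observes it.
 */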
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

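/*
 * __create_hyp_mappings - walk the given HYP pgd and map the virtual
 * range [start, end) to consecutive physical pages starting at @pfn
 * with protection @prot, allocating intermediate tables on demand.
 * Updates are serialized by kvm_hyp_pgd_mutex.
 */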
static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * Hyp mode maps the same virtual address as the kernel (modulo
 * HYP_PAGE_OFFSET) to the same underlying physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	unsigned long phys_addr = virt_to_phys(from);
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel memory mapping */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP);
}

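/*
 * A typical caller mirrors a kernel object into HYP before switching
 * to it, e.g. (sketch only, names illustrative):
 *
 *	err = create_hyp_mappings(hyp_code_start, hyp_code_end);
 *	if (err)
 *		goto out_free_mappings;
 */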
/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the 1st level table, whose size is defined by
 * S2_PGD_ORDER (it can cover full 40-bit input addresses or be limited
 * to 32-bit input addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;

	return 0;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:	The VM pointer
 * @start:	The intermediate physical base address of the range to unmap
 * @size:	The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (except when freeing the stage2 pgd as
 * the VM is destroyed), otherwise another faulting VCPU may come in and
 * mess with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1
 * table and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

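/*
 * stage2_set_pte - install @new_pte in the VM's stage-2 tables at
 * @addr, allocating missing intermediate tables from @cache. Callers
 * passing a NULL cache (the MMU notifier path) simply return when a
 * table is missing, since there is nothing to update. With @iomap set,
 * overwriting a present pte is refused with -EFAULT.
 */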
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
		kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

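/*
 * user_mem_abort - resolve a stage-2 fault on memslot-backed memory:
 * pin the backing page with gfn_to_pfn_prot(), then install the new
 * pte under mmu_lock, backing off if an MMU notifier invalidated the
 * range in the meantime (mmu_notifier_retry()).
 */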
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  gfn_t gfn, struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	pte_t new_pte;
	pfn_t pfn;
	int ret;
	bool write_fault, writable;
	unsigned long mmu_seq;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't
	 * risk the page we just got a reference to getting unmapped before
	 * we have a chance to grab the mmu_lock, which ensures that if the
	 * page gets unmapped afterwards, the call to kvm_unmap_hva will
	 * take it away from us again properly. This smp_rmb() interacts
	 * with the smp_wmb() in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	new_pte = pfn_pte(pfn, PAGE_S2);
	coherent_icache_guest_page(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (writable) {
		kvm_set_s2pte_writable(&new_pte);
		kvm_set_pfn_dirty(pfn);
	}
	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which means either the
 * guest simply needs more memory and we must allocate an appropriate page,
 * or the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether
 * this memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check that the stage-2 fault is a translation or permission fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFSC=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

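/*
 * handle_hva_to_gpa - walk every memslot page that intersects the host
 * virtual address range [start, end) and invoke @handler on the
 * corresponding guest physical address, one page at a time.
 */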
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

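/*
 * The accessors below hand out the physical addresses of the runtime
 * and boot HYP pgds (suitable for loading into HTTBR) and of the HYP
 * init vector, for use by the low-level HYP init code.
 */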
phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

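/**
 * kvm_mmu_init - initialize the HYP page tables
 *
 * Computes the physical extent of the HYP init code, copies it to a
 * bounce page if it crosses a page boundary, allocates the boot and
 * runtime HYP pgds, and installs the idmap and trampoline mappings
 * needed to bring up HYP mode.
 */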
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);

	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}