/*P:700
 * The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest.
:*/

/* Copyright (C) Rusty Russell IBM Corporation 2013.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"

/*M:008
 * We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root.
:*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest, or three-level with PAE.  If
 * you're not entirely comfortable with virtual addresses, physical addresses
 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
 * Table Handling" (with diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 * (i) Looking up a page table entry when the Guest faults,
 * (ii) Making sure the Guest stack is mapped,
 * (iii) Setting up a page table entry when the Guest tells us one has changed,
 * (iv) Switching page tables,
 * (v) Flushing (throwing away) page tables,
 * (vi) Mapping the Switcher when the Guest is about to run,
 * (vii) Setting up the page tables initially.
:*/

/*
 * The Switcher uses the complete top PTE page.  That's 1024 PTE entries (4MB)
 * or 512 PTE entries with PAE (2MB).
 */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

/*
 * For PAE we need the PMD index as well.  We use the last 2MB, so we
 * will need the last pmd entry of the last pmd page.
 */
#ifdef CONFIG_X86_PAE
#define CHECK_GPGD_MASK _PAGE_PRESENT
#else
#define CHECK_GPGD_MASK _PAGE_TABLE
#endif

/*H:320
 * The page table code is curly enough to need helper functions to keep it
 * clear and clean.  The kernel itself provides many of them, which is one
 * advantage of insisting that the Guest and Host use the same CONFIG_X86_PAE
 * setting.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one).
 */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
        unsigned int index = pgd_index(vaddr);

        /* Return a pointer to the index'th pgd entry for the i'th page table. */
        return &cpu->lg->pgdirs[i].pgdir[index];
}

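/*
 * For illustration only (not part of the driver): without PAE, pgd_index()
 * is simply the top 10 bits of the address.  A hedged sketch, with
 * hypothetical names, of how a 32-bit virtual address decomposes into
 * PGD index, PTE index and page offset:
 */
#if 0
static void example_decompose(unsigned long vaddr)
{
        unsigned int pgd_idx = vaddr >> PGDIR_SHIFT;          /* top 10 bits */
        unsigned int pte_idx = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        unsigned long offset = vaddr & ~PAGE_MASK;            /* low 12 bits */

        pr_debug("%#lx -> pgd %u, pte %u, offset %#lx\n",
                 vaddr, pgd_idx, pte_idx, offset);
}
#endif
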
#ifdef CONFIG_X86_PAE
/*
 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page.  It then returns a pointer to the PMD entry for the
 * given address.
 */
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
        unsigned int index = pmd_index(vaddr);
        pmd_t *page;

        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
        page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

        return &page[index];
}
#endif

/*
 * This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns a
 * pointer to the PTE entry for the given address.
 */
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
        pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
        pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

        /* You should never call this if the PMD entry wasn't valid */
        BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
        pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif

        return &page[pte_index(vaddr)];
}

/*
 * These functions are just like the above, except they access the Guest
 * page tables.  Hence they return a Guest address.
 */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
        unsigned int index = vaddr >> (PGDIR_SHIFT);
        return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

#ifdef CONFIG_X86_PAE
/* Follow the PGD to the PMD. */
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + pmd_index(vaddr) * sizeof(pmd_t);
}

/* Follow the PMD to the PTE. */
static unsigned long gpte_addr(struct lg_cpu *cpu,
                               pmd_t gpmd, unsigned long vaddr)
{
        unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;

        BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
        return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#else
/* Follow the PGD to the PTE (no mid-level for !PAE). */
static unsigned long gpte_addr(struct lg_cpu *cpu,
                               pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#endif
/*:*/

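/*
 * For illustration only: these helpers return Guest-physical addresses, so
 * a caller must lgread() the entry rather than dereference it.  A hedged
 * sketch (hypothetical helper, non-PAE) of the full two-step Guest walk:
 */
#if 0
static pte_t example_guest_walk(struct lg_cpu *cpu, unsigned long vaddr)
{
        /* Read the top-level entry from Guest memory... */
        pgd_t gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
        /* ...then use its pfn to locate and read the PTE. */
        return lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
}
#endif
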
/*M:007
 * get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting).
:*/

/*H:350
 * This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag is set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back.
 */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
        struct page *page;

        /* gup me one page at this address please! */
        if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
                return page_to_pfn(page);

        /* This value indicates failure. */
        return -1UL;
}

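/*
 * For illustration only: every successful get_pfn() pins the page, and the
 * pin must eventually be dropped with put_page() (which is exactly what
 * release_pte() below does).  A hedged sketch of that pairing:
 */
#if 0
static void example_pin_then_unpin(unsigned long virtpfn)
{
        unsigned long pfn = get_pfn(virtpfn, 1);
        if (pfn != -1UL)
                put_page(pfn_to_page(pfn));     /* drop the gup reference */
}
#endif
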
/*H:340
 * Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number.
 */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
        unsigned long pfn, base, flags;

        /*
         * The Guest sets the global flag, because it thinks that it is using
         * PGE.  We only told it to use PGE so it would tell us whether it was
         * flushing a kernel mapping or a userspace mapping.  We don't actually
         * use the global bit, so throw it away.
         */
        flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

        /* The Guest's pages are offset inside the Launcher. */
        base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

        /*
         * We need a temporary "unsigned long" variable to hold the answer from
         * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
         * fit in spte.pfn.  get_pfn() finds the real physical number of the
         * page, given the virtual number.
         */
        pfn = get_pfn(base + pte_pfn(gpte), write);
        if (pfn == -1UL) {
                kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
                /*
                 * When we destroy the Guest, we'll go through the shadow page
                 * tables and release_pte() them.  Make sure we don't think
                 * this one is valid!
                 */
                flags = 0;
        }
        /* Now we assemble our shadow PTE from the page number and flags. */
        return pfn_pte(pfn, __pgprot(flags));
}

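/*
 * For illustration only, with made-up numbers: if the Launcher maps the
 * Guest at mem_base 0x8000000, base is 0x8000000 / PAGE_SIZE = 0x8000;
 * a Guest PTE naming Guest-physical pfn 5 really refers to the Launcher's
 * virtual pfn 0x8005, which get_pfn() pins and converts into a
 * host-physical pfn.  A hedged sketch of just that arithmetic:
 */
#if 0
static unsigned long example_remap(struct lg_cpu *cpu, pte_t gpte)
{
        unsigned long base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

        /* e.g. base 0x8000 + Guest pfn 5 = Launcher-virtual pfn 0x8005. */
        return get_pfn(base + pte_pfn(gpte), 0);
}
#endif
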
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
        /*
         * Remember that get_user_pages_fast() took a reference to the page, in
         * get_pfn()?  We have to put it back now.
         */
        if (pte_flags(pte) & _PAGE_PRESENT)
                put_page(pte_page(pte));
}
/*:*/

static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
{
        /* We don't handle large pages. */
        if (pte_flags(gpte) & _PAGE_PSE)
                return false;

        return (pte_pfn(gpte) >= cpu->lg->pfn_limit
                && pte_pfn(gpte) < cpu->lg->device_limit);
}

static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
        if ((pte_flags(gpte) & _PAGE_PSE) ||
            pte_pfn(gpte) >= cpu->lg->pfn_limit) {
                kill_guest(cpu, "bad page table entry");
                return false;
        }
        return true;
}

static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
        if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
            (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
                kill_guest(cpu, "bad page directory entry");
                return false;
        }
        return true;
}

#ifdef CONFIG_X86_PAE
static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
        if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
            (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
                kill_guest(cpu, "bad page middle directory entry");
                return false;
        }
        return true;
}
#endif

/*H:331
 * This is the core routine to walk the shadow page tables and find the page
 * table entry for a specific address.
 *
 * If allocate is set, then we allocate any missing levels, setting the flags
 * on the new page directory and mid-level directories using the arguments
 * (which are copied from the Guest's page table entries).
 */
static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
                        int pgd_flags, int pmd_flags)
{
        pgd_t *spgd;
        /* Mid level for PAE. */
#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
#endif

        /* Get top level entry. */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage;

                /* If they didn't want us to allocate anything, stop. */
                if (!allocate)
                        return NULL;

                ptepage = get_zeroed_page(GFP_KERNEL);
                /*
                 * This is not really the Guest's fault, but killing it is
                 * simple for this corner case.
                 */
                if (!ptepage) {
                        kill_guest(cpu, "out of memory allocating pte page");
                        return NULL;
                }
                /*
                 * And we copy the flags to the shadow PGD entry.  The page
                 * number in the shadow PGD is the page we just allocated.
                 */
                set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
        }

        /*
         * Intel's Physical Address Extension actually uses three levels of
         * page tables, so we need to look in the mid-level.
         */
#ifdef CONFIG_X86_PAE
        /* Now look at the mid-level shadow entry. */
        spmd = spmd_addr(cpu, *spgd, vaddr);

        if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage;

                /* If they didn't want us to allocate anything, stop. */
                if (!allocate)
                        return NULL;

                ptepage = get_zeroed_page(GFP_KERNEL);

                /*
                 * This is not really the Guest's fault, but killing it is
                 * simple for this corner case.
                 */
                if (!ptepage) {
                        kill_guest(cpu, "out of memory allocating pmd page");
                        return NULL;
                }

                /*
                 * And we copy the flags to the shadow PMD entry.  The page
                 * number in the shadow PMD is the page we just allocated.
                 */
                set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
        }
#endif

        /* Get the pointer to the shadow PTE entry we're going to set. */
        return spte_addr(cpu, *spgd, vaddr);
}

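/*
 * For illustration only: the two ways find_spte() gets used.  A hedged
 * sketch with a hypothetical helper:
 */
#if 0
static void example_find_spte_modes(struct lg_cpu *cpu, unsigned long vaddr)
{
        /* Lookup only: returns NULL rather than allocating missing levels. */
        pte_t *peek = find_spte(cpu, vaddr, false, 0, 0);

        /* Allocating walk: missing PGD/PMD levels are created with these
         * flags (normally copied from the Guest's own entries). */
        pte_t *make = find_spte(cpu, vaddr, true, _PAGE_TABLE, _PAGE_TABLE);

        (void)peek;
        (void)make;
}
#endif
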
/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true.  Otherwise, it was a real fault and we need to tell the Guest.
 *
 * There's a corner case: they're trying to access memory between
 * pfn_limit and device_limit, which is I/O memory.  In this case, we
 * return false and set @iomem to the physical address, so the
 * Launcher can handle the instruction manually.
 */
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
                 unsigned long *iomem)
{
        unsigned long gpte_ptr;
        pte_t gpte;
        pte_t *spte;
        pmd_t gpmd;
        pgd_t gpgd;

        *iomem = 0;

        /* We never demand page the Switcher, so trying is a mistake. */
        if (vaddr >= switcher_addr)
                return false;

        /* First step: get the top-level Guest page table entry. */
        if (unlikely(cpu->linear_pages)) {
                /* Faking up a linear mapping. */
                gpgd = __pgd(CHECK_GPGD_MASK);
        } else {
                gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
                /* Toplevel not present?  We can't map it in. */
                if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                        return false;

                /*
                 * This kills the Guest if it has weird flags or tries to
                 * refer to a "physical" address outside the bounds.
                 */
                if (!check_gpgd(cpu, gpgd))
                        return false;
        }

        /* This "mid-level" entry is only used for non-linear, PAE mode. */
        gpmd = __pmd(_PAGE_TABLE);

#ifdef CONFIG_X86_PAE
        if (likely(!cpu->linear_pages)) {
                gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
                /* Middle level not present?  We can't map it in. */
                if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
                        return false;

                /*
                 * This kills the Guest if it has weird flags or tries to
                 * refer to a "physical" address outside the bounds.
                 */
                if (!check_gpmd(cpu, gpmd))
                        return false;
        }

        /*
         * OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later.
         */
        gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
#else
        /*
         * OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later.
         */
        gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif

        if (unlikely(cpu->linear_pages)) {
                /* Linear?  Make up a PTE which points to same page. */
                gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
        } else {
                /* Read the actual PTE value. */
                gpte = lgread(cpu, gpte_ptr, pte_t);
        }

        /* If this page isn't in the Guest page tables, we can't page it in. */
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                return false;

        /*
         * Check they're not trying to write to a page the Guest wants
         * read-only (bit 2 of errcode == write).
         */
        if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
                return false;

        /* User access to a kernel-only page? (bit 3 == user access) */
        if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
                return false;

        /* If they're accessing I/O memory, we expect a fault. */
        if (gpte_in_iomem(cpu, gpte)) {
                *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
                return false;
        }

        /*
         * Check that the Guest PTE flags are OK, and the page number is below
         * the pfn_limit (ie. not mapping the Launcher binary).
         */
        if (!check_gpte(cpu, gpte))
                return false;

        /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
        gpte = pte_mkyoung(gpte);
        if (errcode & 2)
                gpte = pte_mkdirty(gpte);

        /* Get the pointer to the shadow PTE entry we're going to set. */
        spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
        if (!spte)
                return false;

        /*
         * If there was a valid shadow PTE entry here before, we release it.
         * This can happen with a write to a previously read-only entry.
         */
        release_pte(*spte);

        /*
         * If this is a write, we insist that the Guest page is writable (the
         * final arg to gpte_to_spte()).
         */
        if (pte_dirty(gpte))
                *spte = gpte_to_spte(cpu, gpte, 1);
        else
                /*
                 * If this is a read, don't set the "writable" bit in the page
                 * table entry, even if the Guest says it's writable.  That way
                 * we will come back here when a write does actually occur, so
                 * we can update the Guest's _PAGE_DIRTY flag.
                 */
                set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

        /*
         * Finally, we write the Guest PTE entry back: we've set the
         * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
         */
        if (likely(!cpu->linear_pages))
                lgwrite(cpu, gpte_ptr, pte_t, gpte);

        /*
         * The fault is fixed, the page table is populated, the mapping
         * manipulated, the result returned and the code complete.  A small
         * delay and a trace of alliteration are the only indications the Guest
         * has that a page fault occurred at all.
         */
        return true;
}

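/*
 * For illustration only: a hedged sketch of the caller's side of this
 * contract (the real caller is run_guest() in core.c; the helper name
 * here is hypothetical):
 */
#if 0
static void example_fault_path(struct lg_cpu *cpu, unsigned long cr2,
                               int errcode)
{
        unsigned long iomem;

        if (demand_page(cpu, cr2, errcode, &iomem))
                return;         /* Fixed up: just resume the Guest. */
        if (iomem)
                return;         /* MMIO: the Launcher emulates the access. */
        /* Otherwise it was a real fault: reflect it into the Guest. */
}
#endif
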
/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable?
 */
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
        pte_t *spte;
        unsigned long flags;

        /* You can't put your stack in the Switcher! */
        if (vaddr >= switcher_addr)
                return false;

        /* If there's no shadow PTE, it's not writable. */
        spte = find_spte(cpu, vaddr, false, 0, 0);
        if (!spte)
                return false;

        /*
         * Check the flags on the pte entry itself: it must be present and
         * writable.
         */
        flags = pte_flags(*spte);
        return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/*
 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write").
 */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
        unsigned long iomem;

        if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
                kill_guest(cpu, "bad stack page %#lx", vaddr);
}
/*:*/

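/*
 * For illustration only: a hedged sketch, modelled on pin_stack_pages()
 * in core.c, of pinning each page of the Guest's kernel stack (the stack
 * grows down from cpu->esp1, hence the -1):
 */
#if 0
static void example_pin_stack(struct lg_cpu *cpu)
{
        unsigned int i;

        for (i = 0; i < cpu->lg->stack_pages; i++)
                pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
}
#endif
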
#ifdef CONFIG_X86_PAE
static void release_pmd(pmd_t *spmd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pmd_flags(*spmd) & _PAGE_PRESENT) {
                unsigned int i;
                pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PMD entry so we never release it twice. */
                set_pmd(spmd, __pmd(0));
        }
}

static void release_pgd(pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

                for (i = 0; i < PTRS_PER_PMD; i++)
                        release_pmd(&pmdpage[i]);

                /* Now we can free the page of PMDs */
                free_page((long)pmdpage);
                /* And zero out the PGD entry so we never release it twice. */
                set_pgd(spgd, __pgd(0));
        }
}

#else /* !CONFIG_X86_PAE */
/*H:450
 * If we chase down the release_pgd() code, the non-PAE version looks like
 * this.  The PAE version is almost identical, but instead of calling
 * release_pte it calls release_pmd(), which looks much like this.
 */
static void release_pgd(pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                /*
                 * Converting the pfn to find the actual PTE page is easy: turn
                 * the page number into a physical address, then convert to a
                 * virtual address (easy for kernel pages like this one).
                 */
                pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PGD entry so we never release it twice. */
                *spgd = __pgd(0);
        }
}
#endif

/*H:445
 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address.
 */
static void flush_user_mappings(struct lguest *lg, int idx)
{
        unsigned int i;
        /* Release every pgd entry up to the kernel's address. */
        for (i = 0; i < pgd_index(lg->kernel_address); i++)
                release_pgd(lg->pgdirs[idx].pgdir + i);
}

/*H:440
 * (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed.
 */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
        /* Drop the userspace part of the current page table. */
        flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/

/* We walk down the guest page tables to get a guest-physical address */
bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
{
        pgd_t gpgd;
        pte_t gpte;
#ifdef CONFIG_X86_PAE
        pmd_t gpmd;
#endif

        /* Still not set up?  Just map 1:1. */
        if (unlikely(cpu->linear_pages)) {
                *paddr = vaddr;
                return true;
        }

        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
        /* Toplevel not present?  We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                goto fail;

#ifdef CONFIG_X86_PAE
        gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
        if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
                goto fail;
        gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
        gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
#endif
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                goto fail;

        *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
        return true;

fail:
        *paddr = -1UL;
        return false;
}

/*
 * This is the version we normally use: kills the Guest if it uses a
 * bad address.
 */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
        unsigned long paddr;

        if (!__guest_pa(cpu, vaddr, &paddr))
                kill_guest(cpu, "Bad address %#lx", vaddr);
        return paddr;
}

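/*
 * For illustration only: once we have a Guest-physical address, the page's
 * contents live at mem_base plus that offset inside the Launcher.  A hedged
 * sketch with a hypothetical helper:
 */
#if 0
static void __user *example_launcher_addr(struct lg_cpu *cpu,
                                          unsigned long gvaddr)
{
        /* Translate Guest-virtual to Guest-physical (may kill the Guest). */
        unsigned long gpaddr = guest_pa(cpu, gvaddr);

        /* Guest-physical is just an offset into the Launcher's mapping. */
        return cpu->lg->mem_base + gpaddr;
}
#endif
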
/*
 * We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us.
 */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
        unsigned int i;
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
                        break;
        return i;
}

/*H:435
 * And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
                              unsigned long gpgdir,
                              int *blank_pgdir)
{
        unsigned int next;

        /*
         * We pick one entry at random to throw out.  Choosing the Least
         * Recently Used might be better, but this is easy.
         */
        next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);
        /* If it's never been allocated at all before, try now. */
        if (!cpu->lg->pgdirs[next].pgdir) {
                cpu->lg->pgdirs[next].pgdir =
                                        (pgd_t *)get_zeroed_page(GFP_KERNEL);
                /* If the allocation fails, just keep using the one we have */
                if (!cpu->lg->pgdirs[next].pgdir)
                        next = cpu->cpu_pgd;
                else {
                        /*
                         * This is a blank page, so there are no kernel
                         * mappings: caller must map the stack!
                         */
                        *blank_pgdir = 1;
                }
        }
        /* Record which Guest toplevel this shadows. */
        cpu->lg->pgdirs[next].gpgdir = gpgdir;
        /* Release all the non-kernel mappings. */
        flush_user_mappings(cpu->lg, next);

        /* This hasn't run on any CPU at all. */
        cpu->lg->pgdirs[next].last_host_cpu = -1;

        return next;
}

/*H:501
 * We do need the Switcher code mapped at all times, so we allocate that
 * part of the Guest page table here.  We map the Switcher code immediately,
 * but defer mapping of the Guest register page and IDT/LDT etc page until
 * just before we run the Guest in map_switcher_in_guest().
 *
 * We *could* do this setup in map_switcher_in_guest(), but at that point
 * we have interrupts disabled, and allocating pages like that is fraught: we
 * can't sleep if we need to free up some memory.
 */
static bool allocate_switcher_mapping(struct lg_cpu *cpu)
{
        int i;

        for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
                pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
                                       CHECK_GPGD_MASK, _PAGE_TABLE);
                if (!pte)
                        return false;

                /*
                 * Map the switcher page if not already there.  It might
                 * already be there because we call allocate_switcher_mapping()
                 * in guest_set_pgd() just in case it did discard our Switcher
                 * mapping, but it probably didn't.
                 */
                if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
                        /* Get a reference to the Switcher page. */
                        get_page(lg_switcher_pages[0]);
                        /* Create a read-only, executable, kernel-style PTE */
                        set_pte(pte,
                                mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
                }
        }
        cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
        return true;
}

/*H:470
 * Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings.  This is used
 * when we destroy the Guest.
 */
static void release_all_pagetables(struct lguest *lg)
{
        unsigned int i, j;

        /* Every shadow pagetable this Guest has */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
                if (!lg->pgdirs[i].pgdir)
                        continue;

                /* Every PGD entry. */
                for (j = 0; j < PTRS_PER_PGD; j++)
                        release_pgd(lg->pgdirs[i].pgdir + j);
                lg->pgdirs[i].switcher_mapped = false;
                lg->pgdirs[i].last_host_cpu = -1;
        }
}

/*
 * We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare.
 */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
        release_all_pagetables(cpu->lg);
        /* We need the Guest kernel stack mapped again. */
        pin_stack_pages(cpu);
        /* And we need the Switcher allocated. */
        if (!allocate_switcher_mapping(cpu))
                kill_guest(cpu, "Cannot populate switcher mapping");
}

/*H:430
 * (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch.
 */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
        int newpgdir, repin = 0;

        /*
         * The very first time they call this, we're actually running without
         * any page tables; we've been making it up.  Throw them away now.
         */
        if (unlikely(cpu->linear_pages)) {
                release_all_pagetables(cpu->lg);
                cpu->linear_pages = false;
                /* Force allocation of a new pgdir. */
                newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
        } else {
                /* Look to see if we have this one already. */
                newpgdir = find_pgdir(cpu->lg, pgtable);
        }

        /*
         * If not, we allocate or mug an existing one: if it's a fresh one,
         * repin gets set to 1.
         */
        if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
                newpgdir = new_pgdir(cpu, pgtable, &repin);
        /* Change the current pgd index to the new one. */
        cpu->cpu_pgd = newpgdir;
        /*
         * If it was completely blank, we map in the Guest kernel stack and
         * the Switcher.
         */
        if (repin)
                pin_stack_pages(cpu);

        if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
                if (!allocate_switcher_mapping(cpu))
                        kill_guest(cpu, "Cannot populate switcher mapping");
        }
}
/*:*/

/*M:009
 * Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind.
:*/

/*H:420
 * This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it, demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void __guest_set_pte(struct lg_cpu *cpu, int idx,
                            unsigned long vaddr, pte_t gpte)
{
        /* Look up the matching shadow page directory entry. */
        pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
#endif

        /* If the top level isn't present, there's no entry to update. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
#ifdef CONFIG_X86_PAE
                spmd = spmd_addr(cpu, *spgd, vaddr);
                if (pmd_flags(*spmd) & _PAGE_PRESENT) {
#endif
                        /* Otherwise, start by releasing the existing entry. */
                        pte_t *spte = spte_addr(cpu, *spgd, vaddr);
                        release_pte(*spte);

                        /*
                         * If they're setting this entry as dirty or accessed,
                         * we might as well put that entry they've given us in
                         * now.  This shaves 10% off a copy-on-write
                         * micro-benchmark.
                         */
                        if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
                            && !gpte_in_iomem(cpu, gpte)) {
                                if (!check_gpte(cpu, gpte))
                                        return;
                                set_pte(spte,
                                        gpte_to_spte(cpu, gpte,
                                                pte_flags(gpte) & _PAGE_DIRTY));
                        } else {
                                /*
                                 * Otherwise kill it and we can demand_page()
                                 * it in later.
                                 */
                                set_pte(spte, __pte(0));
                        }
#ifdef CONFIG_X86_PAE
                }
#endif
        }
}

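/*
 * For illustration only, a hedged restatement of the branch above:
 * if the Guest's entry has _PAGE_DIRTY or _PAGE_ACCESSED set (and isn't
 * I/O memory), we install it in the shadow table right away, pinning the
 * page for write only when _PAGE_DIRTY is set; otherwise we clear the
 * shadow PTE and let demand_page() fill it in on first use.
 */
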
/*H:410
 * Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switch immensely.
 */
void guest_set_pte(struct lg_cpu *cpu,
                   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
        /* We don't let you remap the Switcher; we need it to get back! */
        if (vaddr >= switcher_addr) {
                kill_guest(cpu, "attempt to set pte into Switcher pages");
                return;
        }

        /*
         * Kernel mappings must be changed on all top levels.  Slow, but doesn't
         * happen often.
         */
        if (vaddr >= cpu->lg->kernel_address) {
                unsigned int i;
                for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
                        if (cpu->lg->pgdirs[i].pgdir)
                                __guest_set_pte(cpu, i, vaddr, gpte);
        } else {
                /* Is this page table one we have a shadow for? */
                int pgdir = find_pgdir(cpu->lg, gpgdir);
                if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
                        /* If so, do the update. */
                        __guest_set_pte(cpu, pgdir, vaddr, gpte);
        }
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
        int pgdir;

        if (idx >= PTRS_PER_PGD) {
                kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
                           idx, PTRS_PER_PGD);
                return;
        }

        /* If they're talking about a page table we have a shadow for... */
        pgdir = find_pgdir(lg, gpgdir);
        if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
                /* ... throw it away. */
                release_pgd(lg->pgdirs[pgdir].pgdir + idx);
                /* That might have been the Switcher mapping, remap it. */
                if (!allocate_switcher_mapping(&lg->cpus[0])) {
                        kill_guest(&lg->cpus[0],
                                   "Cannot populate switcher mapping");
                }
                lg->pgdirs[pgdir].last_host_cpu = -1;
        }
}

#ifdef CONFIG_X86_PAE
/* For setting a mid-level, we just throw everything away.  It's easy. */
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
{
        guest_pagetable_clear_all(&lg->cpus[0]);
}
#endif

/*H:500
 * (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, we initialize a shadow page table, which
 * we will populate on future faults.  The Guest doesn't have any actual
 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
 * for the moment.
 *
 * We do need the Switcher to be mapped at all times, so we allocate that
 * part of the Guest page table here.
 */
int init_guest_pagetable(struct lguest *lg)
{
        struct lg_cpu *cpu = &lg->cpus[0];
        int allocated = 0;

        /* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
        cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
        if (!allocated)
                return -ENOMEM;

        /* We start with a linear mapping until the Guest sets up its own. */
        cpu->linear_pages = true;

        /* Allocate the page tables for the Switcher. */
        if (!allocate_switcher_mapping(cpu)) {
                release_all_pagetables(lg);
                return -ENOMEM;
        }

        return 0;
}

/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
        /*
         * We tell the Guest that it can't use the virtual addresses
         * used by the Switcher.  This trick is equivalent to 4GB -
         * switcher_addr.
         */
        u32 top = ~switcher_addr + 1;

        /* We get the kernel address: above this is all kernel memory. */
        if (get_user(cpu->lg->kernel_address,
                     &cpu->lg->lguest_data->kernel_address)
                /*
                 * We tell the Guest that it can't use the top virtual
                 * addresses (used by the Switcher).
                 */
            || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
                return;
        }

        /*
         * In flush_user_mappings() we loop from 0 to
         * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
         * Switcher mappings, so check that now.
         */
        if (cpu->lg->kernel_address >= switcher_addr)
                kill_guest(cpu, "bad kernel address %#lx",
                           cpu->lg->kernel_address);
}

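/*
 * For illustration only: the two's-complement trick above with made-up
 * numbers.  If switcher_addr were 0xFFC00000, then ~switcher_addr + 1 is
 * 0x00400000, which is exactly 4GB - switcher_addr (0x100000000 -
 * 0xFFC00000), computed without needing a 64-bit constant.
 */
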
/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
        unsigned int i;

        /* Throw away all page table pages. */
        release_all_pagetables(lg);
        /* Now free the top levels: free_page() can handle 0 just fine. */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                free_page((long)lg->pgdirs[i].pgdir);
}

/*H:481
 * This clears the Switcher mappings for cpu #i.
 */
static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
{
        unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
        pte_t *pte;

        /* Clear the mappings for both pages. */
        pte = find_spte(cpu, base, false, 0, 0);
        release_pte(*pte);
        set_pte(pte, __pte(0));

        pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
        release_pte(*pte);
        set_pte(pte, __pte(0));
}

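/*
 * For illustration only: the layout these offsets assume.  Page 0 of the
 * Switcher region is the shared Switcher code; after it come two pages per
 * Host CPU (one struct lguest_pages), so cpu #i's pair starts at
 * switcher_addr + PAGE_SIZE + i * 2 * PAGE_SIZE: cpu #0 owns the pages at
 * page offsets 1 and 2, cpu #1 the pages at offsets 3 and 4, and so on.
 */
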
/*H:480
 * (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the Guest
 * (and not the pages for other CPUs).
 *
 * The pages for the pagetables have all been allocated before: we just need
 * to make sure the actual PTEs are up-to-date for the CPU we're about to run
 * on.
 */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
        unsigned long base;
        struct page *percpu_switcher_page, *regs_page;
        pte_t *pte;
        struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];

        /* Switcher page should always be mapped by now! */
        BUG_ON(!pgdir->switcher_mapped);

        /*
         * Remember that we have two pages for each Host CPU, so we can run a
         * Guest on each CPU without them interfering.  We need to make sure
         * those pages are mapped correctly in the Guest, but since we usually
         * run on the same CPU, we cache that, and only update the mappings
         * when we move.
         */
        if (pgdir->last_host_cpu == raw_smp_processor_id())
                return;

        /* -1 means unknown so we remove everything. */
        if (pgdir->last_host_cpu == -1) {
                unsigned int i;
                for_each_possible_cpu(i)
                        remove_switcher_percpu_map(cpu, i);
        } else {
                /* We know exactly what CPU mapping to remove. */
                remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
        }

        /*
         * When we're running the Guest, we want the Guest's "regs" page to
         * appear where the first Switcher page for this CPU is.  This is an
         * optimization: when the Switcher saves the Guest registers, it saves
         * them into the first page of this CPU's "struct lguest_pages": if we
         * make sure the Guest's register page is already mapped there, we
         * don't have to copy them out again.
         */
        /* Find the shadow PTE for this regs page. */
        base = switcher_addr + PAGE_SIZE
                + raw_smp_processor_id() * sizeof(struct lguest_pages);
        pte = find_spte(cpu, base, false, 0, 0);
        regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
        get_page(regs_page);
        set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));

        /*
         * We map the second page of the struct lguest_pages read-only in
         * the Guest: the IDT, GDT and other things it's not supposed to
         * change.
         */
        pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
        percpu_switcher_page
                = lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
        get_page(percpu_switcher_page);
        set_pte(pte, mk_pte(percpu_switcher_page,
                            __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));

        pgdir->last_host_cpu = raw_smp_processor_id();
}

/*H:490
 * We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in keeping the shadow page
 * tables in sync with the Guest's page tables exists for one reason: for most
 * Guests this page table dance determines how bad performance will be.  This
 * is why Xen uses exotic direct Guest pagetable manipulation, and why both
 * Intel and AMD have built shadow page table support directly into hardware.
 *
 * There is just one file remaining in the Host.
 */