/*P:700
 * The pagetable code, on the other hand, still shows the scars of
 * previous encounters. It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest.
:*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/bootparam.h>
#include "lg.h"

/*M:008
 * We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root.
:*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest. If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
 * diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables. Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's. (See what I mean about weird naming? Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code. There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
:*/

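/*
 * An illustrative aside (not code the driver runs): on non-PAE x86, a 32-bit
 * virtual address splits into a 10-bit PGD index, a 10-bit PTE index and a
 * 12-bit page offset. A standalone sketch of that decomposition:
 *
 *	unsigned int pgd_idx = vaddr >> 22;            // top 10 bits
 *	unsigned int pte_idx = (vaddr >> 12) & 0x3FF;  // middle 10 bits
 *	unsigned int offset  = vaddr & 0xFFF;          // low 12 bits
 *
 * pgd_index() and pte_index(), used by the helpers below, are the kernel's
 * versions of the first two lines.
 */
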
/*
 * 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page.
 */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

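/*
 * A quick check of that arithmetic (illustrative only): 1024 PTEs covering
 * 4096 bytes each is 1024 * 4096 = 4MB per PTE page, so PGD entry 1023
 * (PTRS_PER_PGD - 1) covers virtual addresses 0xFFC00000 to 0xFFFFFFFF,
 * the top 4MB where the Switcher lives.
 */
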
/*
 * For PAE we need the PMD index as well. We use the last 2MB, so we
 * will need the last pmd entry of the last pmd page.
 */
#ifdef CONFIG_X86_PAE
#define SWITCHER_PMD_INDEX	(PTRS_PER_PMD - 1)
#define RESERVE_MEM	2U
#define CHECK_GPGD_MASK _PAGE_PRESENT
#else
#define RESERVE_MEM	4U
#define CHECK_GPGD_MASK _PAGE_TABLE
#endif

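/*
 * The sizing works out as follows (an illustrative note, not code the driver
 * runs): under PAE each PMD entry maps 512 PTEs * 4KB = 2MB, so reserving the
 * last PMD entry costs the Guest 2MB (RESERVE_MEM = 2U). Without PAE a whole
 * PGD entry is reserved, which as computed above is 4MB (RESERVE_MEM = 4U).
 */
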
/*
 * We actually need a separate PTE page for each CPU. Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU.
 */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)

/*H:320
 * The page table code is curly enough to need helper functions to keep it
 * clear and clean.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address. Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one).
 */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
        unsigned int index = pgd_index(vaddr);

#ifndef CONFIG_X86_PAE
        /* We kill any Guest trying to touch the Switcher addresses. */
        if (index >= SWITCHER_PGD_INDEX) {
                kill_guest(cpu, "attempt to access switcher pages");
                index = 0;
        }
#endif
        /* Return a pointer to the index'th pgd entry for the i'th page table. */
        return &cpu->lg->pgdirs[i].pgdir[index];
}

#ifdef CONFIG_X86_PAE
/*
 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page. It then returns a pointer to the PMD entry for the
 * given address.
 */
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
        unsigned int index = pmd_index(vaddr);
        pmd_t *page;

        /* We kill any Guest trying to touch the Switcher addresses. */
        if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
            index >= SWITCHER_PMD_INDEX) {
                kill_guest(cpu, "attempt to access switcher pages");
                index = 0;
        }

        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
        page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

        return &page[index];
}
#endif

/*
 * This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page. It then returns a
 * pointer to the PTE entry for the given address.
 */
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
        pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
        pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

        /* You should never call this if the PMD entry wasn't valid */
        BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
        pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif

        return &page[pte_index(vaddr)];
}

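/*
 * Putting the two helpers together (an illustrative sketch, not driver code):
 * walking the shadow tables for a non-PAE vaddr looks roughly like this:
 *
 *	pgd_t *spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
 *	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
 *		pte_t *spte = spte_addr(cpu, *spgd, vaddr);
 *		// *spte is the shadow mapping for vaddr, if any.
 *	}
 *
 * demand_page() below does exactly this dance, with the extra PMD step
 * under PAE.
 */
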
/*
 * These two functions are just like the above two, except they access the
 * Guest page tables. Hence they return a Guest address.
 */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
        unsigned int index = vaddr >> (PGDIR_SHIFT);
        return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

#ifdef CONFIG_X86_PAE
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + pmd_index(vaddr) * sizeof(pmd_t);
}

static unsigned long gpte_addr(struct lg_cpu *cpu,
                               pmd_t gpmd, unsigned long vaddr)
{
        unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;

        BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
        return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#else
static unsigned long gpte_addr(struct lg_cpu *cpu,
                               pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#endif
/*:*/

/*M:014
 * get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting).
:*/

/*H:350
 * This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number. It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back.
 */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
        struct page *page;

        /* gup me one page at this address please! */
        if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
                return page_to_pfn(page);

        /* This value indicates failure. */
        return -1UL;
}

/*H:340
 * Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky. The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number.
 */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
        unsigned long pfn, base, flags;

        /*
         * The Guest sets the global flag, because it thinks that it is using
         * PGE. We only told it to use PGE so it would tell us whether it was
         * flushing a kernel mapping or a userspace mapping. We don't actually
         * use the global bit, so throw it away.
         */
        flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

        /* The Guest's pages are offset inside the Launcher. */
        base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

        /*
         * We need a temporary "unsigned long" variable to hold the answer from
         * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
         * fit in spte.pfn. get_pfn() finds the real physical number of the
         * page, given the virtual number.
         */
        pfn = get_pfn(base + pte_pfn(gpte), write);
        if (pfn == -1UL) {
                kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
                /*
                 * When we destroy the Guest, we'll go through the shadow page
                 * tables and release_pte() them. Make sure we don't think
                 * this one is valid!
                 */
                flags = 0;
        }
        /* Now we assemble our shadow PTE from the page number and flags. */
        return pfn_pte(pfn, __pgprot(flags));
}

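/*
 * A worked example of the conversion above (illustrative numbers, not real
 * ones): suppose the Guest's PTE says page number 5 and the Launcher mapped
 * Guest memory at mem_base = 0x40000000. Then base is 0x40000000 / 4096 =
 * 0x40000, so we ask get_pfn() for the host frame backing virtual page
 * 0x40005. The returned host pfn is combined with the Guest's flags (minus
 * _PAGE_GLOBAL) to form the shadow PTE the CPU actually uses.
 */
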
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
        /*
         * Remember that get_user_pages_fast() took a reference to the page, in
         * get_pfn()? We have to put it back now.
         */
        if (pte_flags(pte) & _PAGE_PRESENT)
                put_page(pte_page(pte));
}
/*:*/

static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
        if ((pte_flags(gpte) & _PAGE_PSE) ||
            pte_pfn(gpte) >= cpu->lg->pfn_limit)
                kill_guest(cpu, "bad page table entry");
}

static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
        if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
            (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
                kill_guest(cpu, "bad page directory entry");
}

#ifdef CONFIG_X86_PAE
static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
        if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
            (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
                kill_guest(cpu, "bad page middle directory entry");
}
#endif

/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here. That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true. Otherwise, it was a real fault and we need to tell the Guest.
 */
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
        pgd_t gpgd;
        pgd_t *spgd;
        unsigned long gpte_ptr;
        pte_t gpte;
        pte_t *spte;

#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
        pmd_t gpmd;
#endif

        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
        /* Toplevel not present? We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                return false;

        /* Now look at the matching shadow entry. */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
                /*
                 * This is not really the Guest's fault, but killing it is
                 * simple for this corner case.
                 */
                if (!ptepage) {
                        kill_guest(cpu, "out of memory allocating pte page");
                        return false;
                }
                /* We check that the Guest pgd is OK. */
                check_gpgd(cpu, gpgd);
                /*
                 * And we copy the flags to the shadow PGD entry. The page
                 * number in the shadow PGD is the page we just allocated.
                 */
                set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
        }

#ifdef CONFIG_X86_PAE
        gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
        /* Middle level not present? We can't map it in. */
        if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
                return false;

        /* Now look at the matching shadow entry. */
        spmd = spmd_addr(cpu, *spgd, vaddr);

        if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);

                /*
                 * This is not really the Guest's fault, but killing it is
                 * simple for this corner case.
                 */
                if (!ptepage) {
                        kill_guest(cpu, "out of memory allocating pte page");
                        return false;
                }

                /* We check that the Guest pmd is OK. */
                check_gpmd(cpu, gpmd);

                /*
                 * And we copy the flags to the shadow PMD entry. The page
                 * number in the shadow PMD is the page we just allocated.
                 */
                native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
        }

        /*
         * OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later.
         */
        gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
#else
        /*
         * OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later.
         */
        gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif
        gpte = lgread(cpu, gpte_ptr, pte_t);

        /* If this page isn't in the Guest page tables, we can't page it in. */
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                return false;

        /*
         * Check they're not trying to write to a page the Guest wants
         * read-only (bit 2 of errcode == write).
         */
        if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
                return false;

        /* User access to a kernel-only page? (bit 3 == user access) */
        if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
                return false;

        /*
         * Check that the Guest PTE flags are OK, and the page number is below
         * the pfn_limit (ie. not mapping the Launcher binary).
         */
        check_gpte(cpu, gpte);

        /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
        gpte = pte_mkyoung(gpte);
        if (errcode & 2)
                gpte = pte_mkdirty(gpte);

        /* Get the pointer to the shadow PTE entry we're going to set. */
        spte = spte_addr(cpu, *spgd, vaddr);

        /*
         * If there was a valid shadow PTE entry here before, we release it.
         * This can happen with a write to a previously read-only entry.
         */
        release_pte(*spte);

        /*
         * If this is a write, we insist that the Guest page is writable (the
         * final arg to gpte_to_spte()).
         */
        if (pte_dirty(gpte))
                *spte = gpte_to_spte(cpu, gpte, 1);
        else
                /*
                 * If this is a read, don't set the "writable" bit in the page
                 * table entry, even if the Guest says it's writable. That way
                 * we will come back here when a write does actually occur, so
                 * we can update the Guest's _PAGE_DIRTY flag.
                 */
                native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

        /*
         * Finally, we write the Guest PTE entry back: we've set the
         * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
         */
        lgwrite(cpu, gpte_ptr, pte_t, gpte);

        /*
         * The fault is fixed, the page table is populated, the mapping
         * manipulated, the result returned and the code complete. A small
         * delay and a trace of alliteration are the only indications the Guest
         * has that a page fault occurred at all.
         */
        return true;
}

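/*
 * A note on the errcode tests above (standard x86 background, for
 * orientation): the hardware page fault error code is a bitmask, so a Guest
 * userspace write to an unmapped page arrives with errcode 0x6 (write | user):
 *
 *	0x1: fault was a protection violation (page was present)
 *	0x2: fault was caused by a write (otherwise a read)
 *	0x4: fault happened while the CPU was in user mode
 */
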
/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable?
 */
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
        pgd_t *spgd;
        unsigned long flags;

#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
#endif
        /* Look at the current top level entry: is it present? */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
                return false;

#ifdef CONFIG_X86_PAE
        spmd = spmd_addr(cpu, *spgd, vaddr);
        if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
                return false;
#endif

        /*
         * Check the flags on the pte entry itself: it must be present and
         * writable.
         */
        flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));

        return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/*
 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write").
 */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
        if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
                kill_guest(cpu, "bad stack page %#lx", vaddr);
}

#ifdef CONFIG_X86_PAE
static void release_pmd(pmd_t *spmd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pmd_flags(*spmd) & _PAGE_PRESENT) {
                unsigned int i;
                pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PMD entry so we never release it twice. */
                native_set_pmd(spmd, __pmd(0));
        }
}

static void release_pgd(pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

                for (i = 0; i < PTRS_PER_PMD; i++)
                        release_pmd(&pmdpage[i]);

                /* Now we can free the page of PMDs */
                free_page((long)pmdpage);
                /* And zero out the PGD entry so we never release it twice. */
                set_pgd(spgd, __pgd(0));
        }
}

#else /* !CONFIG_X86_PAE */
/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                /*
                 * Converting the pfn to find the actual PTE page is easy: turn
                 * the page number into a physical address, then convert to a
                 * virtual address (easy for kernel pages like this one).
                 */
                pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PGD entry so we never release it twice. */
                *spgd = __pgd(0);
        }
}
#endif

/*H:445
 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address.
 */
static void flush_user_mappings(struct lguest *lg, int idx)
{
        unsigned int i;
        /* Release every pgd entry up to the kernel's address. */
        for (i = 0; i < pgd_index(lg->kernel_address); i++)
                release_pgd(lg->pgdirs[idx].pgdir + i);
}

/*H:440
 * (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed.
 */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
        /* Drop the userspace part of the current page table. */
        flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/

/* We walk down the guest page tables to get a guest-physical address */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
        pgd_t gpgd;
        pte_t gpte;
#ifdef CONFIG_X86_PAE
        pmd_t gpmd;
#endif
        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
        /* Toplevel not present? We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
                kill_guest(cpu, "Bad address %#lx", vaddr);
                return -1UL;
        }

#ifdef CONFIG_X86_PAE
        gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
        if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);
        gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
        gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
#endif
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);

        return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}

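/*
 * To make that final line concrete (illustrative numbers): if the walk finds
 * pte_pfn(gpte) == 0x42 for vaddr 0xc0001234, the guest-physical result is
 * 0x42 * 4096 | 0x234 = 0x42234; the page frame supplies the high bits and
 * the low 12 bits come straight from the virtual address.
 */
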
/*
 * We keep several page tables. This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us.
 */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
        unsigned int i;
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
                        break;
        return i;
}

/*H:435
 * And this is us, creating the new page directory. If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
                              unsigned long gpgdir,
                              int *blank_pgdir)
{
        unsigned int next;
#ifdef CONFIG_X86_PAE
        pmd_t *pmd_table;
#endif

        /*
         * We pick one entry at random to throw out. Choosing the Least
         * Recently Used might be better, but this is easy.
         */
        next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
        /* If it's never been allocated at all before, try now. */
        if (!cpu->lg->pgdirs[next].pgdir) {
                cpu->lg->pgdirs[next].pgdir =
                                        (pgd_t *)get_zeroed_page(GFP_KERNEL);
                /* If the allocation fails, just keep using the one we have */
                if (!cpu->lg->pgdirs[next].pgdir)
                        next = cpu->cpu_pgd;
                else {
#ifdef CONFIG_X86_PAE
                        /*
                         * In PAE mode, allocate a pmd page and populate the
                         * last pgd entry.
                         */
                        pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
                        if (!pmd_table) {
                                free_page((long)cpu->lg->pgdirs[next].pgdir);
                                set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
                                next = cpu->cpu_pgd;
                        } else {
                                set_pgd(cpu->lg->pgdirs[next].pgdir +
                                        SWITCHER_PGD_INDEX,
                                        __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                                /*
                                 * This is a blank page, so there are no kernel
                                 * mappings: caller must map the stack!
                                 */
                                *blank_pgdir = 1;
                        }
#else
                        *blank_pgdir = 1;
#endif
                }
        }
        /* Record which Guest toplevel this shadows. */
        cpu->lg->pgdirs[next].gpgdir = gpgdir;
        /* Release all the non-kernel mappings. */
        flush_user_mappings(cpu->lg, next);

        return next;
}

/*H:430
 * (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (ie. changes the top-level
 * pgdir). This occurs on almost every context switch.
 */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
        int newpgdir, repin = 0;

        /* Look to see if we have this one already. */
        newpgdir = find_pgdir(cpu->lg, pgtable);
        /*
         * If not, we allocate or mug an existing one: if it's a fresh one,
         * repin gets set to 1.
         */
        if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
                newpgdir = new_pgdir(cpu, pgtable, &repin);
        /* Change the current pgd index to the new one. */
        cpu->cpu_pgd = newpgdir;
        /* If it was completely blank, we map in the Guest kernel stack */
        if (repin)
                pin_stack_pages(cpu);
}

/*H:470
 * Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings. This is used
 * when we destroy the Guest.
 */
static void release_all_pagetables(struct lguest *lg)
{
        unsigned int i, j;

        /* Every shadow pagetable this Guest has */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir) {
#ifdef CONFIG_X86_PAE
                        pgd_t *spgd;
                        pmd_t *pmdpage;
                        unsigned int k;

                        /* Get the last pmd page. */
                        spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
                        pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

                        /*
                         * And release the pmd entries of that pmd page,
                         * except for the switcher pmd.
                         */
                        for (k = 0; k < SWITCHER_PMD_INDEX; k++)
                                release_pmd(&pmdpage[k]);
#endif
                        /* Every PGD entry except the Switcher at the top */
                        for (j = 0; j < SWITCHER_PGD_INDEX; j++)
                                release_pgd(lg->pgdirs[i].pgdir + j);
                }
}

/*
 * We also throw away everything when a Guest tells us it's changed a kernel
 * mapping. Since kernel mappings are in every page table, it's easiest to
 * throw them all away. This traps the Guest in amber for a while as
 * everything faults back in, but it's rare.
 */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
        release_all_pagetables(cpu->lg);
        /* We need the Guest kernel stack mapped again. */
        pin_stack_pages(cpu);
}
/*:*/

/*M:009
 * Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem. In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed. It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind.
:*/

/*H:420
 * This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in. We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway. This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
                       unsigned long vaddr, pte_t gpte)
{
        /* Look up the matching shadow page directory entry. */
        pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
#endif

        /* If the top level isn't present, there's no entry to update. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
#ifdef CONFIG_X86_PAE
                spmd = spmd_addr(cpu, *spgd, vaddr);
                if (pmd_flags(*spmd) & _PAGE_PRESENT) {
#endif
                        /* Otherwise, start by releasing the existing entry. */
                        pte_t *spte = spte_addr(cpu, *spgd, vaddr);
                        release_pte(*spte);

                        /*
                         * If they're setting this entry as dirty or accessed,
                         * we might as well put that entry they've given us in
                         * now. This shaves 10% off a copy-on-write
                         * micro-benchmark.
                         */
                        if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
                                check_gpte(cpu, gpte);
                                native_set_pte(spte,
                                               gpte_to_spte(cpu, gpte,
                                               pte_flags(gpte) & _PAGE_DIRTY));
                        } else {
                                /*
                                 * Otherwise kill it and we can demand_page()
                                 * it in later.
                                 */
                                native_set_pte(spte, __pte(0));
                        }
#ifdef CONFIG_X86_PAE
                }
#endif
        }
}

/*H:410
 * Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few). Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes. So when the page table above that address changes, we update
 * all the page tables, not just the current one. This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings. This speeds up context switches immensely.
 */
void guest_set_pte(struct lg_cpu *cpu,
                   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
        /*
         * Kernel mappings must be changed on all top levels. Slow, but doesn't
         * happen often.
         */
        if (vaddr >= cpu->lg->kernel_address) {
                unsigned int i;
                for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
                        if (cpu->lg->pgdirs[i].pgdir)
                                do_set_pte(cpu, i, vaddr, gpte);
        } else {
                /* Is this page table one we have a shadow for? */
                int pgdir = find_pgdir(cpu->lg, gpgdir);
                if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
                        /* If so, do the update. */
                        do_set_pte(cpu, pgdir, vaddr, gpte);
        }
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed. When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind, here's our code to update a (top-level) PGD entry:
 */
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
        int pgdir;

        if (idx >= SWITCHER_PGD_INDEX)
                return;

        /* If they're talking about a page table we have a shadow for... */
        pgdir = find_pgdir(lg, gpgdir);
        if (pgdir < ARRAY_SIZE(lg->pgdirs))
                /* ... throw it away. */
                release_pgd(lg->pgdirs[pgdir].pgdir + idx);
}

#ifdef CONFIG_X86_PAE
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
{
        guest_pagetable_clear_all(&lg->cpus[0]);
}
#endif

/*
 * Once we know how much memory we have, we can construct simple identity
 * mappings (which set virtual == physical) and linear mappings which will
 * get the Guest far enough into the boot to create its own.
 *
 * We lay them out of the way, just below the initrd (which is why we need to
 * know its size here).
 */
static unsigned long setup_pagetables(struct lguest *lg,
                                      unsigned long mem,
                                      unsigned long initrd_size)
{
        pgd_t __user *pgdir;
        pte_t __user *linear;
        unsigned long mem_base = (unsigned long)lg->mem_base;
        unsigned int mapped_pages, i, linear_pages;
#ifdef CONFIG_X86_PAE
        pmd_t __user *pmds;
        unsigned int j;
        pgd_t pgd;
        pmd_t pmd;
#else
        unsigned int phys_linear;
#endif

        /*
         * We have mapped_pages frames to map, so we need linear_pages page
         * tables to map them.
         */
        mapped_pages = mem / PAGE_SIZE;
        linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

        /* We put the toplevel page directory page at the top of memory. */
        pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE);

        /* Now we use the next linear_pages pages as pte pages */
        linear = (void *)pgdir - linear_pages * PAGE_SIZE;

#ifdef CONFIG_X86_PAE
        pmds = (void *)linear - PAGE_SIZE;
#endif
        /*
         * Linear mapping is easy: put every page's address into the
         * mapping in order.
         */
        for (i = 0; i < mapped_pages; i++) {
                pte_t pte;
                pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
                if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0)
                        return -EFAULT;
        }

        /*
         * The top level points to the linear page table pages above.
         * We setup the identity and linear mappings here.
         */
#ifdef CONFIG_X86_PAE
        for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
             i += PTRS_PER_PTE, j++) {
                native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
                - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));

                if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
                        return -EFAULT;
        }

        set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
        if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
                return -EFAULT;
        if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
                return -EFAULT;
#else
        phys_linear = (unsigned long)linear - mem_base;
        for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
                pgd_t pgd;
                pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
                            (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));

                if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
                    || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
                                           + i / PTRS_PER_PTE],
                                    &pgd, sizeof(pgd)))
                        return -EFAULT;
        }
#endif

        /*
         * We return the top level (guest-physical) address: remember where
         * this is.
         */
        return (unsigned long)pgdir - mem_base;
}

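/*
 * The resulting layout near the top of Guest memory, sketched for
 * orientation (guest-physical, highest addresses first):
 *
 *	[ initrd: initrd_size bytes, ending at mem ]
 *	[ toplevel pgdir: one page ]   <- setup_pagetables() returns this
 *	[ linear_pages of PTE pages ]
 *	[ one page of PMDs (PAE only) ]
 *	[ ordinary Guest memory below ]
 */
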
/*H:500
 * (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is. We set some things up here:
 */
int init_guest_pagetable(struct lguest *lg)
{
        u64 mem;
        u32 initrd_size;
        struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
#ifdef CONFIG_X86_PAE
        pgd_t *pgd;
        pmd_t *pmd_table;
#endif
        /*
         * Get the Guest memory size and the ramdisk size from the boot header
         * located at lg->mem_base (Guest address 0).
         */
        if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
            || get_user(initrd_size, &boot->hdr.ramdisk_size))
                return -EFAULT;

        /*
         * We start on the first shadow page table, and give it a blank PGD
         * page.
         */
        lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
        if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
                return lg->pgdirs[0].gpgdir;
        lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
        if (!lg->pgdirs[0].pgdir)
                return -ENOMEM;
#ifdef CONFIG_X86_PAE
        pgd = lg->pgdirs[0].pgdir;
        pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
        if (!pmd_table)
                return -ENOMEM;

        set_pgd(pgd + SWITCHER_PGD_INDEX,
                __pgd(__pa(pmd_table) | _PAGE_PRESENT));
#endif
        lg->cpus[0].cpu_pgd = 0;
        return 0;
}

/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
        /* We get the kernel address: above this is all kernel memory. */
        if (get_user(cpu->lg->kernel_address,
                     &cpu->lg->lguest_data->kernel_address)
                /*
                 * We tell the Guest that it can't use the top 2 or 4 MB
                 * of virtual addresses used by the Switcher.
                 */
            || put_user(RESERVE_MEM * 1024 * 1024,
                        &cpu->lg->lguest_data->reserve_mem)
            || put_user(cpu->lg->pgdirs[0].gpgdir,
                        &cpu->lg->lguest_data->pgdir))
                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

        /*
         * In flush_user_mappings() we loop from 0 to
         * "pgd_index(lg->kernel_address)". This assumes it won't hit the
         * Switcher mappings, so check that now.
         */
#ifdef CONFIG_X86_PAE
        if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
            pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
#else
        if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
#endif
                kill_guest(cpu, "bad kernel address %#lx",
                           cpu->lg->kernel_address);
}

d7e28ffe RR |
1079 | void free_guest_pagetable(struct lguest *lg) |
1080 | { | |
1081 | unsigned int i; | |
1082 | ||
bff672e6 | 1083 | /* Throw away all page table pages. */ |
d7e28ffe | 1084 | release_all_pagetables(lg); |
bff672e6 | 1085 | /* Now free the top levels: free_page() can handle 0 just fine. */ |
d7e28ffe RR |
1086 | for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) |
1087 | free_page((long)lg->pgdirs[i].pgdir); | |
1088 | } | |
1089 | ||
2e04ef76 RR |
1090 | /*H:480 |
1091 | * (vi) Mapping the Switcher when the Guest is about to run. | |
bff672e6 | 1092 | * |
e1e72965 | 1093 | * The Switcher and the two pages for this CPU need to be visible in the |
bff672e6 | 1094 | * Guest (and not the pages for other CPUs). We have the appropriate PTE pages |
e1e72965 | 1095 | * for each CPU already set up, we just need to hook them in now we know which |
2e04ef76 RR |
1096 | * Guest is about to run on this CPU. |
1097 | */ | |
0c78441c | 1098 | void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) |
d7e28ffe | 1099 | { |
df29f43e | 1100 | pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); |
df29f43e | 1101 | pte_t regs_pte; |
a53a35a8 | 1102 | unsigned long pfn; |
d7e28ffe | 1103 | |
acdd0b62 MZ |
1104 | #ifdef CONFIG_X86_PAE |
1105 | pmd_t switcher_pmd; | |
1106 | pmd_t *pmd_table; | |
1107 | ||
1108 | native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> | |
1109 | PAGE_SHIFT, PAGE_KERNEL_EXEC)); | |
1110 | ||
1111 | pmd_table = __va(pgd_pfn(cpu->lg-> | |
1112 | pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) | |
1113 | << PAGE_SHIFT); | |
1114 | native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd); | |
1115 | #else | |
1116 | pgd_t switcher_pgd; | |
1117 | ||
2e04ef76 RR |
1118 | /* |
1119 | * Make the last PGD entry for this Guest point to the Switcher's PTE | |
1120 | * page for this CPU (with appropriate flags). | |
1121 | */ | |
ed1dc778 | 1122 | switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); |
df29f43e | 1123 | |
1713608f | 1124 | cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; |
d7e28ffe | 1125 | |
acdd0b62 | 1126 | #endif |
2e04ef76 RR |
1127 | /* |
1128 | * We also change the Switcher PTE page. When we're running the Guest, | |
bff672e6 RR |
1129 | * we want the Guest's "regs" page to appear where the first Switcher |
1130 | * page for this CPU is. This is an optimization: when the Switcher | |
1131 | * saves the Guest registers, it saves them into the first page of this | |
1132 | * CPU's "struct lguest_pages": if we make sure the Guest's register | |
1133 | * page is already mapped there, we don't have to copy them out | |
2e04ef76 RR |
1134 | * again. |
1135 | */ | |
a53a35a8 | 1136 | pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; |
90603d15 MZ |
1137 | native_set_pte(®s_pte, pfn_pte(pfn, PAGE_KERNEL)); |
1138 | native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], | |
1139 | regs_pte); | |
d7e28ffe | 1140 | } |
bff672e6 | 1141 | /*:*/ |
d7e28ffe RR |
1142 | |
static void free_switcher_pte_pages(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                free_page((long)switcher_pte_page(i));
}

/*H:520
 * Setting up the Switcher PTE page for given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1.
 */
static __init void populate_switcher_pte_page(unsigned int cpu,
                                              struct page *switcher_page[],
                                              unsigned int pages)
{
        unsigned int i;
        pte_t *pte = switcher_pte_page(cpu);

        /* The first entries are easy: they map the Switcher code. */
        for (i = 0; i < pages; i++) {
                native_set_pte(&pte[i], mk_pte(switcher_page[i],
                                __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
        }

        /* The only other thing we map is this CPU's pair of pages. */
        i = pages + cpu*2;

        /* First page (Guest registers) is writable from the Guest */
        native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
                        __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));

        /*
         * The second page contains the "struct lguest_ro_state", and is
         * read-only.
         */
        native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
                        __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}

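/*
 * The index arithmetic above, worked through (illustrative only): with the
 * Switcher code taking one page (pages == 1), entry 0 maps the code, CPU 0
 * gets entries 1 and 2 (i = 1 + 0*2), CPU 1 gets entries 3 and 4
 * (i = 1 + 1*2), and so on; each CPU's first entry is its writable register
 * page and the second its read-only state page.
 */
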
/*
 * We've made it through the page table code. Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in keeping the shadow page
 * tables in sync with the Guest's page tables is for one reason: for most
 * Guests this page table dance determines how bad performance will be. This
 * is why Xen uses exotic direct Guest pagetable manipulation, and why both
 * Intel and AMD have implemented shadow page table support directly into
 * hardware.
 *
 * There is just one file remaining in the Host.
 */

/*H:510
 * At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU.
 */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
                if (!switcher_pte_page(i)) {
                        free_switcher_pte_pages();
                        return -ENOMEM;
                }
                populate_switcher_pte_page(i, switcher_page, pages);
        }
        return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
        free_switcher_pte_pages();
}