/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

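/*
 * Resolve a kernel virtual address to a machine address by looking up
 * its pte in the current pagetable and combining the mfn with the
 * page offset.
 */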
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

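/*
 * Flip the protection of a single directly-mapped (lowmem) kernel
 * page with an update_va_mapping hypercall, bypassing the usual pte
 * update paths.
 */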
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

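/*
 * Queue a pmd update as a single mmu_update multicall; outside lazy
 * MMU mode the batch is issued (and the hypercall made) immediately.
 */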
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

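/*
 * Set a pte.  For the current mm or init_mm the update can go via an
 * update_va_mapping hypercall (batched in lazy MMU mode); otherwise,
 * or if the hypercall fails, fall back to writing the pte directly.
 */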
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
			goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

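/*
 * The pvops pte/pmd/pgd accessors: reads convert the stored mfn back
 * to a pfn, and newly-made entries convert the given pfn to an mfn,
 * so the rest of the kernel only ever sees pseudo-physical frames.
 */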
pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pud_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

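/*
 * On PAE, write the high half of the pte first: the present bit lives
 * in the low half, so a previously-clear entry only becomes visible
 * once the low word is written.
 */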
void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	xen_set_pmd(pmdp, __pmd(0));
}

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

/*
  (Yet another) pagetable walker.  This one is intended for pinning a
  pagetable.  This means that it walks a pagetable and calls the
  callback function on each page it finds making up the page table,
  at every level.  It walks the entire pagetable, but it only bothers
  pinning pte pages which are below limit.  In the normal case this
  will be TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP.
  But the important bit is that we don't pin beyond there, because
  then we start getting into Xen's ptes.
*/
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}

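/* Take the lock protecting a pte page, when split pte locks are
   enabled; returns NULL otherwise. */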
static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

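/* Multicall-completion callback to drop a pte lock taken by lock_pte(). */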
static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

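/* Queue an mmuext pin/unpin operation on the frame backing pfn, as
   part of the current multicall batch. */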
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

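/*
 * pgd_walk callback for pinning: remap an unpinned lowmem pagetable
 * page read-only (and pin pte pages as L1 tables).  Returns non-zero
 * if an unpinned highmem page was found, meaning kmaps need flushing.
 */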
static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits.  So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

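/*
 * pgd_walk callback for unpinning: unpin pte pages and remap any
 * previously-pinned lowmem pagetable page read-write again.
 */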
static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

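/*
 * Pin the pagetable of an mm that is about to go live - activated at
 * exec, or duplicated at fork - under its page_table_lock.
 */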
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;

	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (PagePinned(virt_to_page(mm->pgd)))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}