/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfns and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
	__attribute__((section(".data.page_aligned"))) =
		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES]
	__attribute__((section(".data.page_aligned"))) =
		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES]
	__attribute__((section(".bss.page_aligned")));

static unsigned long p2m_top_mfn_list[
			PAGE_ALIGN(TOP_ENTRIES / P2M_ENTRIES_PER_PAGE)]
	__attribute__((section(".bss.page_aligned")));

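/*
 * The p2m map is a two-level structure: p2m_top[] holds one pointer per
 * P2M_ENTRIES_PER_PAGE pfns, and each leaf page maps pfn -> mfn directly.
 * As a worked example (assuming 32-bit x86 with 4 KiB pages, so
 * P2M_ENTRIES_PER_PAGE == 1024): pfn 0x12345 resolves to
 * p2m_top[0x48][0x345], since 0x12345 / 1024 == 0x48 and
 * 0x12345 % 1024 == 0x345.
 */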
static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for(pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for(idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

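/*
 * Look up the mfn for a given pfn. Out-of-range pfns return
 * INVALID_P2M_ENTRY; pfns whose leaf page is still the p2m_missing
 * placeholder read back as ~0UL.
 */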
unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}

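/*
 * Install a real leaf page for a p2m_top slot. The new page is filled
 * with INVALID_P2M_ENTRY and swapped in with cmpxchg(), so a racing
 * caller that loses simply frees its copy; the winner also records the
 * page's mfn for the save/restore lists.
 */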
static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

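/*
 * Record a pfn -> mfn translation. Auto-translated guests keep an
 * identity mapping, so it is only sanity-checked; out-of-range pfns may
 * only be invalidated. A leaf page is allocated on demand the first
 * time a valid entry is stored into a previously-missing range.
 */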
void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

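/*
 * Translate an arbitrary kernel virtual address into a machine address
 * by looking up its pte and combining the mfn with the offset within
 * the page.
 */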
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

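/*
 * Remap an already-mapped lowmem page read-only or read-write in place
 * via a direct update_va_mapping hypercall. Xen requires pagetable
 * pages to be mapped read-only (see the comment at the top of this
 * file).
 */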
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

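/*
 * pmd/pud updates are routed through the hypervisor as mmu_update
 * operations queued in a multicall batch (once a pagetable is pinned
 * its pages are read-only, so direct writes would fault). The batch is
 * issued immediately unless we are in lazy MMU mode; preemption is
 * disabled around use of the per-cpu batch.
 */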
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

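/*
 * Set a pte in a (potentially) live pagetable. For the current mm or
 * init_mm the update goes through update_va_mapping (batched when in
 * lazy MMU mode, issued as an immediate hypercall otherwise), and only
 * falls back to a direct pte write if that fails or for a foreign mm.
 */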
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

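/*
 * The *_val/make_* pairs below convert between the kernel's pfn-based
 * view of a pagetable entry and the mfn-based value that Xen and the
 * hardware actually see. Conversion only applies to present entries;
 * xen_make_pte additionally masks off _PAGE_PCD and _PAGE_PWT before
 * handing the entry back.
 */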
pteval_t xen_pte_val(pte_t pte)
{
	pteval_t ret = pte.pte;

	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;

	return ret;
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	pgdval_t ret = pgd.pgd;
	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
	return ret;
}

pte_t xen_make_pte(pteval_t pte)
{
	if (pte & _PAGE_PRESENT) {
		pte = phys_to_machine(XPADDR(pte)).maddr;
		pte &= ~(_PAGE_PCD | _PAGE_PWT);
	}

	return (pte_t){ .pte = pte };
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	if (pgd & _PAGE_PRESENT)
		pgd = phys_to_machine(XPADDR(pgd)).maddr;

	return (pgd_t){ pgd };
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	pmdval_t ret = native_pmd_val(pmd);
	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
	return ret;
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pud_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

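/*
 * On 32-bit PAE a pte is two 32-bit words, and the present bit lives in
 * the low word. Writes are therefore ordered so the low word is stored
 * last when setting and cleared first when clearing: a new entry only
 * becomes present once both halves are written, and a cleared entry
 * stops being present before its high word is zapped.
 */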
void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	xen_set_pmd(pmdp, __pmd(0));
}

pmd_t xen_make_pmd(pmdval_t pmd)
{
	if (pmd & _PAGE_PRESENT)
		pmd = phys_to_machine(XPADDR(pmd)).maddr;

	return native_make_pmd(pmd);
}

/*
 (Yet another) pagetable walker. This one is intended for pinning a
 pagetable. This means that it walks a pagetable and calls the
 callback function on each page it finds making up the page table,
 at every level. It walks the entire pagetable, but it only bothers
 pinning pte pages which are below the limit. In the normal case
 this will be TASK_SIZE, but at boot we need to pin up to
 FIXADDR_TOP. But the important bit is that we don't pin beyond
 there, because then we start getting into Xen's ptes.
*/
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}

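/*
 * Helpers for batched pinning. lock_pte() takes the page's split pte
 * lock (when configured) so it can be held across the multicall batch;
 * do_unlock() is registered with xen_mc_callback() so the lock is
 * dropped only once the batch has been issued. xen_do_pin() queues a
 * single mmuext_op in the current batch.
 */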
static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

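/*
 * Per-page callback for pgd_walk() when pinning. Marks the page pinned
 * and, for lowmem pages, queues a batched remap to read-only (L1 tables
 * are also explicitly pinned here). An unpinned highmem page makes the
 * callback return 1, so the caller flushes unused kmaps before pinning.
 */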
static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			xen_pgd_pin((pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits. So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			printk("unpinning pinned %p\n", page_address(page));
			xen_pgd_unpin((pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

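/*
 * Pin a pagetable as soon as a new mm starts using it (on activation or
 * fork-time duplication), under the mm's page_table_lock.
 */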
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;

	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't flushed its set of
	   pending hypercalls yet. In this case, we can look at its
	   actual current cr3 value, and force it to flush if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif
/*
 * While a process runs, Xen pins its pagetable, which means that the
 * hypervisor forces it to be read-only and controls all updates to it.
 * This means that all pagetable updates have to go via the hypervisor,
 * which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (PagePinned(virt_to_page(mm->pgd)))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}