/* arch/x86/xen/mmu.c */
/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO 30

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while (0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val) do { (void)(val); } while (0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
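
/*
 * Illustrative sketch (not part of the original file): how a caller
 * might exercise the two paths above.  A lowmem page takes the quick
 * virt_to_machine() p2m lookup; a vmalloc'd pointer fails
 * virt_addr_valid() and forces the slower lookup_address() walk.
 * The buffers here are hypothetical.
 */
static void __maybe_unused example_virt_to_mfn_usage(void)
{
	unsigned long linear = __get_free_page(GFP_KERNEL);	/* lowmem: fast path */
	void *mapped = vmalloc(PAGE_SIZE);			/* vmalloc: slow path */

	if (linear)
		printk(KERN_DEBUG "lowmem page is mfn %lx\n",
		       arbitrary_virt_to_mfn((void *)linear));
	if (mapped)
		printk(KERN_DEBUG "vmalloc page is mfn %lx\n",
		       arbitrary_virt_to_mfn(mapped));

	free_page(linear);
	vfree(mapped);
}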

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static bool xen_iomap_pte(pte_t pte)
{
	return pte_flags(pte) & _PAGE_IOMAP;
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = arbitrary_virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
{
	xen_set_domain_pte(ptep, pteval, DOMID_IO);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}
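
/*
 * Illustrative sketch (not part of the original file): two back-to-back
 * updates issued inside one batch.  The first call takes the else
 * branch above and opens a fresh MULTI_mmu_update with one entry; the
 * second finds that multicall still open via xen_mc_extend_args() and
 * just appends its struct mmu_update, bumping the count in args[1].
 * The pmd pointers and the zero values written are hypothetical.
 */
static void __maybe_unused example_extend_two_updates(pmd_t *a, pmd_t *b)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(a).maddr;
	u.val = pmd_val_ma(__pmd(0));
	xen_extend_mmu_update(&u);		/* opens a 1-entry mmu_update */

	u.ptr = arbitrary_virt_to_machine(b).maddr;
	u.val = pmd_val_ma(__pmd(0));
	xen_extend_mmu_update(&u);		/* extends it to 2 entries */

	xen_mc_issue(PARAVIRT_LAZY_MMU);	/* one hypercall, two updates */
}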

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (xen_iomap_pte(pteval)) {
		xen_set_iomap_pte(ptep, pteval);
		goto out;
	}

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}
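
/*
 * Illustrative sketch (not part of the original file): entering lazy
 * MMU mode around a run of PTE writes lets the MULTI_update_va_mapping
 * branch above queue them into one multicall batch instead of trapping
 * once per PTE.  The loop bounds and the ptep/addr arithmetic are
 * hypothetical; a real caller gets them from a pagetable walk.
 */
static void __maybe_unused example_batched_pte_updates(struct mm_struct *mm,
						       unsigned long addr,
						       pte_t *ptep, pte_t pte,
						       int nr)
{
	int i;

	arch_enter_lazy_mmu_mode();	/* paravirt_get_lazy_mode() == LAZY_MMU */
	for (i = 0; i < nr; i++)
		xen_set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, pte);
	arch_leave_lazy_mmu_mode();	/* flushes the queued multicalls */
}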

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * Paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}
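
/*
 * Illustrative sketch (not part of the original file): the bit surgery
 * the two converters above perform, on a concrete value.  With 4K
 * pages, a pte value of 0x00000000abcd0067 splits into frame number
 * 0xabcd0 ((val & PTE_PFN_MASK) >> PAGE_SHIFT) and flags 0x067
 * (PRESENT|RW|USER|ACCESSED|DIRTY); the round trip swaps only the
 * frame number and re-ORs the same flags back in.
 */
static pteval_t __maybe_unused example_frame_swap(pteval_t val,
						  unsigned long new_frame)
{
	pteval_t flags = val & PTE_FLAGS_MASK;	/* keep low/high flag bits */

	return ((pteval_t)new_frame << PAGE_SHIFT) | flags;
}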

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}

	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}
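
/*
 * Illustrative sketch (not part of the original file): decoding the
 * expected PAT MSR value checked above.  Each PAT entry is one byte,
 * entry 0 in the lowest byte, so 0x0007010600070106 unpacks (entry 0
 * upward) to WB WC UC- UC WB WC UC- UC - i.e. the "UC UC- WC WB" the
 * comment above reads from entry 3 down.
 */
static void __maybe_unused example_decode_pat(u64 pat)
{
	/* x86 PAT memory-type encodings: 0=UC 1=WC 4=WT 5=WP 6=WB 7=UC- */
	static const char *const types[8] = {
		"UC", "WC", "?", "?", "WT", "WP", "WB", "UC-"
	};
	int i;

	for (i = 0; i < 8; i++)
		printk(KERN_DEBUG "PAT%d = %s\n", i,
		       types[(pat >> (i * 8)) & 7]);
}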

pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);

	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.)
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}

	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

#ifdef CONFIG_XEN_DEBUG
pte_t xen_make_pte_debug(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
	phys_addr_t other_addr;
	bool io_page = false;
	pte_t _pte;

	if (pte & _PAGE_IOMAP)
		io_page = true;

	_pte = xen_make_pte(pte);

	if (!addr)
		return _pte;

	if (io_page &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
		WARN_ONCE(addr != other_addr,
			  "0x%lx is using VM_IO, but it is 0x%lx!\n",
			  (unsigned long)addr, (unsigned long)other_addr);
	} else {
		pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
		other_addr = (_pte.pte & PTE_PFN_MASK);
		WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
			  "0x%lx is missing VM_IO (and wasn't fixed)!\n",
			  (unsigned long)addr);
	}

	return _pte;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
#endif

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}
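
/*
 * Illustrative sketch (not part of the original file): on 64-bit, each
 * kernel pgd page can carry a shadow "user" pgd whose address is
 * stashed in page->private (see xen_pgd_alloc() below).  Given any
 * entry pointer into the kernel pgd, the lookup above masks down to
 * the page, recovers the shadow, and re-applies the same entry offset.
 * The mm/vaddr arguments here are hypothetical.
 */
static void __maybe_unused example_mirror_user_entry(struct mm_struct *mm,
						     unsigned long vaddr)
{
	pgd_t *kernel_entry = pgd_offset(mm, vaddr);
	pgd_t *user_entry = xen_get_user_pgd(kernel_entry);

	if (user_entry)		/* only user-range entries have a shadow */
		*user_entry = *kernel_entry;
}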

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole, so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}
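
/*
 * Illustrative sketch (not part of the original file): a trivial
 * callback for the walker above that just counts pagetable pages per
 * level.  xen_pin_page()/xen_unpin_page() below are the real users;
 * this only shows the calling convention (return non-zero to request
 * a TLB flush).  The counter array is hypothetical.
 */
static unsigned long example_pt_counts[4];	/* one slot per pt_level */

static int __maybe_unused example_count_page(struct mm_struct *mm,
					     struct page *page,
					     enum pt_level level)
{
	example_pt_counts[level]++;
	return 0;			/* counting never needs a flush */
}

/* e.g.: xen_pgd_walk(&init_mm, example_count_page, FIXADDR_TOP); */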

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}
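
/*
 * Illustrative sketch (not part of the original file): the essential
 * pin sequence for a single lowmem pagetable page, in the order the
 * logic above preserves - first remap the page RO, then issue the pin
 * hypercall.  Doing it the other way round would make Xen reject the
 * pin, since the page would still be writable.  Real callers go
 * through the walker; this bypasses it purely for exposition.
 */
static void __maybe_unused example_pin_one_pte_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	xen_mc_batch();
	make_lowmem_page_readonly(lowmem_page_address(page));	/* step 1: RO */
	xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);			/* step 2: pin */
	xen_mc_issue(0);
}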

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = percpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
{
	/* reserve the range used */
	native_pagetable_reserve(start, end);

	/* set as RW the rest */
	printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
	       PFN_PHYS(pgt_buf_top));
	while (end < PFN_PHYS(pgt_buf_top)) {
		make_lowmem_page_readwrite(__va(end));
		end += PAGE_SIZE;
	}
}

static void xen_post_allocator_init(void);

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
	xen_post_allocator_init();
}

static void xen_write_cr2(unsigned long cr2)
{
	percpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return percpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return percpu_read(xen_vcpu_info.arch.cr2);
}

static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long va)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUS. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return percpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	percpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn;

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	if (kernel) {
		percpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();		/* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	percpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);	/* interrupts restored */
}
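
/*
 * Illustrative sketch (not part of the original file): why remote
 * readers must use xen_current_cr3 rather than xen_cr3 (see the note
 * near their definitions).  xen_cr3 is written before the batched
 * MMUEXT_NEW_BASEPTR is submitted, while xen_current_cr3 only changes
 * from the multicall completion callback, so it is never set early.
 */
static bool __maybe_unused example_vcpu_uses_pgd(int cpu, pgd_t *pgd)
{
	return per_cpu(xen_current_cr3, cpu) == __pa(pgd);
}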

static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}
#else /* CONFIG_X86_64 */
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	/*
	 * If the new pfn is within the range of the newly allocated
	 * kernel pagetable, and it isn't being mapped into an
	 * early_ioremap fixmap slot as a freshly allocated page, make sure
	 * it is RO.
	 */
	if (((!is_early_ioremap_ptep(ptep) &&
	      pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
	    (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
		pte = pte_wrprotect(pte);

	return pte;
}
#endif /* CONFIG_X86_64 */

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static __init void xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static __init void xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}
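
/*
 * Illustrative sketch (not part of the original file): the conversion
 * chain above, followed end to end.  Xen hands over pagetable entries
 * containing machine addresses; to chase one we go machine ->
 * pseudo-physical via the m2p table (m2p() also masks off the flag
 * bits), then pseudo-physical -> virtual via the fixed kernel mapping.
 * This is exactly what xen_setup_kernel_pagetable() does below with
 * m2v(pgd[...].pgd).
 */
static void * __maybe_unused example_follow_pgd(pgd_t entry)
{
	phys_addr_t machine = entry.pgd;		/* mfn-based address + flags */
	unsigned long pseudo_phys = m2p(machine);	/* mfn -> pfn, flags masked */

	return __ka(pseudo_phys);			/* pfn -> kernel virtual */
}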
1681
4ec5387c 1682/* Set the page permissions on an identity-mapped pages */
319f3ba5
JF
1683static void set_page_prot(void *addr, pgprot_t prot)
1684{
1685 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1686 pte_t pte = pfn_pte(pfn, prot);
1687
1688 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1689 BUG();
1690}
1691
1692static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1693{
1694 unsigned pmdidx, pteidx;
1695 unsigned ident_pte;
1696 unsigned long pfn;
1697
764f0138
JF
1698 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1699 PAGE_SIZE);
1700
319f3ba5
JF
1701 ident_pte = 0;
1702 pfn = 0;
1703 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1704 pte_t *pte_page;
1705
1706 /* Reuse or allocate a page of ptes */
1707 if (pmd_present(pmd[pmdidx]))
1708 pte_page = m2v(pmd[pmdidx].pmd);
1709 else {
1710 /* Check for free pte pages */
764f0138 1711 if (ident_pte == LEVEL1_IDENT_ENTRIES)
319f3ba5
JF
1712 break;
1713
1714 pte_page = &level1_ident_pgt[ident_pte];
1715 ident_pte += PTRS_PER_PTE;
1716
1717 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1718 }
1719
1720 /* Install mappings */
1721 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1722 pte_t pte;
1723
319f3ba5
JF
1724 if (!pte_none(pte_page[pteidx]))
1725 continue;
1726
1727 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1728 pte_page[pteidx] = pte;
1729 }
1730 }
1731
1732 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1733 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1734
1735 set_page_prot(pmd, PAGE_KERNEL_RO);
1736}
1737
7e77506a
IC
1738void __init xen_setup_machphys_mapping(void)
1739{
1740 struct xen_machphys_mapping mapping;
1741 unsigned long machine_to_phys_nr_ents;
1742
1743 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1744 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1745 machine_to_phys_nr_ents = mapping.max_mfn + 1;
1746 } else {
1747 machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
1748 }
1749 machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
1750}
1751
319f3ba5
JF
1752#ifdef CONFIG_X86_64
1753static void convert_pfn_mfn(void *v)
1754{
1755 pte_t *pte = v;
1756 int i;
1757
1758 /* All levels are converted the same way, so just treat them
1759 as ptes. */
1760 for (i = 0; i < PTRS_PER_PTE; i++)
1761 pte[i] = xen_make_pte(pte[i].pte);
1762}
1763
1764/*
0d2eb44f 1765 * Set up the initial kernel pagetable.
319f3ba5
JF
1766 *
1767 * We can construct this by grafting the Xen provided pagetable into
1768 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1769 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1770 * means that only the kernel has a physical mapping to start with -
1771 * but that's enough to get __va working. We need to fill in the rest
1772 * of the physical mapping once some sort of allocator has been set
1773 * up.
1774 */
1775__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1776 unsigned long max_pfn)
1777{
1778 pud_t *l3;
1779 pmd_t *l2;
1780
14988a4d
SS
1781 /* max_pfn_mapped is the last pfn mapped in the initial memory
1782 * mappings. Considering that on Xen after the kernel mappings we
1783 * have the mappings of some pages that don't exist in pfn space, we
1784 * set max_pfn_mapped to the last real pfn mapped. */
1785 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1786
319f3ba5
JF
1787 /* Zap identity mapping */
1788 init_level4_pgt[0] = __pgd(0);
1789
1790 /* Pre-constructed entries are in pfn, so convert to mfn */
1791 convert_pfn_mfn(init_level4_pgt);
1792 convert_pfn_mfn(level3_ident_pgt);
1793 convert_pfn_mfn(level3_kernel_pgt);
1794
1795 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1796 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1797
1798 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1799 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1800
1801 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1802 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1803 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1804
1805 /* Set up identity map */
1806 xen_map_identity_early(level2_ident_pgt, max_pfn);
1807
1808 /* Make pagetable pieces RO */
1809 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1810 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1811 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1812 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1813 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1814 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1815
1816 /* Pin down new L4 */
1817 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1818 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1819
1820 /* Unpin Xen-provided one */
1821 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1822
1823 /* Switch over */
1824 pgd = init_level4_pgt;
1825
1826 /*
1827 * At this stage there can be no user pgd, and no page
1828 * structure to attach it to, so make sure we just set kernel
1829 * pgd.
1830 */
1831 xen_mc_batch();
1832 __xen_write_cr3(true, __pa(pgd));
1833 xen_mc_issue(PARAVIRT_LAZY_CPU);
1834
a9ce6bc1 1835 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
319f3ba5
JF
1836 __pa(xen_start_info->pt_base +
1837 xen_start_info->nr_pt_frames * PAGE_SIZE),
1838 "XEN PAGETABLES");
1839
1840 return pgd;
1841}
1842#else /* !CONFIG_X86_64 */
5b5c1af1
IC
1843static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1844static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1845
static __init void xen_write_cr3_init(unsigned long cr3)
{
	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));

	BUG_ON(read_cr3() != __pa(initial_page_table));
	BUG_ON(cr3 != __pa(swapper_pg_dir));

	/*
	 * We are switching to swapper_pg_dir for the first time (from
	 * initial_page_table) and therefore need to mark that page
	 * read-only and then pin it.
	 *
	 * Xen disallows sharing of kernel PMDs for PAE
	 * guests. Therefore we must copy the kernel PMD from
	 * initial_page_table into a new kernel PMD to be used in
	 * swapper_pg_dir.
	 */
	swapper_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
	memcpy(swapper_kernel_pmd, initial_kernel_pmd,
	       sizeof(pmd_t) * PTRS_PER_PMD);
	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);

	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	xen_write_cr3(cr3);
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	set_page_prot(initial_page_table, PAGE_KERNEL);
	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);

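	/* The one-time switch is done; route all further cr3 writes
	 * through the normal xen_write_cr3() path. */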
	pv_mmu_ops.write_cr3 = &xen_write_cr3;
}

__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	initial_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(initial_kernel_pmd, max_pfn);

	memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	initial_page_table[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);

	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

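	/* Now pin the copied pagetable and switch to it; Xen dropped its
	 * hold on the original when it was unpinned above. */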
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	xen_write_cr3(__pa(initial_page_table));

	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
				   __pa(xen_start_info->pt_base +
					xen_start_info->nr_pt_frames * PAGE_SIZE),
				   "XEN PAGETABLES");

	return initial_page_table;
}
#endif	/* CONFIG_X86_64 */

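/* A scratch page, filled with 0xff in xen_init_mmu_ops(), mapped into
 * fixmap slots that must exist but must not reach real hardware under
 * Xen (the local and IO APIC bases below). */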
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;

static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

#ifdef CONFIG_X86_IO_APIC
	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
		/*
		 * We just don't map the IO APIC - all access is via
		 * hypercalls.  Keep the address in the pte for reference.
		 */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}

__init void xen_ident_map_ISA(void)
{
	unsigned long pa;

	/*
	 * If we're dom0, then linear map the ISA machine addresses into
	 * the kernel's address space.
	 */
	if (!xen_initial_domain())
		return;

	xen_raw_printk("Xen: setup ISA identity maps\n");

	for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
		pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);

		if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
			BUG();
	}

	xen_flush_tlb();
}

static __init void xen_post_allocator_init(void)
{
#ifdef CONFIG_XEN_DEBUG
	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
#endif
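	/* The kernel's allocators are up now, so the restricted boot-time
	 * pagetable ops can be replaced with the full versions. */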
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}

static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
#ifdef CONFIG_X86_32
	.write_cr3 = xen_write_cr3_init,
#else
	.write_cr3 = xen_write_cr3,
#endif

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.release_pmd = xen_release_pmd_init,

	.set_pte = xen_set_pte_init,
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
	x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
	pv_mmu_ops = xen_mmu_ops;

	memset(dummy_mapping, 0xff, PAGE_SIZE);
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER	9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

#define VOID_PTE (mfn_pte(0, __pgprot(0)))
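/* Invalidate the PTEs covering 2^order pages at vaddr, recording the
 * old MFNs in in_frames and/or the PFNs in out_frames so the caller
 * can feed them to xen_exchange_memory(). */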
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
			      unsigned long *in_frames,
			      unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}


/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

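		/* Only the last update in the batch asks for a TLB flush:
		 * one invlpg for a single page, a full flush otherwise. */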
		MULTI_update_va_mapping(mcs.mc, vaddr,
					mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid = DOMID_SELF
		},
		.out = {
			.nr_extents = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid = DOMID_SELF
		}
	};

	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

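	/* The exchange must be all-or-nothing from our side: a partial
	 * swap would leave the region in a state we cannot unwind. */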
	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}

int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
				 unsigned int address_bits)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long flags;
	int success;

	/*
	 * Currently an auto-translated guest will not perform I/O, nor will
	 * it require PAE page directories below 4GB. Therefore any calls to
	 * this function are redundant and can be ignored.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn(vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
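
/*
 * Hypothetical usage sketch (buf and size are not variables in this
 * file): a driver needing a machine-contiguous buffer addressable with
 * 32 bits could call
 *
 *	rc = xen_create_contiguous_region((unsigned long)buf,
 *					  get_order(size), 32);
 *
 * and undo it with xen_destroy_contiguous_region() when done.
 */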

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
	unsigned long *out_frames = discontig_frames, in_frame;
	unsigned long flags;
	int success;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn(vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
				      0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

#ifdef CONFIG_XEN_PVHVM
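/* Tell Xen that a process pagetable is being torn down so the
 * hypervisor can drop any shadow state for it eagerly rather than
 * waiting for the last reference to disappear. */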
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
	struct xen_hvm_pagetable_dying a;
	int rc;

	a.domid = DOMID_SELF;
	a.gpa = __pa(mm->pgd);
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	WARN_ON_ONCE(rc < 0);
}

static int is_pagetable_dying_supported(void)
{
	struct xen_hvm_pagetable_dying a;
	int rc = 0;

	a.domid = DOMID_SELF;
	a.gpa = 0x00;
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	if (rc < 0) {
		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
		return 0;
	}
	return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
	if (is_pagetable_dying_supported())
		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
}
#endif

#define REMAP_BATCH_SIZE 16

struct remap_data {
	unsigned long mfn;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

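/* apply_to_page_range() callback: rather than writing each PTE
 * directly (a plain write of a foreign MFN would be refused), queue an
 * mmu_update entry for Xen to apply on behalf of the target domain. */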
static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));

	rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       unsigned long mfn, int nr,
			       pgprot_t prot, unsigned domid)
{
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	int batch;
	unsigned long range;
	int err = 0;

	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
		 (VM_PFNMAP | VM_RESERVED | VM_IO)));

	rmd.mfn = mfn;
	rmd.prot = prot;

	while (nr) {
		batch = min(REMAP_BATCH_SIZE, nr);
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		err = -EFAULT;
		if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
			goto out;

		nr -= batch;
		addr += range;
	}

	err = 0;
out:

	flush_tlb_all();

	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
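
/* The main in-tree user of this interface is the privcmd driver, which
 * maps pages belonging to another domain (selected by domid) into a
 * privileged process's address space. */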

#ifdef CONFIG_XEN_DEBUG_FS

static int p2m_dump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, p2m_dump_show, NULL);
}

static const struct file_operations p2m_dump_fops = {
	.open = p2m_dump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_batched);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_batched);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_batched);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
//			   &mmu_stats.pte_update_pinned);
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_batched);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo,
				     MMU_UPDATE_HISTO);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);

	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */