xen: defer building p2m mfn structures until kernel is mapped
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 413b19b3d0fe5322ebe5a492d432eb132304aadf..9b43bb398d377b2fef4b432f1d63be30fb9f0957 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,6 +42,7 @@
 #include <linux/highmem.h>
 #include <linux/debugfs.h>
 #include <linux/bug.h>
+#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/gfp.h>
 
 #include <asm/mmu_context.h>
 #include <asm/setup.h>
 #include <asm/paravirt.h>
+#include <asm/e820.h>
 #include <asm/linkage.h>
+#include <asm/page.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 
+#include <xen/xen.h>
 #include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/hvm/hvm_op.h>
 #include <xen/interface/version.h>
+#include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
 
 #include "multicalls.h"
 
 #define MMU_UPDATE_HISTO       30
 
+/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and driver_pages, and
+ * balloon lists.
+ */
+DEFINE_SPINLOCK(xen_reservation_lock);
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct {
@@ -126,7 +138,8 @@ static inline void check_zero(void)
  * large enough to allocate page table pages to allocate the rest.
  * Each page can map 2MB.
  */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+#define LEVEL1_IDENT_ENTRIES   (PTRS_PER_PTE * 4)
+static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
 
 #ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
@@ -157,49 +170,162 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3);   /* actual vcpu cr3 */
  */
 #define USER_LIMIT     ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
 
+/*
+ * Xen leaves the responsibility for maintaining p2m mappings to the
+ * guests themselves, but it must also access and update the p2m array
+ * during suspend/resume when all the pages are reallocated.
+ *
+ * The p2m table is logically a flat array, but we implement it as a
+ * three-level tree to allow the address space to be sparse.
+ *
+ *                               Xen
+ *                                |
+ *     p2m_top              p2m_top_mfn
+ *       /  \                   /   \
+ * p2m_mid p2m_mid     p2m_mid_mfn p2m_mid_mfn
+ *    / \      / \         /           /
+ *  p2m p2m p2m p2m p2m p2m p2m ...
+ *
+ * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
+ * maximum representable pseudo-physical address space is:
+ *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
+ *
+ * P2M_PER_PAGE depends on the architecture, as an mfn is always
+ * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
+ * 512 and 1024 entries respectively.
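+ *
+ * For example, with 4 KiB pages this gives a maximum of
+ *   512 * 512 * 512    = 2^27 pages = 512 GiB on 64-bit, and
+ *   1024 * 1024 * 1024 = 2^30 pages =   4 TiB on 32-bit.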
+ */
+
+static unsigned long max_p2m_pfn __read_mostly;
 
-#define P2M_ENTRIES_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))
-#define TOP_ENTRIES            (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
+#define P2M_PER_PAGE           (PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE       (PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE       (PAGE_SIZE / sizeof(unsigned long **))
 
-/* Placeholder for holes in the address space */
-static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
-               { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+#define MAX_P2M_PFN            (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
 
- /* Array of pointers to pages containing p2m entries */
-static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
-               { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
+/* Placeholders for holes in the address space */
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
 
-/* Arrays of p2m arrays expressed in mfns used for save/restore */
-static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
 
-static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
-       __page_aligned_bss;
+RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
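+
+/*
+ * Note: RESERVE_BRK_ARRAY() declares only a pointer while reserving
+ * brk space for the array, and the two bare RESERVE_BRK()s above set
+ * aside worst-case room for the dynamically allocated mid levels.
+ * Nothing is actually allocated until extend_brk() is called during
+ * boot, once the kernel is mapped.
+ */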
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
-       BUG_ON(pfn >= MAX_DOMAIN_PAGES);
-       return pfn / P2M_ENTRIES_PER_PAGE;
+       BUG_ON(pfn >= MAX_P2M_PFN);
+       return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
+}
+
+static inline unsigned p2m_mid_index(unsigned long pfn)
+{
+       return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
 }
 
 static inline unsigned p2m_index(unsigned long pfn)
 {
-       return pfn % P2M_ENTRIES_PER_PAGE;
+       return pfn % P2M_PER_PAGE;
+}
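+
+/*
+ * Worked example (64-bit, 512 entries per level): for pfn 0x12345,
+ *   topidx = 0x12345 / (512 * 512) = 0
+ *   mididx = (0x12345 / 512) % 512 = 145
+ *   idx    = 0x12345 % 512         = 325
+ * so its mfn lives at p2m_top[0][145][325].
+ */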
+
+static void p2m_top_init(unsigned long ***top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = p2m_mid_missing;
+}
+
+static void p2m_top_mfn_init(unsigned long *top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+}
+
+static void p2m_mid_init(unsigned long **mid)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               mid[i] = p2m_missing;
 }
 
-/* Build the parallel p2m_top_mfn structures */
+static void p2m_mid_mfn_init(unsigned long *mid)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               mid[i] = virt_to_mfn(p2m_missing);
+}
+
+static void p2m_init(unsigned long *p2m)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_PER_PAGE; i++)
+               p2m[i] = INVALID_P2M_ENTRY;
+}
+
+/*
+ * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
+ *
+ * This is called both at boot time, and after resuming from suspend:
+ * - At boot time we're called very early, and must use extend_brk()
+ *   to allocate memory.
+ *
+ * - After resume we're called from within stop_machine, but the mfn
+ *   tree should already be completely allocated.
+ */
 void xen_build_mfn_list_list(void)
 {
-       unsigned pfn, idx;
+       unsigned pfn;
 
-       for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
-               unsigned topidx = p2m_top_index(pfn);
+       /* Pre-initialize p2m_top_mfn to be completely missing */
+       if (p2m_top_mfn == NULL) {
+               p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_mid_mfn_init(p2m_mid_missing_mfn);
 
-               p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
+               p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_top_mfn_init(p2m_top_mfn);
        }
 
-       for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
-               unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
-               p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
+       for (pfn = 0; pfn < max_p2m_pfn; pfn += P2M_PER_PAGE) {
+               unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx = p2m_mid_index(pfn);
+               unsigned long **mid;
+               unsigned long mid_mfn;
+               unsigned long *mid_mfn_p;
+
+               mid = p2m_top[topidx];
+
+               /* Don't bother allocating any mfn mid levels if
+                  they're just missing */
+               if (mid[mididx] == p2m_missing)
+                       continue;
+
+               mid_mfn = p2m_top_mfn[topidx];
+               mid_mfn_p = mfn_to_virt(mid_mfn);
+
+               if (mid_mfn_p == p2m_mid_missing_mfn) {
+                       /*
+                        * XXX boot-time only!  We should never find
+                        * missing parts of the mfn tree at runtime.
+                        * extend_brk() will BUG if we call it too
+                        * late.
+                        */
+                       mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+                       p2m_mid_mfn_init(mid_mfn_p);
+
+                       mid_mfn = virt_to_mfn(mid_mfn_p);
+
+                       p2m_top_mfn[topidx] = mid_mfn;
+               }
+
+               mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
        }
 }
 
@@ -208,8 +334,8 @@ void xen_setup_mfn_list_list(void)
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-               virt_to_mfn(p2m_top_mfn_list);
-       HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
+               virt_to_mfn(p2m_top_mfn);
+       HYPERVISOR_shared_info->arch.max_pfn = max_p2m_pfn;
 }
 
 /* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -219,96 +345,170 @@ void __init xen_build_dynamic_phys_to_machine(void)
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
        unsigned pfn;
 
-       for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+       max_p2m_pfn = max_pfn;
+
+       p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_init(p2m_missing);
+
+       p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_mid_init(p2m_mid_missing);
+
+       p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_top_init(p2m_top);
+
+       /*
+        * The domain builder gives us a pre-constructed p2m array in
+        * mfn_list for all the pages initially given to us, so we just
+        * need to graft that into our tree structure.
+        */
+       for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx = p2m_mid_index(pfn);
 
-               p2m_top[topidx] = &mfn_list[pfn];
-       }
+               if (p2m_top[topidx] == p2m_mid_missing) {
+                       unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+                       p2m_mid_init(mid);
 
-       xen_build_mfn_list_list();
+                       p2m_top[topidx] = mid;
+               }
+
+               p2m_top[topidx][mididx] = &mfn_list[pfn];
+       }
 }
 
 unsigned long get_phys_to_machine(unsigned long pfn)
 {
-       unsigned topidx, idx;
+       unsigned topidx, mididx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES))
+       if (unlikely(pfn >= MAX_P2M_PFN))
                return INVALID_P2M_ENTRY;
 
        topidx = p2m_top_index(pfn);
+       mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
-       return p2m_top[topidx][idx];
+
+       return p2m_top[topidx][mididx][idx];
 }
 EXPORT_SYMBOL_GPL(get_phys_to_machine);
 
-/* install a  new p2m_top page */
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
+static void *alloc_p2m_page(void)
 {
-       unsigned topidx = p2m_top_index(pfn);
-       unsigned long **pfnp, *mfnp;
-       unsigned i;
+       return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
 
-       pfnp = &p2m_top[topidx];
-       mfnp = &p2m_top_mfn[topidx];
+static void free_p2m_page(void *p)
+{
+       free_page((unsigned long)p);
+}
 
-       for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
-               p[i] = INVALID_P2M_ENTRY;
+/*
+ * Fully allocate the p2m structure for a given pfn.  We need to check
+ * that both the top and mid levels are allocated, and make sure the
+ * parallel mfn tree is kept in sync.  We may race with other cpus, so
+ * the new pages are installed with cmpxchg; if we lose the race then
+ * simply free the page we allocated and use the one that's there.
+ */
+static bool alloc_p2m(unsigned long pfn)
+{
+       unsigned topidx, mididx;
+       unsigned long ***top_p, **mid;
+       unsigned long *top_mfn_p, *mid_mfn;
 
-       if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
-               *mfnp = virt_to_mfn(p);
-               return true;
+       topidx = p2m_top_index(pfn);
+       mididx = p2m_mid_index(pfn);
+
+       top_p = &p2m_top[topidx];
+       mid = *top_p;
+
+       if (mid == p2m_mid_missing) {
+               /* Mid level is missing, allocate a new one */
+               mid = alloc_p2m_page();
+               if (!mid)
+                       return false;
+
+               p2m_mid_init(mid);
+
+               if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+                       free_p2m_page(mid);
        }
 
-       return false;
-}
+       top_mfn_p = &p2m_top_mfn[topidx];
+       mid_mfn = mfn_to_virt(*top_mfn_p);
 
-static void alloc_p2m(unsigned long pfn)
-{
-       unsigned long *p;
+       if (mid_mfn == p2m_mid_missing_mfn) {
+               /* Separately check the mid mfn level */
+               unsigned long missing_mfn;
+               unsigned long mid_mfn_mfn;
 
-       p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
-       BUG_ON(p == NULL);
+               mid_mfn = alloc_p2m_page();
+               if (!mid_mfn)
+                       return false;
 
-       if (!install_p2mtop_page(pfn, p))
-               free_page((unsigned long)p);
+               p2m_mid_mfn_init(mid_mfn);
+
+               missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+               mid_mfn_mfn = virt_to_mfn(mid_mfn);
+               if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+                       free_p2m_page(mid_mfn);
+       }
+
+       if (p2m_top[topidx][mididx] == p2m_missing) {
+               /* p2m leaf page is missing */
+               unsigned long *p2m;
+
+               p2m = alloc_p2m_page();
+               if (!p2m)
+                       return false;
+
+               p2m_init(p2m);
+
+               if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+                       free_p2m_page(p2m);
+               else
+                       mid_mfn[mididx] = virt_to_mfn(p2m);
+       }
+
+       return true;
 }
 
 /* Try to install p2m mapping; fail if intermediate bits missing */
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
-       unsigned topidx, idx;
+       unsigned topidx, mididx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
+       if (unlikely(pfn >= MAX_P2M_PFN)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return true;
        }
 
        topidx = p2m_top_index(pfn);
-       if (p2m_top[topidx] == p2m_missing) {
-               if (mfn == INVALID_P2M_ENTRY)
-                       return true;
-               return false;
-       }
-
+       mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
-       p2m_top[topidx][idx] = mfn;
+
+       if (p2m_top[topidx][mididx] == p2m_missing)
+               return mfn == INVALID_P2M_ENTRY;
+
+       p2m_top[topidx][mididx][idx] = mfn;
 
        return true;
 }
 
-void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-               return;
+               return true;
        }
 
        if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
-               alloc_p2m(pfn);
+               if (!alloc_p2m(pfn))
+                       return false;
 
                if (!__set_phys_to_machine(pfn, mfn))
-                       BUG();
+                       return false;
        }
+
+       return true;
 }
 
 unsigned long arbitrary_virt_to_mfn(void *vaddr)
@@ -378,6 +578,28 @@ static bool xen_page_pinned(void *ptr)
        return PagePinned(page);
 }
 
+static bool xen_iomap_pte(pte_t pte)
+{
+       return pte_flags(pte) & _PAGE_IOMAP;
+}
+
+static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+{
+       struct multicall_space mcs;
+       struct mmu_update *u;
+
+       mcs = xen_mc_entry(sizeof(*u));
+       u = mcs.args;
+
+       /* ptep might be kmapped when using 32-bit HIGHPTE */
+       u->ptr = arbitrary_virt_to_machine(ptep).maddr;
+       u->val = pte_val_ma(pteval);
+
+       MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
        struct multicall_space mcs;
@@ -454,6 +676,11 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
 {
+       if (xen_iomap_pte(pteval)) {
+               xen_set_iomap_pte(ptep, pteval);
+               goto out;
+       }
+
        ADD_STATS(set_pte_at, 1);
 //     ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
        ADD_STATS(set_pte_at_current, mm == current->mm);
@@ -524,8 +751,25 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
        return val;
 }
 
+static pteval_t iomap_pte(pteval_t val)
+{
+       if (val & _PAGE_PRESENT) {
+               unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+               pteval_t flags = val & PTE_FLAGS_MASK;
+
+               /* We assume the pte frame number is an MFN, so
+                  just use it as-is. */
+               val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
+       }
+
+       return val;
+}
+
 pteval_t xen_pte_val(pte_t pte)
 {
+       if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
+               return pte.pte;
+
        return pte_mfn_to_pfn(pte.pte);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
@@ -538,7 +782,22 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
 
 pte_t xen_make_pte(pteval_t pte)
 {
-       pte = pte_pfn_to_mfn(pte);
+       phys_addr_t addr = (pte & PTE_PFN_MASK);
+
+       /*
+        * Unprivileged domains are allowed to do IOMAP mappings for
+        * PCI passthrough, but may not map ISA space.  The ISA
+        * mappings are just dummy local mappings to keep other
+        * parts of the kernel happy.
+        */
+       if (unlikely(pte & _PAGE_IOMAP) &&
+           (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
+               pte = iomap_pte(pte);
+       } else {
+               pte &= ~_PAGE_IOMAP;
+               pte = pte_pfn_to_mfn(pte);
+       }
+
        return native_make_pte(pte);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
@@ -594,6 +853,11 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
+       if (xen_iomap_pte(pte)) {
+               xen_set_iomap_pte(ptep, pte);
+               return;
+       }
+
        ADD_STATS(pte_update, 1);
 //     ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
        ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
@@ -610,6 +874,11 @@ void xen_set_pte(pte_t *ptep, pte_t pte)
 #ifdef CONFIG_X86_PAE
 void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
+       if (xen_iomap_pte(pte)) {
+               xen_set_iomap_pte(ptep, pte);
+               return;
+       }
+
        set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
@@ -936,8 +1205,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
-       vm_unmap_aliases();
-
        xen_mc_batch();
 
        if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -1501,7 +1768,6 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
        if (PagePinned(virt_to_page(mm->pgd))) {
                SetPagePinned(page);
 
-               vm_unmap_aliases();
                if (!PageHighMem(page)) {
                        make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
                        if (level == PT_PTE && USE_SPLIT_PTLOCKS)
@@ -1619,6 +1885,9 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
        unsigned ident_pte;
        unsigned long pfn;
 
+       level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
+                                     PAGE_SIZE);
+
        ident_pte = 0;
        pfn = 0;
        for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
@@ -1629,7 +1898,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
                        pte_page = m2v(pmd[pmdidx].pmd);
                else {
                        /* Check for free pte pages */
-                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+                       if (ident_pte == LEVEL1_IDENT_ENTRIES)
                                break;
 
                        pte_page = &level1_ident_pgt[ident_pte];
@@ -1744,13 +2013,15 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
        return pgd;
 }
 #else  /* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
 
 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
                                         unsigned long max_pfn)
 {
        pmd_t *kernel_pmd;
 
+       level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+
        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
                                  xen_start_info->nr_pt_frames * PAGE_SIZE +
                                  512*1024);
@@ -1812,9 +2083,16 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
                pte = pfn_pte(phys, prot);
                break;
 
-       default:
+       case FIX_PARAVIRT_BOOTMAP:
+               /* This is an MFN, but it isn't an IO mapping from the
+                  IO domain */
                pte = mfn_pte(phys, prot);
                break;
+
+       default:
+               /* By default, set_fixmap is used for hardware mappings */
+               pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
+               break;
        }
 
        __native_set_fixmap(idx, pte);
@@ -1940,7 +2218,205 @@ void __init xen_init_mmu_ops(void)
        x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
        x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
        pv_mmu_ops = xen_mmu_ops;
+
+       vmap_lazy_unmap = false;
+}
+
+/* Protected by xen_reservation_lock. */
+#define MAX_CONTIG_ORDER 9 /* 2MB */
+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+
+#define VOID_PTE (mfn_pte(0, __pgprot(0)))
+static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
+                               unsigned long *in_frames,
+                               unsigned long *out_frames)
+{
+       int i;
+       struct multicall_space mcs;
+
+       xen_mc_batch();
+       for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
+               mcs = __xen_mc_entry(0);
+
+               if (in_frames)
+                       in_frames[i] = virt_to_mfn(vaddr);
+
+               MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
+               set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+
+               if (out_frames)
+                       out_frames[i] = virt_to_pfn(vaddr);
+       }
+       xen_mc_issue(0);
+}
+
+/*
+ * Update the pfn-to-mfn mappings for a virtual address range, either to
+ * point to an array of mfns, or contiguously from a single starting
+ * mfn.
+ */
+static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
+                                    unsigned long *mfns,
+                                    unsigned long first_mfn)
+{
+       unsigned i, limit;
+       unsigned long mfn;
+
+       xen_mc_batch();
+
+       limit = 1u << order;
+       for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
+               struct multicall_space mcs;
+               unsigned flags;
+
+               mcs = __xen_mc_entry(0);
+               if (mfns)
+                       mfn = mfns[i];
+               else
+                       mfn = first_mfn + i;
+
+               if (i < (limit - 1))
+                       flags = 0;
+               else {
+                       if (order == 0)
+                               flags = UVMF_INVLPG | UVMF_ALL;
+                       else
+                               flags = UVMF_TLB_FLUSH | UVMF_ALL;
+               }
+
+               MULTI_update_va_mapping(mcs.mc, vaddr,
+                               mfn_pte(mfn, PAGE_KERNEL), flags);
+
+               set_phys_to_machine(virt_to_pfn(vaddr), mfn);
+       }
+
+       xen_mc_issue(0);
+}
+
+/*
+ * Perform the hypercall to exchange a region of our pfns to point to
+ * memory with the required contiguous alignment.  Takes the pfns as
+ * input, and populates mfns as output.
+ *
+ * Returns a success code indicating whether the hypervisor was able to
+ * satisfy the request or not.
+ */
+static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
+                              unsigned long *pfns_in,
+                              unsigned long extents_out,
+                              unsigned int order_out,
+                              unsigned long *mfns_out,
+                              unsigned int address_bits)
+{
+       long rc;
+       int success;
+
+       struct xen_memory_exchange exchange = {
+               .in = {
+                       .nr_extents   = extents_in,
+                       .extent_order = order_in,
+                       .extent_start = pfns_in,
+                       .domid        = DOMID_SELF
+               },
+               .out = {
+                       .nr_extents   = extents_out,
+                       .extent_order = order_out,
+                       .extent_start = mfns_out,
+                       .address_bits = address_bits,
+                       .domid        = DOMID_SELF
+               }
+       };
+
+       BUG_ON(extents_in << order_in != extents_out << order_out);
+
+       rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
+       success = (exchange.nr_exchanged == extents_in);
+
+       BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
+       BUG_ON(success && (rc != 0));
+
+       return success;
+}
+
+int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
+                                unsigned int address_bits)
+{
+       unsigned long *in_frames = discontig_frames, out_frame;
+       unsigned long  flags;
+       int            success;
+
+       /*
+        * Currently an auto-translated guest will not perform I/O, nor will
+        * it require PAE page directories below 4GB. Therefore any calls to
+        * this function are redundant and can be ignored.
+        */
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return 0;
+
+       if (unlikely(order > MAX_CONTIG_ORDER))
+               return -ENOMEM;
+
+       memset((void *) vstart, 0, PAGE_SIZE << order);
+
+       spin_lock_irqsave(&xen_reservation_lock, flags);
+
+       /* 1. Zap current PTEs, remembering MFNs. */
+       xen_zap_pfn_range(vstart, order, in_frames, NULL);
+
+       /* 2. Get a new contiguous memory extent. */
+       out_frame = virt_to_pfn(vstart);
+       success = xen_exchange_memory(1UL << order, 0, in_frames,
+                                     1, order, &out_frame,
+                                     address_bits);
+
+       /* 3. Map the new extent in place of old pages. */
+       if (success)
+               xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
+       else
+               xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
+
+       spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+       return success ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+{
+       unsigned long *out_frames = discontig_frames, in_frame;
+       unsigned long  flags;
+       int success;
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return;
+
+       if (unlikely(order > MAX_CONTIG_ORDER))
+               return;
+
+       memset((void *) vstart, 0, PAGE_SIZE << order);
+
+       spin_lock_irqsave(&xen_reservation_lock, flags);
+
+       /* 1. Find start MFN of contiguous extent. */
+       in_frame = virt_to_mfn(vstart);
+
+       /* 2. Zap current PTEs. */
+       xen_zap_pfn_range(vstart, order, NULL, out_frames);
+
+       /* 3. Do the exchange for non-contiguous MFNs. */
+       success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
+                                       0, out_frames, 0);
+
+       /* 4. Map new pages in place of old pages. */
+       if (success)
+               xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
+       else
+               xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
+
+       spin_unlock_irqrestore(&xen_reservation_lock, flags);
 }
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
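+
+/*
+ * Illustrative usage (not part of this patch): a caller needing a
+ * machine-contiguous buffer below 4GB, e.g. for device DMA, might do:
+ *
+ *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
+ *
+ *	if (buf && xen_create_contiguous_region(buf, 2, 32) == 0) {
+ *		(use the 16KB machine-contiguous buffer)
+ *		xen_destroy_contiguous_region(buf, 2);
+ *	}
+ *
+ * Note both calls zero the region, so the buffer must be filled after
+ * creation and its contents saved before destruction.
+ */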
 
 #ifdef CONFIG_XEN_PVHVM
 static void xen_hvm_exit_mmap(struct mm_struct *mm)