#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/mm.h>
+
struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+/* These are unused until we support booting "pre-ballooned" */
+unsigned long xen_released_pages;
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
+
/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
static __read_mostly int xen_events_irq = -1;
+/* map fgmfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+ unsigned int domid)
+{
+ int rc;
+ struct xen_add_to_physmap_range xatp = {
+ .domid = DOMID_SELF,
+ .foreign_domid = domid,
+ .size = 1,
+ .space = XENMAPSPACE_gmfn_foreign,
+ };
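+ /* XENMEM_add_to_physmap_range takes guest handles to parallel arrays of
+  * foreign gmfns (idxs) and the local gpfns to back with them; this is a
+  * single-entry batch.
+  */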
+ xen_ulong_t idx = fgmfn;
+ xen_pfn_t gpfn = lpfn;
+
+ set_xen_guest_handle(xatp.idxs, &idx);
+ set_xen_guest_handle(xatp.gpfns, &gpfn);
+
+ rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+ if (rc) {
+ pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n",
+ rc, lpfn, fgmfn);
+ return 1;
+ }
+ return 0;
+}
+
+struct remap_data {
+ xen_pfn_t fgmfn; /* foreign domain's gmfn */
+ pgprot_t prot;
+ domid_t domid;
+ struct vm_area_struct *vma;
+ int index;
+ struct page **pages;
+ struct xen_remap_mfn_info *info;
+};
+
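+/* Called by apply_to_page_range() for each pte in the range: take the next
+ * ballooned page, ask Xen to back its pfn with the foreign gmfn, then
+ * install the pte.
+ */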
+static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct remap_data *info = data;
+ struct page *page = info->pages[info->index++];
+ unsigned long pfn = page_to_pfn(page);
+ pte_t pte = pfn_pte(pfn, info->prot);
+
+ if (map_foreign_page(pfn, info->fgmfn, info->domid))
+ return -EFAULT;
+ set_pte_at(info->vma->vm_mm, addr, ptep, pte);
+
+ return 0;
+}
+
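+/* Map @nr foreign gmfns starting at @mfn into @vma at @addr; @pages must
+ * hold pre-ballooned local pages to back the mapping.
+ */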
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr,
- unsigned long mfn, int nr,
- pgprot_t prot, unsigned domid)
+ xen_pfn_t mfn, int nr,
+ pgprot_t prot, unsigned domid,
+ struct page **pages)
{
- return -ENOSYS;
+ int err;
+ struct remap_data data;
+
+ /* TBD: Batching; the current sole caller only maps one page at a time */
+ if (nr > 1)
+ return -EINVAL;
+
+ data.fgmfn = mfn;
+ data.prot = prot;
+ data.domid = domid;
+ data.vma = vma;
+ data.index = 0;
+ data.pages = pages;
+ err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
+ remap_pte_fn, &data);
+ return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
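+
+/* Undo xen_remap_domain_mfn_range(): remove each pfn from the physmap so
+ * the caller can hand the ballooned pages back.
+ */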
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct xen_remove_from_physmap xrp;
+ unsigned long rc, pfn;
+
+ pfn = page_to_pfn(pages[i]);
+
+ xrp.domid = DOMID_SELF;
+ xrp.gpfn = pfn;
+ rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
+ if (rc) {
+ pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
+ pfn, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+
/*
* see Documentation/devicetree/bindings/arm/xen.txt for the
* documentation of the Xen Device Tree format.
return 0;
}
postcore_initcall(xen_init_events);
-/* XXX: only until balloon is properly working */
-int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
-{
- *pages = alloc_pages(highmem ? GFP_HIGHUSER : GFP_KERNEL,
- get_order(nr_pages));
- if (*pages == NULL)
- return -ENOMEM;
- return 0;
-}
-EXPORT_SYMBOL_GPL(alloc_xenballooned_pages);
-
-void free_xenballooned_pages(int nr_pages, struct page **pages)
-{
- kfree(*pages);
- *pages = NULL;
-}
-EXPORT_SYMBOL_GPL(free_xenballooned_pages);
-EXPORT_SYMBOL_GPL(privcmd_call);
+
+/* In the hypervisor.S file. */
+EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
+EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
+EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
+EXPORT_SYMBOL_GPL(privcmd_call);
return this_cpu_read(xen_vcpu_info.arch.cr2);
}
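+/* MMUEXT_TLB_FLUSH_ALL has the hypervisor flush the TLBs of every vCPU in
+ * this domain from one multicall, instead of the per-CPU IPIs the generic
+ * flush_tlb_all() would raise.
+ */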
+void xen_flush_tlb_all(void)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs;
+
+ trace_xen_mmu_flush_tlb_all(0);
+
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*op));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_TLB_FLUSH_ALL;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
static void xen_flush_tlb(void)
{
struct mmuext_op *op;
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr,
- unsigned long mfn, int nr,
- pgprot_t prot, unsigned domid)
+ xen_pfn_t mfn, int nr,
+ pgprot_t prot, unsigned domid,
+ struct page **pages)
{
struct remap_data rmd;
struct mmu_update mmu_update[REMAP_BATCH_SIZE];
err = 0;
out:
- flush_tlb_all();
+ xen_flush_tlb_all();
return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
+/* Returns: 0 on success */
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+ int numpgs, struct page **pages)
+{
+ if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
+
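+ /* Unmapping for auto-translated x86 guests is not implemented yet. */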
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
ifneq ($(CONFIG_ARM),y)
-obj-y += manage.o balloon.o
+obj-y += manage.o
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
endif
-obj-y += grant-table.o features.o events.o
+obj-$(CONFIG_X86) += fallback.o
+obj-y += grant-table.o features.o events.o balloon.o
obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
-dom0-$(CONFIG_ACPI) += acpi.o
+dom0-$(CONFIG_ACPI) += acpi.o $(xen-pad-y)
+xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
+#include <xen/balloon.h>
#include "privcmd.h"
MODULE_LICENSE("GPL");
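+
+/* Sentinel kept in vm_private_data while a mapping is being set up; for
+ * auto-translated guests alloc_empty_pages() replaces it with the array of
+ * ballooned pages that privcmd_close() eventually frees.
+ */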
+#define PRIV_VMA_LOCKED ((void *)1)
+
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
msg->va & PAGE_MASK,
msg->mfn, msg->npages,
vma->vm_page_prot,
- st->domain);
+ st->domain, NULL);
if (rc < 0)
return rc;
LIST_HEAD(pagelist);
struct mmap_mfn_state state;
- if (!xen_initial_domain())
- return -EPERM;
+ /* We only support privcmd_ioctl_mmap_batch for auto-translated guests. */
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -ENOSYS;
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
return -EFAULT;
domid_t domain;
unsigned long va;
struct vm_area_struct *vma;
+ int index;
/* A tristate:
* 0 for no errors
* 1 if at least one error has happened (and no
xen_pfn_t __user *user_mfn;
};
+/* auto translated dom0 note: if the domU being created is PV, then the mfn
+ * is a real mfn (addr on bus). If it's auto xlated, then the mfn is
+ * actually a pfn (the input to HAP).
+ */
static int mmap_batch_fn(void *data, void *state)
{
xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state;
+ struct vm_area_struct *vma = st->vma;
+ struct page **pages = vma->vm_private_data;
+ struct page *cur_page = NULL;
int ret;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ cur_page = pages[st->index++];
+
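+ /* For auto-translated guests, hand over the ballooned page backing this
+  * va so the foreign gmfn can be mapped into it.
+  */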
ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
- st->vma->vm_page_prot, st->domain);
+ st->vma->vm_page_prot, st->domain,
+ &cur_page);
/* Store error code for second pass. */
*(st->err++) = ret;
return __put_user(*mfnp, st->user_mfn++);
}
+/* Allocate pfns that are then mapped with gmfns from the foreign domid.
+ * Update the vma with the page info so it can be used later.
+ * Returns: 0 on success, otherwise -errno
+ */
+static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
+{
+ int rc;
+ struct page **pages;
+
+ pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+ if (pages == NULL)
+ return -ENOMEM;
+
+ rc = alloc_xenballooned_pages(numpgs, pages, 0);
+ if (rc != 0) {
+ pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
+ numpgs, rc);
+ kfree(pages);
+ return -ENOMEM;
+ }
+ BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
+ vma->vm_private_data = pages;
+
+ return 0;
+}
+
static struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
int *err_array = NULL;
struct mmap_batch_state state;
- if (!xen_initial_domain())
- return -EPERM;
-
switch (version) {
case 1:
if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
down_write(&mm->mmap_sem);
vma = find_vma(mm, m.addr);
- ret = -EINVAL;
if (!vma ||
vma->vm_ops != &privcmd_vm_ops ||
(m.addr != vma->vm_start) ||
((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
!privcmd_enforce_singleshot_mapping(vma)) {
up_write(&mm->mmap_sem);
+ ret = -EINVAL;
goto out;
}
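+ /* Auto-translated guests have no host frames behind this range yet, so
+  * back the vma with ballooned pages first.
+  */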
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ ret = alloc_empty_pages(vma, m.num);
+ if (ret < 0) {
+ up_write(&mm->mmap_sem);
+ goto out;
+ }
+ }
state.domain = m.dom;
state.vma = vma;
state.va = m.addr;
+ state.index = 0;
state.global_error = 0;
state.err = err_array;
up_write(&mm->mmap_sem);
- if (state.global_error && (version == 1)) {
- /* Write back errors in second pass. */
- state.user_mfn = (xen_pfn_t *)m.arr;
- state.err = err_array;
- ret = traverse_pages(m.num, sizeof(xen_pfn_t),
- &pagelist, mmap_return_errors_v1, &state);
+ if (version == 1) {
+ if (state.global_error) {
+ /* Write back errors in second pass. */
+ state.user_mfn = (xen_pfn_t *)m.arr;
+ state.err = err_array;
+ ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+ &pagelist, mmap_return_errors_v1, &state);
+ } else
+ ret = 0;
+
} else if (version == 2) {
ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
if (ret)
return ret;
}
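+
+/* On vma close for auto-translated guests: strip the foreign mappings from
+ * the physmap and return the backing pages to the balloon.
+ */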
+static void privcmd_close(struct vm_area_struct *vma)
+{
+ struct page **pages = vma->vm_private_data;
+ int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
+ return;
+
+ xen_unmap_domain_mfn_range(vma, numpgs, pages);
+ free_xenballooned_pages(numpgs, pages);
+ kfree(pages);
+}
+
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
}
static struct vm_operations_struct privcmd_vm_ops = {
+ .close = privcmd_close,
.fault = privcmd_fault
};
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
- return (xchg(&vma->vm_private_data, (void *)1) == NULL);
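+ /* Claim the vma only while vm_private_data is still NULL; unlike xchg(),
+  * a failed cmpxchg() leaves an installed pages array untouched.
+  */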
+ return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
}
const struct file_operations xen_privcmd_fops = {