/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}
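/*
 * Illustrative userspace usage (a sketch, not part of this driver): a
 * toolstack such as libxc issues hypercalls through this ioctl roughly
 * as follows. The mount point /proc/xen is a conventional assumption.
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version, 0, 0, 0, 0 },
 *	};
 *	int fd = open("/proc/xen/privcmd", O_RDWR);
 *	long rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */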
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
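/*
 * Layout note: the pageidx > PAGE_SIZE-size test above means an
 * element never straddles a page boundary; each page in the list
 * carries PAGE_SIZE / size whole elements, and any remainder at the
 * end of a page is simply left unused.
 */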
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;

		pageidx += size;
	}

	return ret;
}
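/*
 * Usage sketch with a hypothetical callback (not part of this file):
 *
 *	static int count_errors(void *data, void *state)
 *	{
 *		if (*(xen_pfn_t *)data & 0xf0000000U)
 *			(*(int *)state)++;
 *		return 0;
 *	}
 *
 *	int nerr = 0;
 *	traverse_pages(nelem, sizeof(xen_pfn_t), &pagelist,
 *		       count_errors, &nerr);
 */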
struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};
static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space; as an unsigned
	 * value, -st->va is the number of bytes from st->va to the top
	 * of the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}
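/*
 * Worked example of the wrap check (illustrative, 64-bit build):
 * with st->va = 0xffffffffffff0000, -st->va is 0x10000, so any chunk
 * of 16 or more 4KiB pages has npages << PAGE_SHIFT >= 0x10000 and
 * is rejected before va + len can wrap past zero.
 */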
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}
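/*
 * Illustrative caller's view (a sketch; struct layouts are those of
 * xen/privcmd.h). The region must be mmap()ed from the privcmd fd
 * first so find_vma() above can locate it:
 *
 *	void *va = mmap(NULL, npages << PAGE_SHIFT, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	struct privcmd_mmap_entry ent = {
 *		.va = (unsigned long)va, .mfn = mfn, .npages = npages,
 *	};
 *	struct privcmd_mmap cmd = { .num = 1, .dom = domid, .entry = &ent };
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd);
 */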
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int err;

	xen_pfn_t __user *user;
};
static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
				       st->vma->vm_page_prot, st->domain) < 0) {
		*mfnp |= 0xf0000000U;
		st->err++;
	}
	st->va += PAGE_SIZE;

	return 0;
}
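/*
 * The 0xf0000000U tag is the classic MMAPBATCH way of reporting
 * per-page failures: the kernel copy of the frame array is marked
 * here and written back by mmap_return_errors below, so callers are
 * expected to test each returned entry (a sketch of the toolstack
 * side, which lives outside this file):
 *
 *	if ((arr[i] & 0xf0000000U) == 0xf0000000U)
 *		handle_failed_page(i);	(hypothetical handler)
 */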
static int mmap_return_errors(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	return put_user(*mfnp, st->user++);
}
static struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(void __user *udata)
{
	int ret;
	struct privcmd_mmapbatch m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&m, udata, sizeof(m)))
		return -EFAULT;

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
			   m.arr);

	if (ret || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	ret = -EINVAL;
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		goto out;
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.err = 0;

	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
			     &pagelist, mmap_batch_fn, &state);

	up_write(&mm->mmap_sem);

	if (state.err > 0) {
		state.user = m.arr;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
				     &pagelist,
				     mmap_return_errors, &state);
	}

out:
	free_page_list(&pagelist);

	return ret;
}
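/*
 * Illustrative caller's view (a sketch; as with IOCTL_PRIVCMD_MMAP,
 * the VMA must come from mmap() on the privcmd fd):
 *
 *	xen_pfn_t arr[N];	(filled with frame numbers to map)
 *	struct privcmd_mmapbatch m = {
 *		.num = N, .dom = domid,
 *		.addr = (__u64)(unsigned long)va, .arr = arr,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &m);
 */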
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
#ifndef HAVE_ARCH_PRIVCMD_MMAP
/* Mappings are set up front by the ioctls above, so a fault on a
 * privcmd VMA means userspace touched an address that was never
 * populated; report it as a bus error. */
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct privcmd_vm_ops = {
	.fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Unsupported for auto-translated guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
/*
 * A privcmd VMA may be populated only once: atomically exchange a
 * non-NULL token into vm_private_data and succeed only if the old
 * value was NULL.
 */
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif
const struct file_operations privcmd_file_ops = {
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
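/*
 * Registration note (an assumption about the surrounding tree, not
 * visible in this file): privcmd_file_ops is exported to the xenfs
 * code, which exposes it as the "privcmd" node, so userspace
 * conventionally reaches this driver via /proc/xen/privcmd.
 */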