/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>

struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } },	\
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })
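
/*
 * Pending host operations are accumulated in hvc->ops[] and flushed to
 * the host by do_ops(), which dispatches each queued op to the
 * corresponding map()/unmap()/protect() call on the mm's host id.
 * "finished" marks the last batch of a walk so the lower layers know
 * no further ops are coming.
 */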

static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			BUG();
			break;
		}
	}

	return ret;
}

static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MMAP,
				    .u = { .mmap = { .addr	= virt,
						     .len	= len,
						     .prot	= prot,
						     .fd	= fd,
						     .offset	= offset }
					 } });
	return ret;
}

static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if ((addr >= STUB_START) && (addr < STUB_END))
		return -EINVAL;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MUNMAP,
				    .u = { .munmap = { .addr	= addr,
						       .len	= len } } });
	return ret;
}

static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MPROTECT,
				    .u = { .mprotect = { .addr	= addr,
							 .len	= len,
							 .prot	= prot } } });
	return ret;
}

#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
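
/*
 * ADD_ROUND(n, inc) advances n to the next inc-aligned boundary, e.g.
 * ADD_ROUND(0x1234, 0x1000) == 0x2000.  Note it always moves forward,
 * even from an aligned address: ADD_ROUND(0x1000, 0x1000) == 0x2000.
 * inc must be a power of two.
 */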

static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}
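
/*
 * update_pmd_range() and update_pud_range() each walk one page-table
 * level, descending into the next level where an entry is present and
 * queueing a single munmap covering the entry's whole range when it is
 * absent but marked as new (or when the flush is forced).
 */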

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}
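
/*
 * The kernel-range flush below walks init_mm's page tables and applies
 * changes immediately via os_unmap_memory()/os_protect_memory() rather
 * than batching through a host_vm_change, since kernel mappings are
 * modified in UML's own host address space, not in a child process.
 */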

static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr, PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}
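
/*
 * flush_tlb_page() is the single-page fast path: it walks to the PTE
 * for one address and issues at most one host map/unmap/protect call,
 * bypassing the batching machinery used for range flushes.
 */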

void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}
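
/*
 * The *_offset_proc() wrappers and addr_pte() below are thin helpers
 * around the generic page-table accessors, for code elsewhere in UML
 * that needs to walk a given mm's page tables.
 */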

pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}
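
/*
 * The remaining entry points implement the generic TLB-flush API in
 * terms of fix_range() for process address spaces and
 * flush_tlb_kernel_range_common() for kernel mappings.
 */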

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}
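
/*
 * force_flush_all() re-syncs every VMA of the current address space
 * with force = 1, remapping even pages whose PTEs are not marked as
 * changed.
 */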

void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}