/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
15 static inline int __handle_fault(struct mm_struct
*mm
, unsigned long address
,
18 struct vm_area_struct
*vma
;
21 down_read(&mm
->mmap_sem
);
22 vma
= find_vma(mm
, address
);
25 if (unlikely(vma
->vm_start
> address
)) {
26 if (!(vma
->vm_flags
& VM_GROWSDOWN
))
28 if (expand_stack(vma
, address
))
33 /* page not present, check vm flags */
34 if (!(vma
->vm_flags
& (VM_READ
| VM_EXEC
| VM_WRITE
)))
37 if (!(vma
->vm_flags
& VM_WRITE
))
42 switch (handle_mm_fault(mm
, vma
, address
, write_access
)) {
58 up_read(&mm
->mmap_sem
);
62 up_read(&mm
->mmap_sem
);
63 if (current
->pid
== 1) {
67 printk("VM: killing process %s\n", current
->comm
);
71 up_read(&mm
->mmap_sem
);
72 current
->thread
.prot_addr
= address
;
73 current
->thread
.trap_no
= 0x11;
74 force_sig(SIGBUS
, current
);
78 static inline size_t __user_copy_pt(unsigned long uaddr
, void *kptr
,
79 size_t n
, int write_user
)
81 struct mm_struct
*mm
= current
->mm
;
82 unsigned long offset
, pfn
, done
, size
;
90 spin_lock(&mm
->page_table_lock
);
92 pgd
= pgd_offset(mm
, uaddr
);
93 if (pgd_none(*pgd
) || unlikely(pgd_bad(*pgd
)))
96 pmd
= pmd_offset(pgd
, uaddr
);
97 if (pmd_none(*pmd
) || unlikely(pmd_bad(*pmd
)))
100 pte
= pte_offset_map(pmd
, uaddr
);
101 if (!pte
|| !pte_present(*pte
) ||
102 (write_user
&& !pte_write(*pte
)))
109 offset
= uaddr
& (PAGE_SIZE
- 1);
110 size
= min(n
- done
, PAGE_SIZE
- offset
);
112 to
= (void *)((pfn
<< PAGE_SHIFT
) + offset
);
115 from
= (void *)((pfn
<< PAGE_SHIFT
) + offset
);
118 memcpy(to
, from
, size
);
123 spin_unlock(&mm
->page_table_lock
);
126 spin_unlock(&mm
->page_table_lock
);
127 if (__handle_fault(mm
, uaddr
, write_user
))
132 size_t copy_from_user_pt(size_t n
, const void __user
*from
, void *to
)
136 if (segment_eq(get_fs(), KERNEL_DS
)) {
137 memcpy(to
, (void __kernel __force
*) from
, n
);
140 rc
= __user_copy_pt((unsigned long) from
, to
, n
, 0);
142 memset(to
+ n
- rc
, 0, rc
);
146 size_t copy_to_user_pt(size_t n
, void __user
*to
, const void *from
)
148 if (segment_eq(get_fs(), KERNEL_DS
)) {
149 memcpy((void __kernel __force
*) to
, from
, n
);
152 return __user_copy_pt((unsigned long) to
, (void *) from
, n
, 1);