/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
15 static inline int __handle_fault(struct mm_struct
*mm
, unsigned long address
,
18 struct vm_area_struct
*vma
;
21 down_read(&mm
->mmap_sem
);
22 vma
= find_vma(mm
, address
);
25 if (unlikely(vma
->vm_start
> address
)) {
26 if (!(vma
->vm_flags
& VM_GROWSDOWN
))
28 if (expand_stack(vma
, address
))
33 /* page not present, check vm flags */
34 if (!(vma
->vm_flags
& (VM_READ
| VM_EXEC
| VM_WRITE
)))
37 if (!(vma
->vm_flags
& VM_WRITE
))
42 switch (handle_mm_fault(mm
, vma
, address
, write_access
)) {
58 up_read(&mm
->mmap_sem
);
62 up_read(&mm
->mmap_sem
);
63 if (is_init(current
)) {
65 down_read(&mm
->mmap_sem
);
68 printk("VM: killing process %s\n", current
->comm
);
72 up_read(&mm
->mmap_sem
);
73 current
->thread
.prot_addr
= address
;
74 current
->thread
.trap_no
= 0x11;
75 force_sig(SIGBUS
, current
);
79 static inline size_t __user_copy_pt(unsigned long uaddr
, void *kptr
,
80 size_t n
, int write_user
)
82 struct mm_struct
*mm
= current
->mm
;
83 unsigned long offset
, pfn
, done
, size
;
91 spin_lock(&mm
->page_table_lock
);
93 pgd
= pgd_offset(mm
, uaddr
);
94 if (pgd_none(*pgd
) || unlikely(pgd_bad(*pgd
)))
97 pmd
= pmd_offset(pgd
, uaddr
);
98 if (pmd_none(*pmd
) || unlikely(pmd_bad(*pmd
)))
101 pte
= pte_offset_map(pmd
, uaddr
);
102 if (!pte
|| !pte_present(*pte
) ||
103 (write_user
&& !pte_write(*pte
)))
110 offset
= uaddr
& (PAGE_SIZE
- 1);
111 size
= min(n
- done
, PAGE_SIZE
- offset
);
113 to
= (void *)((pfn
<< PAGE_SHIFT
) + offset
);
116 from
= (void *)((pfn
<< PAGE_SHIFT
) + offset
);
119 memcpy(to
, from
, size
);
124 spin_unlock(&mm
->page_table_lock
);
127 spin_unlock(&mm
->page_table_lock
);
128 if (__handle_fault(mm
, uaddr
, write_user
))
133 size_t copy_from_user_pt(size_t n
, const void __user
*from
, void *to
)
137 if (segment_eq(get_fs(), KERNEL_DS
)) {
138 memcpy(to
, (void __kernel __force
*) from
, n
);
141 rc
= __user_copy_pt((unsigned long) from
, to
, n
, 0);
143 memset(to
+ n
- rc
, 0, rc
);
147 size_t copy_to_user_pt(size_t n
, void __user
*to
, const void *from
)
149 if (segment_eq(get_fs(), KERNEL_DS
)) {
150 memcpy((void __kernel __force
*) to
, from
, n
);
153 return __user_copy_pt((unsigned long) to
, (void *) from
, n
, 1);