/*
 * arch/s390/lib/uaccess_pt.c
 *
 * User access functions based on page table walks.
 *
 * Copyright IBM Corp. 2006
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>

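/*
 * Fault in a single user page: look up the VMA, expand the stack if
 * necessary, check the access permissions and let handle_mm_fault()
 * resolve the fault. Takes and releases mm->mmap_sem itself and
 * returns 0 on success or -EFAULT on failure.
 */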
static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
				 int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

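	/*
	 * Let the generic fault handler resolve the fault and account
	 * it as a minor or major fault for the current task.
	 */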
survive:
	switch (handle_mm_fault(mm, vma, address, write_access)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto out_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

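	/*
	 * Out of memory: init is given a chance to make progress by
	 * yielding and retrying; any other task just fails the access.
	 */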
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}

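/*
 * Copy data between kernel and user space by walking the user page
 * tables by hand. The walk is done under mm->page_table_lock; if a
 * table level is missing, or the page is not present (or not writable
 * for a write), the lock is dropped, __handle_fault() brings the page
 * in and the walk is retried. Returns the number of bytes not copied.
 */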
static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
				    size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

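		/*
		 * The page is present: copy to/from it through the
		 * kernel's 1:1 mapping of physical memory, so the page
		 * frame address is used directly as the kernel-side
		 * address of the user page.
		 */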
		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

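/*
 * Copy from user space. With KERNEL_DS the source is a kernel address
 * and a plain memcpy suffices; otherwise the page table walk above is
 * used and, as copy_from_user semantics require, any bytes that could
 * not be copied are zero-filled in the destination buffer.
 */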
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

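/*
 * Copy to user space. With KERNEL_DS the destination is a kernel
 * address and a plain memcpy suffices; otherwise the page table walk
 * above is used with write_user set.
 */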
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}