#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

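/*
 * copy_user_generic() picks one of the three implementations above at
 * boot via the alternatives mechanism, patching the call site to match
 * the CPU's capabilities.  Like the other x86 user-copy primitives it
 * returns the number of bytes that could not be copied, i.e. 0 on
 * success.
 */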
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
        unsigned ret;

        /*
         * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
         * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
         * Otherwise, use copy_user_generic_unrolled.
         */
        alternative_call_2(copy_user_generic_unrolled,
                           copy_user_generic_string,
                           X86_FEATURE_REP_GOOD,
                           copy_user_enhanced_fast_string,
                           X86_FEATURE_ERMS,
                           ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                                       "=d" (len)),
                           "1" (to), "2" (from), "3" (len)
                           : "memory", "rcx", "r8", "r9", "r10", "r11");
        return ret;
}
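
/*
 * Usage sketch (illustrative, not part of the original header): a
 * caller that has already validated the range with access_ok() might
 * write:
 *
 *	unsigned long left = copy_user_generic(dst, (__force void *)usrc, len);
 *
 *	if (left)
 *		return -EFAULT;		(left = bytes left uncopied)
 */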

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

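/*
 * For __builtin_constant_p() sizes the switch below open-codes the copy
 * as one or two mov instructions with exception-table fixups; all other
 * sizes fall back to copy_user_generic().  The trailing literal passed
 * to __get_user_asm() is the value left in 'ret' if that access faults:
 * the number of bytes still uncopied at that point (e.g. 10, then 2,
 * for the 10-byte case).
 */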
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:
                __uaccess_begin();
                __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                __uaccess_end();
                return ret;
        case 2:
                __uaccess_begin();
                __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                __uaccess_end();
                return ret;
        case 4:
                __uaccess_begin();
                __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                __uaccess_end();
                return ret;
        case 8:
                __uaccess_begin();
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                __uaccess_end();
                return ret;
        case 10:
                __uaccess_begin();
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (likely(!ret))
                        __get_user_asm(*(u16 *)(8 + (char *)dst),
                                       (u16 __user *)(8 + (char __user *)src),
                                       ret, "w", "w", "=r", 2);
                __uaccess_end();
                return ret;
        case 16:
                __uaccess_begin();
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (likely(!ret))
                        __get_user_asm(*(u64 *)(8 + (char *)dst),
                                       (u64 __user *)(8 + (char __user *)src),
                                       ret, "q", "", "=r", 8);
                __uaccess_end();
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        might_fault();
        return __copy_from_user_nocheck(dst, src, size);
}
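
/*
 * Example (sketch, not in the original header): with a constant size
 * the call collapses at compile time; for an 8-byte read the whole copy
 * becomes a single mov with a fixup entry:
 *
 *	u64 val;
 *
 *	if (__copy_from_user(&val, uptr, sizeof(val)))
 *		return -EFAULT;
 */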

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:
                __uaccess_begin();
                __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                               ret, "b", "b", "iq", 1);
                __uaccess_end();
                return ret;
        case 2:
                __uaccess_begin();
                __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                __uaccess_end();
                return ret;
        case 4:
                __uaccess_begin();
                __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                               ret, "l", "k", "ir", 4);
                __uaccess_end();
                return ret;
        case 8:
                __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 8);
                __uaccess_end();
                return ret;
        case 10:
                __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 10);
                if (likely(!ret)) {
                        /* compiler barrier: keep the two stores distinct and in order */
                        asm("":::"memory");
                        __put_user_asm(((u16 *)src)[4], (u16 __user *)dst + 4,
                                       ret, "w", "w", "ir", 2);
                }
                __uaccess_end();
                return ret;
        case 16:
                __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 16);
                if (likely(!ret)) {
                        /* compiler barrier, as in the 10-byte case above */
                        asm("":::"memory");
                        __put_user_asm(((u64 *)src)[1], (u64 __user *)dst + 1,
                                       ret, "q", "", "er", 8);
                }
                __uaccess_end();
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        might_fault();
        return __copy_to_user_nocheck(dst, src, size);
}
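
/*
 * __copy_from_user()/__copy_to_user() differ from the *_nocheck forms
 * only by might_fault(), which with debugging enabled warns when a
 * faultable user copy is attempted from atomic context.
 */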

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __uaccess_begin();
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                __uaccess_end();
                return ret;
        }
        case 2: {
                u16 tmp;
                __uaccess_begin();
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                __uaccess_end();
                return ret;
        }
        case 4: {
                u32 tmp;
                __uaccess_begin();
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                __uaccess_end();
                return ret;
        }
        case 8: {
                u64 tmp;
                __uaccess_begin();
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "er", 8);
                __uaccess_end();
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}
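
/*
 * Note that __copy_in_user() bounces each constant-sized item through a
 * kernel register temporary, pairing one __get_user_asm() with one
 * __put_user_asm(); it serves user-to-user copies, e.g. in the compat
 * syscall layer.
 */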
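/*
 * The *_inatomic variants omit the might_fault() check: they are meant
 * for callers that run with page faults disabled and can handle a
 * short copy.
 */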
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
        return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return __copy_to_user_nocheck(dst, src, size);
}

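/*
 * The _nocache variants copy with non-temporal stores where possible,
 * so that large one-off copies do not evict useful data from the CPU
 * caches.  The zerorest flag nominally asks for the uncopied tail of
 * the destination to be zeroed after a fault, matching the usual
 * copy_from_user() semantics.
 */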
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
        might_fault();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}

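/*
 * Out-of-line fault-recovery helper (arch/x86/lib): retries the
 * unfinished tail of a faulted copy one byte at a time and returns the
 * number of bytes that remain uncopied.
 */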
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */