Commit | Line | Data |
---|---|---|
1965aae3 PA |
1 | #ifndef _ASM_X86_UACCESS_64_H |
2 | #define _ASM_X86_UACCESS_64_H | |
1da177e4 LT |
3 | |
4 | /* | |
5 | * User space memory access functions | |
6 | */ | |
1da177e4 LT |
7 | #include <linux/compiler.h> |
8 | #include <linux/errno.h> | |
16dbc6c9 | 9 | #include <linux/lockdep.h> |
1b1d9258 JB |
10 | #include <asm/alternative.h> |
11 | #include <asm/cpufeature.h> | |
1da177e4 LT |
12 | #include <asm/page.h> |
13 | ||
1da177e4 LT |
14 | /* |
15 | * Copy To/From Userspace | |
16 | */ | |
17 | ||
18 | /* Handles exceptions in both to and from, but doesn't do access_ok */ | |
95912008 | 19 | __must_check unsigned long |
954e482b FY |
20 | copy_user_enhanced_fast_string(void *to, const void *from, unsigned len); |
21 | __must_check unsigned long | |
1b1d9258 JB |
22 | copy_user_generic_string(void *to, const void *from, unsigned len); |
23 | __must_check unsigned long | |
24 | copy_user_generic_unrolled(void *to, const void *from, unsigned len); | |
25 | ||
/*
 * Copy @len bytes between two kernel-mapped addresses using the best
 * string-copy variant this CPU supports; the call target is patched at
 * boot by the alternatives mechanism.  Does NOT perform access_ok()
 * checking (see the comment above the prototypes).  Returns the value
 * the asm implementation leaves in %rax -- conventionally the number of
 * bytes left uncopied; verify against arch/x86/lib/copy_user_64.S.
 */
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 /*
			  * Constraints pin the arguments to the registers
			  * the asm variants expect: to in %rdi ("D"),
			  * from in %rsi ("S"), len in %rdx ("d"); the
			  * result comes back in %rax ("a").
			  */
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 /* Scratch registers the asm variants may clobber. */
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
95912008 AK |
47 | |
48 | __must_check unsigned long | |
3c93ca00 | 49 | _copy_to_user(void __user *to, const void *from, unsigned len); |
95912008 | 50 | __must_check unsigned long |
9f0cf4ad | 51 | _copy_from_user(void *to, const void __user *from, unsigned len); |
95912008 AK |
52 | __must_check unsigned long |
53 | copy_in_user(void __user *to, const void __user *from, unsigned len); | |
54 | ||
/*
 * Checked copy from user space with a compile-time buffer-overflow
 * guard: if the compiler can prove the size of @to and @n exceeds it,
 * the copy is skipped (and, under CONFIG_DEBUG_VM, a warning is
 * emitted).
 *
 * Returns whatever _copy_from_user() returns (conventionally the
 * number of bytes left uncopied), or @n unchanged when the overflow
 * guard rejects the copy.
 */
static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	/* -1 == object size not determinable at compile time. */
	int sz = __compiletime_object_size(to);

	might_fault();
	/*
	 * In the sz >= n comparison, sz is promoted to unsigned long.
	 * That is safe only because the sz == -1 case is tested first;
	 * presumably __compiletime_object_size() never returns other
	 * negative values -- verify its contract if changing this.
	 */
	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
	else
		WARN(1, "Buffer overflow detected!\n");
#endif
	return n;
}
70 | ||
3c93ca00 FW |
71 | static __always_inline __must_check |
72 | int copy_to_user(void __user *dst, const void *src, unsigned size) | |
73 | { | |
74 | might_fault(); | |
75 | ||
76 | return _copy_to_user(dst, src, size); | |
77 | } | |
9f0cf4ad | 78 | |
/*
 * Unchecked copy from user space (caller is responsible for any
 * access_ok() checking).  For compile-time-constant sizes of
 * 1/2/4/8/10/16 bytes the copy is open-coded with __get_user_asm();
 * everything else falls through to copy_user_generic().
 *
 * Returns 0 on success; on a fault the exception fixup stores a
 * non-zero value in ret (see the unlikely(ret) checks below).
 */
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	/*
	 * 10- and 16-byte cases: copied as an 8-byte chunk plus the
	 * remainder.  The size argument to the first __get_user_asm()
	 * is the total bytes outstanding (10/16), presumably so the
	 * fixup code reports the full uncopied count on a fault --
	 * verify against __get_user_asm()'s definition.
	 */
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
1da177e4 | 122 | |
/*
 * Unchecked copy to user space (caller is responsible for any
 * access_ok() checking).  Mirror image of __copy_from_user(): constant
 * sizes of 1/2/4/8/10/16 bytes are open-coded with __put_user_asm();
 * everything else goes through copy_user_generic().
 *
 * Returns 0 on success; on a fault the exception fixup stores a
 * non-zero value in ret (see the unlikely(ret) checks below).
 */
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	/*
	 * 10-/16-byte cases: 8-byte chunk plus remainder, with the
	 * total outstanding size (10/16) passed to the first store --
	 * presumably for the fixup's uncopied-byte accounting; verify
	 * against __put_user_asm()'s definition.
	 */
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		/* Compiler barrier between the two halves. */
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		/* Compiler barrier between the two halves. */
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
1da177e4 | 166 | |
/*
 * Unchecked user-to-user copy.  For constant sizes of 1/2/4/8 bytes
 * the value is bounced through a kernel temporary: a __get_user_asm()
 * load followed, only if the load succeeded, by a __put_user_asm()
 * store.  Everything else goes through copy_user_generic() with both
 * pointers force-cast to kernel pointers.
 *
 * Returns 0 on success; non-zero in ret if either access faults.
 */
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}

	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
1da177e4 | 219 | |
14722485 JB |
220 | static __must_check __always_inline int |
221 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) | |
222 | { | |
223 | return copy_user_generic(dst, (__force const void *)src, size); | |
224 | } | |
b885808e AK |
225 | |
226 | static __must_check __always_inline int | |
227 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | |
228 | { | |
229 | return copy_user_generic((__force void *)dst, src, size); | |
230 | } | |
1da177e4 | 231 | |
b896313e JP |
232 | extern long __copy_user_nocache(void *dst, const void __user *src, |
233 | unsigned size, int zerorest); | |
0812a579 | 234 | |
f1800536 IM |
235 | static inline int |
236 | __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) | |
0812a579 AK |
237 | { |
238 | might_sleep(); | |
f1800536 | 239 | return __copy_user_nocache(dst, src, size, 1); |
0812a579 AK |
240 | } |
241 | ||
f1800536 IM |
242 | static inline int |
243 | __copy_from_user_inatomic_nocache(void *dst, const void __user *src, | |
244 | unsigned size) | |
0812a579 | 245 | { |
f1800536 | 246 | return __copy_user_nocache(dst, src, size, 0); |
0812a579 AK |
247 | } |
248 | ||
1129585a VM |
249 | unsigned long |
250 | copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); | |
251 | ||
1965aae3 | 252 | #endif /* _ASM_X86_UACCESS_64_H */ |