x86: some lock annotations for user copy paths
[deliverable/linux.git] include/asm-x86/uaccess_64.h
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

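/*
 * Editor's sketch (not part of the original header): the usual calling
 * pattern for copy_from_user().  It returns the number of bytes that
 * could NOT be copied, so any nonzero result is treated as -EFAULT.
 * The struct and pointer names below are illustrative assumptions.
 *
 *	struct my_args args;	// hypothetical
 *
 *	if (copy_from_user(&args, user_ptr, sizeof(args)))
 *		return -EFAULT;
 */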
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	/* A user copy can fault; the fault handler takes mmap_sem for reading. */
	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

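/*
 * Editor's note (not part of the original header): the might_sleep() /
 * might_lock_read() pair above is the lock annotation this commit adds.
 * It tells lockdep that a user copy may fault and take
 * current->mm->mmap_sem for reading, so a caller that already holds
 * mmap_sem for writing is flagged even on runs where no fault happens:
 *
 *	down_write(&current->mm->mmap_sem);
 *	__copy_from_user(&val, uptr, sizeof(val));	// lockdep complains
 */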
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

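/*
 * Editor's note (not part of the original header): "4[(u16 *)src]" is
 * ordinary C array indexing written commutatively; it is the same as
 * ((u16 *)src)[4], i.e. the u16 at byte offset 8 (and "1[(u64 *)src]"
 * is the u64 at byte offset 8).  The asm("":::"memory") between the two
 * stores is a compiler-only barrier that keeps the compiler from
 * reordering the second __put_user_asm before the first.
 */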
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}

	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

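/*
 * Editor's sketch (not part of the original header): __copy_in_user()
 * moves data between two user buffers by bouncing each constant-size
 * chunk through a kernel variable (tmp), so the load and the store can
 * each fault and be fixed up independently.  A hypothetical use:
 *
 *	if (copy_in_user(dst_uptr, src_uptr, 4))	// names illustrative
 *		return -EFAULT;
 */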
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

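/*
 * Editor's sketch (not part of the original header): typical use of the
 * string helpers.  strncpy_from_user() returns the length of the copied
 * string (or -EFAULT on a bad address), while clear_user() returns the
 * number of bytes it could not zero.  The buffer below is illustrative.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 */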
__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					    unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

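/*
 * Editor's note (not part of the original header): the *_inatomic
 * variants deliberately lack the might_sleep()/might_lock_read()
 * annotations added above; they are intended for callers that have
 * disabled page faults (e.g. via pagefault_disable()), where a faulting
 * copy returns early instead of sleeping on mmap_sem.
 */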
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

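/*
 * Editor's note (not part of the original header): the _nocache variants
 * funnel into __copy_user_nocache(), which uses non-temporal stores so a
 * large one-shot copy does not evict useful cache lines.  The zerorest
 * flag (1 vs. 0 above) selects whether the tail of the destination is
 * zeroed when the source faults partway through.
 */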
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* __X86_64_UACCESS_H */