#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erm, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */
32 | ||
33 | #define KERNEL_DS ((mm_segment_t) { ASI_P }) | |
34 | #define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */ | |
35 | ||
36 | #define VERIFY_READ 0 | |
37 | #define VERIFY_WRITE 1 | |
38 | ||
39 | #define get_fs() ((mm_segment_t) { get_thread_current_ds() }) | |
40 | #define get_ds() (KERNEL_DS) | |
41 | ||
42 | #define segment_eq(a,b) ((a).seg == (b).seg) | |
43 | ||
44 | #define set_fs(val) \ | |
45 | do { \ | |
46 | set_thread_current_ds((val).seg); \ | |
47 | __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ | |
48 | } while(0) | |
49 | ||
static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};

extern void __ret_efault(void);
extern void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })

#define __put_user(x,ptr) put_user(x,ptr)
#define __get_user(x,ptr) get_user(x,ptr)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data,addr,size) ({ \
	register int __pu_ret; \
	switch (size) { \
	case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
	case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
	case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
	case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} __pu_ret; })

#define __put_user_asm(x,size,addr,ret) \
__asm__ __volatile__( \
	"/* Put user asm, inline. */\n" \
"1:\t"	"st"#size "a %1, [%2] %%asi\n\t" \
	"clr %0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\t" \
	".previous\n\n\t" \
	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
	  "i" (-EFAULT))

extern int __put_user_bad(void);

#define __get_user_nocheck(data,addr,size,type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (size) { \
	case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
	case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
	case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
	case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
	default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
	} data = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
	register unsigned long __gu_val __asm__ ("l1"); \
	switch (size) { \
	case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
	case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
	case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
	case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
	default: if (__get_user_bad()) return retval; \
	} data = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret) \
__asm__ __volatile__( \
	"/* Get user asm, inline. */\n" \
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t" \
	"clr %0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"clr %1\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	  "i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval) \
if (__builtin_constant_p(retval) && retval == -EFAULT) \
	__asm__ __volatile__( \
		"/* Get user asm ret, inline. */\n" \
	"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
		".section __ex_table,\"a\"\n\t" \
		".align 4\n\t" \
		".word 1b,__ret_efault\n\n\t" \
		".previous\n\t" \
		: "=r" (x) : "r" (__m(addr))); \
else \
	__asm__ __volatile__( \
		"/* Get user asm ret, inline. */\n" \
	"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
		".section .fixup,#alloc,#execinstr\n\t" \
		".align 4\n" \
	"3:\n\t" \
		"ret\n\t" \
		" restore %%g0, %2, %%o0\n\n\t" \
		".previous\n\t" \
		".section __ex_table,\"a\"\n\t" \
		".align 4\n\t" \
		".word 1b, 3b\n\n\t" \
		".previous\n\t" \
		: "=r" (x) : "r" (__m(addr)), "i" (retval))

extern int __get_user_bad(void);

extern unsigned long __must_check ___copy_from_user(void *to,
						    const void __user *from,
						    unsigned long size);
extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
					  unsigned long size);
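/*
 * The fast-path copy routines below only signal that a fault occurred;
 * the out-of-line fixup helpers then re-walk the range to work out
 * exactly how many bytes remain uncopied, which is the value callers
 * expect back.
 */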
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_from_user(to, from, size);

	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);
	return ret;
}
#define __copy_from_user copy_from_user

extern unsigned long __must_check ___copy_to_user(void __user *to,
						  const void *from,
						  unsigned long size);
extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
					unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user

extern unsigned long __must_check ___copy_in_user(void __user *to,
						  const void __user *from,
						  unsigned long size);
extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
					unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

extern unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);

#define strncpy_from_user __strncpy_from_user

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */