#ifndef __ASM_SH64_UACCESS_H
#define __ASM_SH64_UACCESS_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/uaccess.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * User space memory access functions
 *
 * Copyright (C) 1999  Niibe Yutaka
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *         Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0x80000000)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit=(x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr)	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
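
/*
 * Illustrative sketch only (do_uaccess_work() is a hypothetical helper):
 * kernel code that wants the user-access routines to accept kernel pointers
 * widens the limit temporarily with set_fs()/get_fs(), always restoring the
 * saved value afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_uaccess_work();
 *	set_fs(old_fs);
 */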

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
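
/*
 * Worked example, assuming USER_DS (addr_limit.seg == 0x80000000):
 *
 *	addr = 0x7FFFFFF0, size = 0x10  ->  sum = 0x80000000  ->  __range_ok = 1 (fail)
 *	addr = 0x7FFFFFF0, size = 0x08  ->  sum = 0x7FFFFFF8  ->  __range_ok = 0 (ok)
 *
 * Note that the pseudo-code above also asks for the carry out of the 32-bit
 * sum; the plain comparison in __range_ok() only covers the case where
 * addr + size does not wrap around.
 */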

/* this function will go away soon - use access_ok() instead */
extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
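
/*
 * Usage sketch (all names below are hypothetical): callers either use the
 * checking variants directly, or batch the access_ok() check for several
 * accesses to the same user structure:
 *
 *	struct pair { int a, b; };
 *	struct pair __user *uptr;
 *	int a, b;
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(a, &uptr->a) || __get_user(b, &uptr->b))
 *		return -EFAULT;
 */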

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens. These macros can be optimized for
 * the case of just returning from the function in which xxx_ret is used.
 */

#define put_user_ret(x,ptr,ret) ({ \
	if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ \
	if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ \
	if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ \
	if (__get_user(x,ptr)) return ret; })
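
/*
 * For illustration, in a (hypothetical) syscall or ioctl handler
 *
 *	get_user_ret(val, uptr, -EFAULT);
 *
 * behaves like
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */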

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		retval = __get_user_asm_b(x, ptr); \
		break; \
	case 2: \
		retval = __get_user_asm_w(x, ptr); \
		break; \
	case 4: \
		retval = __get_user_asm_l(x, ptr); \
		break; \
	case 8: \
		retval = __get_user_asm_q(x, ptr); \
		break; \
	default: \
		__get_user_unknown(); \
		break; \
	} \
} while (0)

#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_addr = (long)(ptr); \
	long __gu_err; \
	__typeof(*(ptr)) __gu_val; \
	__asm__ ("":"=r" (__gu_val)); \
	__asm__ ("":"=r" (__gu_err)); \
	__get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

#define __get_user_check(x,ptr,size) \
({ \
	long __gu_addr = (long)(ptr); \
	long __gu_err = -EFAULT; \
	__typeof(*(ptr)) __gu_val; \
	__asm__ ("":"=r" (__gu_val)); \
	__asm__ ("":"=r" (__gu_err)); \
	if (__access_ok(__gu_addr, (size))) \
		__get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

extern long __get_user_asm_b(void *, long);
extern long __get_user_asm_w(void *, long);
extern long __get_user_asm_l(void *, long);
extern long __get_user_asm_q(void *, long);
extern void __get_user_unknown(void);

#define __put_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		retval = __put_user_asm_b(x, ptr); \
		break; \
	case 2: \
		retval = __put_user_asm_w(x, ptr); \
		break; \
	case 4: \
		retval = __put_user_asm_l(x, ptr); \
		break; \
	case 8: \
		retval = __put_user_asm_q(x, ptr); \
		break; \
	default: \
		__put_user_unknown(); \
	} \
} while (0)

#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val = (x); \
	__put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
	__pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
	long __pu_err = -EFAULT; \
	long __pu_addr = (long)(ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	\
	if (__access_ok(__pu_addr, (size))) \
		__put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err); \
	__pu_err; \
})

extern long __put_user_asm_b(void *, long);
extern long __put_user_asm_w(void *, long);
extern long __put_user_asm_l(void *, long);
extern long __put_user_asm_q(void *, long);
extern void __put_user_unknown(void);

/* Generic arbitrary sized copy. */
/* Return the number of bytes NOT copied */
/* XXX: should be such that: 4byte and the rest. */
extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);

#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

#define copy_to_user_ret(to,from,n,retval) ({ \
	if (copy_to_user(to,from,n)) \
		return retval; \
})

#define __copy_to_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define __copy_to_user_ret(to,from,n,retval) ({ \
	if (__copy_to_user(to,from,n)) \
		return retval; \
})

#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

#define copy_from_user_ret(to,from,n,retval) ({ \
	if (copy_from_user(to,from,n)) \
		return retval; \
})

#define __copy_from_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define __copy_from_user_ret(to,from,n,retval) ({ \
	if (__copy_from_user(to,from,n)) \
		return retval; \
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
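
/*
 * Minimal usage sketch (read_sample(), ubuf and s are hypothetical): both
 * copy routines return the number of bytes that could NOT be copied, so a
 * non-zero result is normally turned into -EFAULT by the caller:
 *
 *	static int read_sample(void __user *ubuf, struct sample *s)
 *	{
 *		if (copy_to_user(ubuf, s, sizeof(*s)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */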

/* XXX: Not sure it works well..
	should be such that: 4byte clear and the rest. */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })
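
/*
 * For illustration (ubuf, size and len are hypothetical): clear_user() is
 * typically used to zero-fill the tail of a partially written user buffer,
 * and like the copy routines it evaluates to the number of bytes left
 * untouched:
 *
 *	if (clear_user(ubuf + len, size - len))
 *		return -EFAULT;
 */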

extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);

#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} __sfu_res; })

#define strlen_user(str) strnlen_user(str, ~0UL >> 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *__s, long __n);

extern __inline__ long strnlen_user(const char *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}
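
/*
 * Usage sketch (kbuf and ustr are hypothetical): strncpy_from_user()
 * evaluates to -EFAULT when the source range fails __access_ok(), and
 * strnlen_user()/strlen_user() return 0 for a bad address, so callers
 * check for both cases:
 *
 *	char kbuf[32];
 *	long len = strncpy_from_user(kbuf, ustr, sizeof(kbuf) - 1);
 *
 *	if (len < 0)
 *		return len;
 *	kbuf[len] = '\0';
 */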

struct exception_table_entry
{
	unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE

/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
   kmalloc allocations to be 8-byte aligned. Without this, the alignment
   becomes BYTES_PER_WORD, i.e. only 4 (since sizeof(long) == sizeof(void *)
   == 4 on sh64 at the moment). */
#define ARCH_KMALLOC_MINALIGN 8

/*
 * We want 8-byte alignment for the slab caches as well, otherwise we have
 * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
 */
#define ARCH_SLAB_MINALIGN 8

/* Returns 0 if exception not found and fixup.unit otherwise. */
extern unsigned long search_exception_table(unsigned long addr);
extern const struct exception_table_entry *search_exception_tables(unsigned long addr);

#endif /* __ASM_SH64_UACCESS_H */