sh: Consolidate addr/access_ok across mmu/nommu on 32bit.
[deliverable/linux.git] / include / asm-sh / uaccess_64.h
1 #ifndef __ASM_SH_UACCESS_64_H
2 #define __ASM_SH_UACCESS_64_H
3
4 /*
5 * include/asm-sh/uaccess_64.h
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 * Copyright (C) 2003, 2004 Paul Mundt
9 *
10 * User space memory access functions
11 *
12 * Copyright (C) 1999 Niibe Yutaka
13 *
14 * Based on:
15 * MIPS implementation version 1.15 by
16 * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
17 * and i386 version.
18 *
19 * This file is subject to the terms and conditions of the GNU General Public
20 * License. See the file "COPYING" in the main directory of this archive
21 * for more details.
22 */
23 #include <linux/errno.h>
24 #include <linux/sched.h>
25
/* Access-type tags for access_ok(); the type is ignored on this arch. */
#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * A single address is OK when it lies strictly below the current
 * thread's address limit (addr_limit.seg).
 */
#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size; carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 *
 * NOTE(review): despite the comment above, the expression below performs
 * no explicit carry/overflow check on (addr + size); a wrapping sum could
 * pass the comparison. Confirm callers constrain 'size' before relying on
 * this for untrusted ranges. Returns 0 when the range is OK, 1 otherwise.
 */
#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)

/* Nonzero (true) when [addr, addr+size) lies below the address limit. */
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
41
/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
/* Checked variants: validate the user range before touching it. */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
64
/*
 * The "xxx_ret" versions return constant specified in third argument, if
 * something bad happens. These macros can be optimized for the
 * case of just returning from the function xxx_ret is used.
 *
 * NOTE(review): each of these hides a 'return ret' from the *calling*
 * function inside a statement expression -- control flow invisible at
 * the call site. Prefer plain put_user()/get_user() plus an explicit
 * error check in new code.
 */

#define put_user_ret(x,ptr,ret) ({ \
	if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ \
	if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ \
	if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ \
	if (__get_user(x,ptr)) return ret; })
82
/*
 * Oversized dummy type: casting an address to __large_struct lets the
 * access helpers take one pointer form for every width. Presumably
 * consumed by the assembly helpers (not visible in this file) --
 * confirm against the arch .S sources.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
85
/*
 * Dispatch a user-space load of 'size' bytes to the per-width asm
 * helper; 'retval' receives the helper's result (0 on success).
 * Sizes other than 1/2/4/8 reference the deliberately-undefined
 * __get_user_unknown(), turning a bad size into a link-time error.
 */
#define __get_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		retval = __get_user_asm_b(x, ptr);	\
		break;					\
	case 2:						\
		retval = __get_user_asm_w(x, ptr);	\
		break;					\
	case 4:						\
		retval = __get_user_asm_l(x, ptr);	\
		break;					\
	case 8:						\
		retval = __get_user_asm_q(x, ptr);	\
		break;					\
	default:					\
		__get_user_unknown();			\
		break;					\
	}						\
} while (0)
107
/*
 * Unchecked fetch: no access_ok() validation is done here. The asm
 * helper stores the loaded value through &__gu_val and reports any
 * fault via __gu_err (the statement expression's result).
 */
#define __get_user_nocheck(x,ptr,size)			\
({							\
	long __gu_err, __gu_val;			\
	__get_user_size((void *)&__gu_val, (long)(ptr),	\
			(size), __gu_err);		\
	(x) = (__typeof__(*(ptr)))__gu_val;		\
	__gu_err;					\
})
116
/*
 * Checked fetch: validates the source range with __access_ok() first.
 * Evaluates to 0 on success or -EFAULT on failure. __gu_val is
 * zero-initialized so that a failed range check stores 0 into (x)
 * instead of leaking uninitialized kernel stack contents -- previously
 * the assignment to (x) ran even when __get_user_size() was skipped.
 */
#define __get_user_check(x,ptr,size)			\
({							\
	long __gu_addr = (long)(ptr);			\
	long __gu_err = -EFAULT, __gu_val = 0;		\
	if (__access_ok(__gu_addr, (size)))		\
		__get_user_size((void *)&__gu_val, __gu_addr, \
				(size), __gu_err);	\
	(x) = (__typeof__(*(ptr))) __gu_val;		\
	__gu_err;					\
})
127
/* Per-width user-load helpers, implemented in assembly (byte/word/long/quad). */
extern long __get_user_asm_b(void *, long);
extern long __get_user_asm_w(void *, long);
extern long __get_user_asm_l(void *, long);
extern long __get_user_asm_q(void *, long);
/* Deliberately undefined: referencing it makes a bad size a link error. */
extern void __get_user_unknown(void);
133
/*
 * Dispatch a user-space store of 'size' bytes to the per-width asm
 * helper; 'retval' receives the helper's result (0 on success).
 * Sizes other than 1/2/4/8 reference the deliberately-undefined
 * __put_user_unknown(), turning a bad size into a link-time error.
 * A 'break' after the default case is added for consistency with
 * __get_user_size() (harmless today, safe if cases are ever appended).
 */
#define __put_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		retval = __put_user_asm_b(x, ptr);	\
		break;					\
	case 2:						\
		retval = __put_user_asm_w(x, ptr);	\
		break;					\
	case 4:						\
		retval = __put_user_asm_l(x, ptr);	\
		break;					\
	case 8:						\
		retval = __put_user_asm_q(x, ptr);	\
		break;					\
	default:					\
		__put_user_unknown();			\
		break;					\
	}						\
} while (0)
154
/*
 * Unchecked store: no access_ok() validation is done here. The value
 * is first copied into a correctly-typed local so the asm helper can
 * take its address; faults are reported via __pu_err.
 */
#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__typeof__(*(ptr)) __pu_val = (x);		\
	__put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
	__pu_err;					\
})
162
/*
 * Checked store: evaluates to 0 on success, or -EFAULT when the
 * destination range fails __access_ok() or the asm helper faults.
 * Note (x) is evaluated exactly once, before the range check.
 */
#define __put_user_check(x,ptr,size)			\
({							\
	long __pu_err = -EFAULT;			\
	long __pu_addr = (long)(ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);		\
							\
	if (__access_ok(__pu_addr, (size)))		\
		__put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err);\
	__pu_err;					\
})
173
/* Per-width user-store helpers, implemented in assembly (byte/word/long/quad). */
extern long __put_user_asm_b(void *, long);
extern long __put_user_asm_w(void *, long);
extern long __put_user_asm_l(void *, long);
extern long __put_user_asm_q(void *, long);
/* Deliberately undefined: referencing it makes a bad size a link error. */
extern void __put_user_unknown(void);
179
180 \f
/* Generic arbitrary sized copy. */
/* Return the number of bytes NOT copied */
/* XXX: should be such that: 4byte and the rest. */
extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);

/*
 * NOTE(review): the *_ret variants below embed a 'return retval' from
 * the *calling* function on a partial copy -- hidden control flow;
 * avoid in new code.
 */
#define copy_to_user_ret(to,from,n,retval) ({ \
	if (copy_to_user(to,from,n)) \
		return retval; \
})

/* Unchecked copy to user: caller must have done access_ok() itself. */
#define __copy_to_user(to,from,n)		\
	__copy_user((void *)(to),		\
		    (void *)(from), n)

#define __copy_to_user_ret(to,from,n,retval) ({ \
	if (__copy_to_user(to,from,n)) \
		return retval; \
})

#define copy_from_user_ret(to,from,n,retval) ({ \
	if (copy_from_user(to,from,n)) \
		return retval; \
})

/* Unchecked copy from user: caller must have done access_ok() itself. */
#define __copy_from_user(to,from,n)		\
	__copy_user((void *)(to),		\
		    (void *)(from), n)

#define __copy_from_user_ret(to,from,n,retval) ({ \
	if (__copy_from_user(to,from,n)) \
		return retval; \
})

/* The atomic-context forms are plain aliases of the unchecked copies. */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
216
/* XXX: Not sure it works well..
   should be such that: 4byte clear and the rest. */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

/*
 * Zero 'n' bytes of user memory. Evaluates to the number of bytes
 * that could NOT be cleared (0 on full success). The range is checked
 * with __access_ok() first; if the check fails (or n == 0) the
 * original count is returned unchanged.
 */
#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })
227
/* Assembly worker for strncpy_from_user(); takes raw addresses. */
extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);

/*
 * Copy a NUL-terminated string from user space. Evaluates to the
 * worker's result, or -EFAULT when the *source* range fails
 * __access_ok() (the destination is not validated here).
 */
#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if(__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} __sfu_res; })
237
/* Unbounded strlen: scan with the largest positive long as the limit. */
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *__s, long __n);
244
/*
 * Length of a user string including its terminating NUL, capped at n.
 * A start address that already fails __addr_ok() yields 0 without
 * touching user memory.
 */
static inline long strnlen_user(const char *s, long n)
{
	if (!__addr_ok(s))
		return 0;

	return __strnlen_user(s, n);
}
252
/*
 * Exception table entry: 'insn' is the address of the potentially
 * faulting instruction, 'fixup' the address to resume at after the
 * fault handler applies the fixup.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* With an MMU this arch supplies its own extable search. */
#ifdef CONFIG_MMU
#define ARCH_HAS_SEARCH_EXTABLE
#endif

/* Returns 0 if exception not found and fixup.unit otherwise. */
extern unsigned long search_exception_table(unsigned long addr);
extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
265
266 #endif /* __ASM_SH_UACCESS_64_H */
This page took 0.037195 seconds and 5 git commands to generate.