sh: Consolidate addr/access_ok across mmu/nommu on 32bit.
include/asm-sh/uaccess_32.h
/*
 * User space memory access functions
 *
 * Copyright (C) 1999, 2002  Niibe Yutaka
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *     Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 */
#ifndef __ASM_SH_UACCESS_32_H
#define __ASM_SH_UACCESS_32_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <asm/segment.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define __addr_ok(addr) \
	((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
#define __access_ok(addr, size)		\
	(__addr_ok((addr) + (size)))
#define access_ok(type, addr, size)	\
	(__chk_user_ptr(addr),		\
	 __access_ok((unsigned long __force)(addr), (size)))
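
/*
 * Example (illustrative sketch only; example_validate is a hypothetical
 * helper, not part of this header): a caller range-checks a user buffer
 * once with access_ok() before touching it with the unchecked accessors
 * defined further down.
 *
 *	static int example_validate(const void __user *ubuf,
 *				    unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_READ, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */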

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)		__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x,ptr)		__get_user_check((x), (ptr), sizeof(*(ptr)))
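
/*
 * Example (illustrative sketch; example_double and its names are
 * hypothetical): get_user()/put_user() size themselves from the pointer
 * type and return 0 on success or -EFAULT on a fault.
 *
 *	static int example_double(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return put_user(val * 2, uptr);
 *	}
 */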

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x,ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
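
/*
 * Example (illustrative sketch with hypothetical names): when several
 * fields of one user structure are accessed, a single access_ok() check
 * followed by the unchecked accessors avoids redundant limit checks.
 *
 *	struct example_pair { int a, b; };
 *
 *	static int example_read_pair(struct example_pair __user *p,
 *				     int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, p, sizeof(*p)))
 *			return -EFAULT;
 *		if (__get_user(*a, &p->a) || __get_user(*b, &p->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */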

/*
 * __m() wraps a user address in a dummy "large struct" so that the "m"
 * asm constraint is taken to cover the whole access, not just one byte.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		__get_user_asm(x, ptr, retval, "b");	\
		break;					\
	case 2:						\
		__get_user_asm(x, ptr, retval, "w");	\
		break;					\
	case 4:						\
		__get_user_asm(x, ptr, retval, "l");	\
		break;					\
	default:					\
		__get_user_unknown();			\
		break;					\
	}						\
} while (0)

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (likely(access_ok(VERIFY_READ, __gu_addr, (size))))		\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

#define __get_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%2, %1\n\t" \
	"2:\n" \
	/* fixup: on a fault, zero the result, set err and resume at 2 */ \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov	#0, %1\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n\t" \
	".balign	4\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	/* exception table entry: faulting insn 1b, fixup handler 3b */ \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err), "=&r" (x) \
	:"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })

extern void __get_user_unknown(void);

#define __put_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		__put_user_asm(x, ptr, retval, "b");	\
		break;					\
	case 2:						\
		__put_user_asm(x, ptr, retval, "w");	\
		break;					\
	case 4:						\
		__put_user_asm(x, ptr, retval, "l");	\
		break;					\
	case 8:						\
		__put_user_u64(x, ptr, retval);		\
		break;					\
	default:					\
		__put_user_unknown();			\
	}						\
} while (0)

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);	\
	__chk_user_ptr(ptr);				\
	__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err;					\
})

#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, (size))))	\
		__put_user_size((x), __pu_addr, (size),		\
				__pu_err);			\
	__pu_err;						\
})

#define __put_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%1, %2\n\t" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n\t" \
	".balign	4\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err) \
	:"r" (x), "m" (__m(addr)), "i" (-EFAULT), "0" (err) \
	:"memory"); })

/*
 * For 8-byte stores, %R1/%S1 select the two 32-bit halves of the 64-bit
 * operand (low and high word respectively) and %T2 addresses the memory
 * word following %2, so the order of the two mov.l's depends on the CPU
 * endianness.
 */
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%R1,%2\n\t" \
	"mov.l	%S1,%T2\n\t" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n\t" \
	".balign	4\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \
	: "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%S1,%2\n\t" \
	"mov.l	%R1,%T2\n\t" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n\t" \
	".balign	4\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \
	: "memory"); })
#endif

extern void __put_user_unknown(void);

/* Generic arbitrary sized copy: returns the number of bytes NOT copied. */
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user(to, (__force void *)from, n);
}

static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user((__force void *)to, from, n);
}
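
/*
 * Example (illustrative sketch, hypothetical names): like __copy_user(),
 * these return the number of bytes that could NOT be copied, so zero
 * means success. Callers pair them with an access_ok() check.
 *
 *	static long example_fetch(void *kbuf, const void __user *ubuf,
 *				  unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_READ, ubuf, len))
 *			return -EFAULT;
 *		if (__copy_from_user(kbuf, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */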

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/*
 * Clear the area and return the number of bytes NOT cleared:
 * 0 on success, non-zero only if a fault was taken part way.
 */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n)					\
({								\
	void *__cl_addr = (addr);				\
	unsigned long __cl_size = (n);				\
	if (__cl_size && __access_ok((unsigned long)__cl_addr,	\
				     __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);	\
	__cl_size;						\
})

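/*
 * Example (illustrative sketch, hypothetical name): zeroing a user
 * buffer; a non-zero return is the count of bytes left unwritten.
 *
 *	static int example_wipe(void __user *ubuf, unsigned long len)
 *	{
 *		if (clear_user(ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
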
/*
 * Low-level helper: copies up to __count bytes from __src, stopping
 * after the NUL; returns the string length (not counting the trailing
 * NUL), or -EFAULT on a fault.
 */
static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
{
	__kernel_size_t res;
	unsigned long __dummy, _d, _s, _c;

	__asm__ __volatile__(
		"9:\n"
		"mov.b	@%2+, %1\n\t"
		"cmp/eq	#0, %1\n\t"
		"bt/s	2f\n"
		"1:\n"
		"mov.b	%1, @%3\n\t"
		"dt	%4\n\t"
		"bf/s	9b\n\t"
		" add	#1, %3\n\t"
		"2:\n\t"
		"sub	%4, %0\n"
		"3:\n"
		".section .fixup,\"ax\"\n"
		"4:\n\t"
		"mov.l	5f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%9, %0\n\t"
		".balign 4\n"
		"5:	.long 3b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 9b,4b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
		: "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
		  "i" (-EFAULT)
		: "memory", "t");

	return res;
}

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dest:  Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
#define strncpy_from_user(dest,src,count)				\
({									\
	unsigned long __sfu_src = (unsigned long)(src);			\
	int __sfu_count = (int)(count);					\
	long __sfu_res = -EFAULT;					\
	if (__access_ok(__sfu_src, __sfu_count))			\
		__sfu_res = __strncpy_from_user((unsigned long)(dest),	\
						__sfu_src, __sfu_count); \
	__sfu_res;							\
})

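/*
 * Example (illustrative sketch, hypothetical names): copying a
 * user-supplied name into a fixed kernel buffer. A return equal to the
 * buffer size means the string was truncated before the NUL fit.
 *
 *	static long example_get_name(char *name, int size,
 *				     const char __user *uname)
 *	{
 *		long len = strncpy_from_user(name, uname, size);
 *
 *		if (len < 0)
 *			return len;
 *		if (len == size)
 *			return -ENAMETOOLONG;
 *		return len;
 *	}
 */
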
/*
 * Return the size of a string (including the ending 0 even when we have
 * exceeded the maximum string length).
 */
static __inline__ long __strnlen_user(const char __user *__s, long __n)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"1:\t"
		"mov.b	@(%0,%3), %1\n\t"
		"cmp/eq	%4, %0\n\t"
		"bt/s	2f\n\t"
		" add	#1, %0\n\t"
		"tst	%1, %1\n\t"
		"bf	1b\n\t"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	#0, %0\n"
		".balign 4\n"
		"4:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=z" (res), "=&r" (__dummy)
		: "0" (0), "r" (__s), "r" (__n)
		: "t");
	return res;
}

/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static __inline__ long strnlen_user(const char __user *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str)	strnlen_user(str, ~0UL >> 1)

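/*
 * Example (illustrative sketch, hypothetical name): validating a user
 * string against a fixed limit before copying it. A zero return means
 * the pointer faulted; a value greater than the limit means the string
 * is too long.
 *
 *	static int example_check_name(const char __user *uname)
 *	{
 *		long len = strnlen_user(uname, PATH_MAX);
 *
 *		if (len == 0)
 *			return -EFAULT;
 *		if (len > PATH_MAX)
 *			return -ENAMETOOLONG;
 *		return 0;
 *	}
 */
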
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

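/*
 * Conceptual sketch (hedged: this is NOT the in-tree lookup, which lives
 * in the generic extable code): resolving a faulting instruction address
 * against the table boils down to finding the entry whose insn matches
 * the faulting PC and jumping to its fixup.
 *
 *	static const struct exception_table_entry *
 *	example_search(const struct exception_table_entry *start,
 *		       const struct exception_table_entry *end,
 *		       unsigned long faulting_pc)
 *	{
 *		const struct exception_table_entry *e;
 *
 *		for (e = start; e < end; e++)
 *			if (e->insn == faulting_pc)
 *				return e;
 *		return NULL;
 *	}
 */
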
#endif /* __ASM_SH_UACCESS_32_H */