/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
                                         unsigned long size)
{
        return (addr >= MEM_USER_INTRPT &&
                addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
                size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
        (likely(__range_ok((unsigned long)addr, size) == 0))
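
/*
 * Example (illustrative only; "ubuf", "kbuf" and "len" are hypothetical
 * caller-side names, not part of this header): a typical pattern is to
 * validate the block once with access_ok() and then use the unchecked
 * __copy_* routines declared later in this file:
 *
 *      if (!access_ok(VERIFY_WRITE, ubuf, len))
 *              return -EFAULT;
 *      if (__copy_to_user(ubuf, kbuf, len))
 *              return -EFAULT;
 */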
94
95/*
96 * The exception table consists of pairs of addresses: the first is the
97 * address of an instruction that is allowed to fault, and the second is
98 * the address at which the program should continue. No registers are
99 * modified, so it is entirely up to the continuation code to figure out
100 * what to do.
101 *
102 * All the routines below use bits of fixup code that are out of line
103 * with the main instruction path. This means when everything is well,
104 * we don't even have to jump over them. Further, they do not intrude
105 * on our cache or tlb entries.
106 */
107
108struct exception_table_entry {
109 unsigned long insn, fixup;
110};
111
112extern int fixup_exception(struct pt_regs *regs);
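
/*
 * Sketch (illustrative, not the actual Tile fault path): a page-fault
 * handler consults the table via fixup_exception(), which rewrites the
 * saved program counter to the fixup address and returns nonzero when
 * the faulting instruction has an entry:
 *
 *      if (fixup_exception(regs))
 *              return;
 */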

/*
 * We return the __get_user_N function results in a structure,
 * thus in r0 and r1. If "err" is zero, "val" is the result
 * of the read; otherwise, "err" is -EFAULT.
 *
 * We rarely need 8-byte values on a 32-bit architecture, but
 * we size the structure to accommodate. In practice, for the
 * smaller reads, we can zero the high word for free, and the
 * caller will ignore it by virtue of casting anyway.
 */
struct __get_user {
        unsigned long long val;
        int err;
};

/*
 * FIXME: we should express these as inline extended assembler, since
 * they're fundamentally just a variable dereference and some
 * supporting exception_table gunk. Note that (a la i386) we can
 * extend the copy_to_user and copy_from_user routines to call into
 * such extended assembler routines, though we will have to use a
 * different return code in that case (1, 2, or 4, rather than -EFAULT).
 */
extern struct __get_user __get_user_1(const void *);
extern struct __get_user __get_user_2(const void *);
extern struct __get_user __get_user_4(const void *);
extern struct __get_user __get_user_8(const void *);
extern int __put_user_1(long, void *);
extern int __put_user_2(long, void *);
extern int __put_user_4(long, void *);
extern int __put_user_8(long long, void *);

/* Unimplemented routines to cause linker failures */
extern struct __get_user __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 */
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)                                              \
({ struct __get_user __ret;                                             \
        __typeof__(*(ptr)) const __user *__gu_addr = (ptr);             \
        __chk_user_ptr(__gu_addr);                                      \
        switch (sizeof(*(__gu_addr))) {                                 \
        case 1:                                                         \
                __ret = __get_user_1(__gu_addr);                        \
                break;                                                  \
        case 2:                                                         \
                __ret = __get_user_2(__gu_addr);                        \
                break;                                                  \
        case 4:                                                         \
                __ret = __get_user_4(__gu_addr);                        \
                break;                                                  \
        case 8:                                                         \
                __ret = __get_user_8(__gu_addr);                        \
                break;                                                  \
        default:                                                        \
                __ret = __get_user_bad();                               \
                break;                                                  \
        }                                                               \
        (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
                __ret.val;                                              \
        __ret.err;                                                      \
})
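
/*
 * Example (illustrative; the function and parameter names here are
 * hypothetical, and the caller is assumed to have already checked the
 * pointer with access_ok()):
 *
 *      static int fetch_word(const int __user *uptr, int *out)
 *      {
 *              return __get_user(*out, uptr);
 *      }
 */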

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 *
 * Implementation note: The "case 8" logic of casting to the type of
 * the result of subtracting the value from itself is basically a way
 * of keeping all integer types the same, but casting any pointers to
 * ptrdiff_t, i.e. also an integer type. This way there are no
 * questionable casts seen by the compiler on an ILP32 platform.
 */
#define __put_user(x, ptr)                                              \
({                                                                      \
        int __pu_err = 0;                                               \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        typeof(*__pu_addr) __pu_val = (x);                              \
        __chk_user_ptr(__pu_addr);                                      \
        switch (sizeof(__pu_val)) {                                     \
        case 1:                                                         \
                __pu_err = __put_user_1((long)__pu_val, __pu_addr);     \
                break;                                                  \
        case 2:                                                         \
                __pu_err = __put_user_2((long)__pu_val, __pu_addr);     \
                break;                                                  \
        case 4:                                                         \
                __pu_err = __put_user_4((long)__pu_val, __pu_addr);     \
                break;                                                  \
        case 8:                                                         \
                __pu_err =                                              \
                        __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
                                     __pu_addr);                        \
                break;                                                  \
        default:                                                        \
                __pu_err = __put_user_bad();                            \
                break;                                                  \
        }                                                               \
        __pu_err;                                                       \
})
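
/*
 * Example (illustrative; hypothetical names, pointer already validated
 * with access_ok()):
 *
 *      static int store_word(int __user *uptr, int val)
 *      {
 *              return __put_user(val, uptr);
 *      }
 */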

/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__Pu_addr = (ptr);                   \
        access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?    \
                __put_user((x), (__Pu_addr)) :                          \
                -EFAULT;                                                \
})

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) const __user *__Gu_addr = (ptr);             \
        access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?     \
                __get_user((x), (__Gu_addr)) :                          \
                ((x) = 0, -EFAULT);                                     \
})
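
/*
 * Example (illustrative; "uptr" is a hypothetical user-supplied
 * pointer): the checked forms fold the access_ok() test into the call,
 * so a system call can simply write:
 *
 *      int val;
 *      if (get_user(val, uptr))
 *              return -EFAULT;
 *      if (put_user(val + 1, uptr))
 *              return -EFAULT;
 */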

/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
        void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}
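
/*
 * Example (illustrative; "ubuf" and "kbuf" are hypothetical): these
 * routines return the number of bytes NOT copied, so the conventional
 * error check is for a nonzero result:
 *
 *      if (copy_to_user(ubuf, kbuf, sizeof(kbuf)))
 *              return -EFAULT;
 */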

/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable(). This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
        void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
        void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
}

#ifdef CONFIG_DEBUG_COPY_FROM_USER
extern void copy_from_user_overflow(void)
        __compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
                                                        const void __user *from,
                                                        unsigned long n)
{
        int sz = __compiletime_object_size(to);

        if (likely(sz == -1 || sz >= n))
                n = _copy_from_user(to, from, n);
        else
                copy_from_user_overflow();

        return n;
}
#else
#define copy_from_user _copy_from_user
#endif
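
/*
 * Example (illustrative; "struct req" and "ubuf" are hypothetical):
 * reading a fixed-size structure from user space, where a short copy
 * is reported to the caller as -EFAULT:
 *
 *      struct req r;
 *      if (copy_from_user(&r, ubuf, sizeof(r)))
 *              return -EFAULT;
 */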

#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to user space. Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_asm(
        void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_sleep();
        return __copy_in_user_asm(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
                n = __copy_in_user(to, from, n);
        return n;
}
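
/*
 * Example (illustrative; both pointers are hypothetical user-supplied
 * buffers): moving bytes between two user buffers without a kernel
 * bounce buffer:
 *
 *      if (copy_in_user(dst_ubuf, src_ubuf, len))
 *              return -EFAULT;
 */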
#endif

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
extern long strnlen_user_asm(const char __user *str, long n);
static inline long __must_check strnlen_user(const char __user *str, long n)
{
        might_fault();
        return strnlen_user_asm(str, n);
}
#define strlen_user(str) strnlen_user(str, LONG_MAX)
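
/*
 * Example (illustrative; "ustr" and "kbuf" are hypothetical): bounding
 * the scan by the size of the kernel-side buffer; a return of 0
 * indicates the string was inaccessible:
 *
 *      long len = strnlen_user(ustr, sizeof(kbuf));
 *      if (len == 0)
 *              return -EFAULT;
 */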

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space. This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
static inline long __must_check __strncpy_from_user(
        char *dst, const char __user *src, long count)
{
        might_fault();
        return strncpy_from_user_asm(dst, src, count);
}
static inline long __must_check strncpy_from_user(
        char *dst, const char __user *src, long count)
{
        if (access_ok(VERIFY_READ, src, 1))
                return __strncpy_from_user(dst, src, count);
        return -EFAULT;
}
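
/*
 * Example (illustrative; "uname" is a hypothetical user pointer): when
 * the user string is at least @count bytes long, the result is not
 * NUL-terminated, so callers often terminate it explicitly:
 *
 *      char kname[64];
 *      long len = strncpy_from_user(kname, uname, sizeof(kname));
 *      if (len < 0)
 *              return -EFAULT;
 *      kname[sizeof(kname) - 1] = '\0';
 */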

/**
 * clear_user: - Zero a block of memory in user space.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
        void __user *mem, unsigned long len)
{
        might_fault();
        return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __clear_user(mem, len);
        return len;
}
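
/*
 * Example (illustrative; "ubuf", "len" and "copied" are hypothetical):
 * zeroing the uncopied tail of a user buffer, using the same
 * bytes-not-done return convention as the copy routines:
 *
 *      if (clear_user(ubuf + copied, len - copied))
 *              return -EFAULT;
 */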

/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
        void __user *mem, unsigned long len)
{
        unsigned long retval;

        might_fault();
        retval = flush_user_asm(mem, len);
        mb_incoherent();
        return retval;
}

static inline unsigned long __must_check flush_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __flush_user(mem, len);
        return len;
}

/**
 * inv_user: - Invalidate a block of memory in user space from cache.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be invalidated.
 * On success, this will be zero.
 *
 * Note that on Tile64, the "inv" operation is in fact a
 * "flush and invalidate", so cache write-backs will occur prior
 * to the cache being marked invalid.
 */
extern unsigned long inv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __inv_user(
        void __user *mem, unsigned long len)
{
        unsigned long retval;

        might_fault();
        retval = inv_user_asm(mem, len);
        mb_incoherent();
        return retval;
}
static inline unsigned long __must_check inv_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __inv_user(mem, len);
        return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to flush-invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
        void __user *mem, unsigned long len)
{
        unsigned long retval;

        might_fault();
        retval = finv_user_asm(mem, len);
        mb_incoherent();
        return retval;
}
static inline unsigned long __must_check finv_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __finv_user(mem, len);
        return len;
}
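
/*
 * Example (illustrative; "ubuf" and "len" are hypothetical): the three
 * cache-maintenance helpers above share the clear_user() return
 * convention, so flushing a user buffer before, say, handing it to a
 * non-coherent device might look like:
 *
 *      if (flush_user(ubuf, len))
 *              return -EFAULT;
 */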

#endif /* _ASM_TILE_UACCESS_H */