arch/mips/include/asm/uaccess.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/asm-eva.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */
1da177e4 41
875d43e7 42#ifdef CONFIG_64BIT
1da177e4 43
949e51be
DD
44extern u64 __ua_limit;
45
46#define __UA_LIMIT __ua_limit
1da177e4
LT
47
48#define __UA_ADDR ".dword"
49#define __UA_LA "dla"
50#define __UA_ADDU "daddu"
51#define __UA_t0 "$12"
52#define __UA_t1 "$13"
53
875d43e7 54#endif /* CONFIG_64BIT */
1da177e4
LT
55
/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
#define USER_DS		((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

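/*
 * Illustrative sketch (hypothetical, not part of this header): the classic
 * pattern for temporarily lifting the address limit so that a user-pointer
 * API can be pointed at a kernel buffer.  Always restore the old limit.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = vfs_read(file, (char __user *)kbuf, len, &pos);
 *	set_fs(old_fs);
 */
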
/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *	  to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.	This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
({									\
	unsigned long __addr = (unsigned long) (addr);			\
	unsigned long __size = size;					\
	unsigned long __mask = mask;					\
	unsigned long __ok;						\
									\
	__chk_user_ptr(addr);						\
	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
		__ua_size(__size)));					\
	__ok == 0;							\
})

#define access_ok(type, addr, size)					\
	likely(__access_ok((addr), (size), __access_mask))

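/*
 * Illustrative sketch (hypothetical): validate a whole user buffer once
 * with access_ok(), then use the cheaper double-underscore accessors on it.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */
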
/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
	__put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)						\
	__get_user_check((x), (ptr), sizeof(*(ptr)))

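/*
 * Illustrative sketch (hypothetical ioctl handler): get_user()/put_user()
 * round-trip a scalar through user space.
 *
 *	int __user *uarg = (int __user *)arg;
 *	int val;
 *
 *	if (get_user(val, uarg))
 *		return -EFAULT;
 *	val = clamp(val, 0, 100);
 *	if (put_user(val, uarg))
 *		return -EFAULT;
 *	return 0;
 */
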
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

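/*
 * Illustrative sketch (hypothetical): after one access_ok() check over an
 * array, __get_user() avoids repeating the limit check on every element.
 *
 *	if (!access_ok(VERIFY_READ, uvec, n * sizeof(*uvec)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		if (__get_user(tmp, uvec + i))
 *			return -EFAULT;
 *		sum += tmp;
 *	}
 */
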
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA.  We need to use normal load instructions
 * to read data from kernel when operating in EVA mode.  We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd			_loadw
#else
#define _loadd(reg, addr)	"ld " reg ", " addr
#endif
#define _loadw(reg, addr)	"lw " reg ", " addr
#define _loadh(reg, addr)	"lh " reg ", " addr
#define _loadb(reg, addr)	"lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, _loadb, ptr); break;		\
	case 2: __get_data_asm(val, _loadh, ptr); break;		\
	case 4: __get_data_asm(val, _loadw, ptr); break;		\
	case 8: __GET_DW(val, _loadd, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, user_lb, ptr); break;		\
	case 2: __get_data_asm(val, user_lh, ptr); break;		\
	case 4: __get_data_asm(val, user_lw, ptr); break;		\
	case 8: __GET_DW(val, user_ld, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	if (segment_eq(get_fs(), get_ds())) {				\
		__get_kernel_common((x), size, ptr);			\
	} else {							\
		__chk_user_ptr(ptr);					\
		__get_user_common((x), size, ptr);			\
	}								\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) {		\
		if (segment_eq(get_fs(), get_ds()))			\
			__get_kernel_common((x), size, __gu_ptr);	\
		else							\
			__get_user_common((x), size, __gu_ptr);		\
	} else								\
		(x) = 0;						\
									\
	__gu_err;							\
})


#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"				\n"	\
	"2:	" insn("%D1", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA.  We need to use normal store
 * instructions to write data to kernel addresses when operating in EVA
 * mode.  We use these macros to avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored			_storew
#else
#define _stored(reg, addr)	"sd " reg ", " addr
#endif

#define _storew(reg, addr)	"sw " reg ", " addr
#define _storeh(reg, addr)	"sh " reg ", " addr
#define _storeb(reg, addr)	"sb " reg ", " addr

#define __put_kernel_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(_storeb, ptr); break;			\
	case 2: __put_data_asm(_storeh, ptr); break;			\
	case 4: __put_data_asm(_storew, ptr); break;			\
	case 8: __PUT_DW(_stored, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(user_sb, ptr); break;			\
	case 2: __put_data_asm(user_sh, ptr); break;			\
	case 4: __put_data_asm(user_sw, ptr); break;			\
	case 8: __PUT_DW(user_sd, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__put_kernel_common(ptr, size);				\
	} else {							\
		__chk_user_ptr(ptr);					\
		__put_user_common(ptr, size);				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {		\
		if (segment_eq(get_fs(), get_ds()))			\
			__put_kernel_common(__pu_addr, size);		\
		else							\
			__put_user_common(__pu_addr, size);		\
	}								\
									\
	__pu_err;							\
})

#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);

/*
 * ul{b,h,w} are macros and there are no equivalent macros for EVA.
 * EVA unaligned access is handled in the ADE exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x, ptr)					\
	__put_user_unaligned_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x, ptr)					\
	__get_user_unaligned_check((x), (ptr), sizeof(*(ptr)))

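/*
 * Illustrative sketch (hypothetical): fetching a 32-bit field from a packed
 * user buffer whose alignment is not guaranteed, where the plain lw used by
 * get_user() could take an address error exception.
 *
 *	u32 seq;
 *
 *	if (get_user_unaligned(seq, (u32 __user *)(ubuf + 1)))
 *		return -EFAULT;
 */
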
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x, ptr)					\
	__put_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.	This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x, ptr)					\
	__get_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_data_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)			\
do {									\
	switch (size) {							\
	case 1: __get_data_unaligned_asm(val, "lb", ptr); break;	\
	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\
} while (0)

#define __get_user_unaligned_nocheck(x, ptr, size)			\
({									\
	int __gu_err;							\
									\
	__get_user_unaligned_common((x), size, ptr);			\
	__gu_err;							\
})

#define __get_user_unaligned_check(x, ptr, size)			\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
		__get_user_unaligned_common((x), size, __gu_ptr);	\
									\
	__gu_err;							\
})

#define __get_data_unaligned_asm(val, insn, addr)			\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)			\
{									\
	unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	ulw	%1, (%3)				\n"	\
	"2:	ulw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size)				\
do {									\
	switch (size) {							\
	case 1: __put_user_unaligned_asm("sb", ptr); break;		\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
} while (0)

#define __put_user_unaligned_nocheck(x, ptr, size)			\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	__put_user_unaligned_common(ptr, size);				\
	__pu_err;							\
})

#define __put_user_unaligned_check(x, ptr, size)			\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size)))		\
		__put_user_unaligned_common(__pu_addr, size);		\
									\
	__pu_err;							\
})

#define __put_user_unaligned_asm(insn, ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	" __UA_ADDR "	1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_unaligned_asm_ll32(ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	usw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
	"2:	usw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unaligned_unknown(void);
#endif

/*
 * We're generating jumps to subroutines which may be outside the range
 * of the jal instruction, so for modules we load the target address into
 * a register and use jalr instead.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

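/*
 * For example (illustrative expansion): __MODULE_JAL(__copy_user) emits
 * plain "jal __copy_user" when built into the kernel, but from a module,
 * on 64-bit, it becomes
 *
 *	.set	noat
 *	dla	$1, __copy_user
 *	jalr	$1
 *	.set	at
 *
 * since a module may be loaded beyond the 256 MB region reachable through
 * jal's 26-bit target field.
 */
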
#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user(to, from, n)

#endif

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.	This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	if (segment_eq(get_fs(), get_ds()))				\
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
						   __cu_len);		\
	else								\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)				\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds()))				\
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
						   __cu_len);		\
	else								\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})

#define __copy_from_user_inatomic(to, from, n)				\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds()))				\
		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
							      __cu_from,\
							      __cu_len);\
	else								\
		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
							    __cu_from,	\
							    __cu_len);	\
	__cu_len;							\
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.	This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
						   __cu_from,		\
						   __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {	\
			might_fault();					\
			__cu_len = __invoke_copy_to_user(__cu_to,	\
							 __cu_from,	\
							 __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})

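/*
 * Illustrative sketch (hypothetical read() method): copy a kernel buffer
 * out to user space, reporting any short copy as -EFAULT.
 *
 *	static ssize_t foo_read(struct file *file, char __user *buf,
 *				size_t count, loff_t *ppos)
 *	{
 *		size_t len = min(count, sizeof(foo_msg));
 *
 *		if (copy_to_user(buf, foo_msg, len))
 *			return -EFAULT;
 *		*ppos += len;
 *		return len;
 *	}
 */
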
#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_inatomic(to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
				       size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(func_ptr)						\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(func_ptr)						\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

/*
 * Source or destination address is in userland.  We need to go through
 * the TLB.
 */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n,		\
					    __copy_user_inatomic_eva)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address in the kernel.  We are not going through
 * the TLB.
 */
#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#endif /* CONFIG_EVA */

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.	This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.	This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
			might_fault();					\
			__cu_len = __invoke_copy_from_user(__cu_to,	\
							   __cu_from,	\
							   __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})

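/*
 * Illustrative sketch (hypothetical write() method): pull a fixed-size
 * request structure in from user space before acting on it.
 *
 *	struct foo_req req;
 *
 *	if (count != sizeof(req))
 *		return -EINVAL;
 *	if (copy_from_user(&req, buf, sizeof(req)))
 *		return -EFAULT;
 */
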
#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		might_fault();						\
		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
						  __cu_len);		\
	}								\
	__cu_len;							\
})

#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (segment_eq(get_fs(), get_ds())) {				\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
			might_fault();					\
			__cu_len = ___invoke_copy_in_user(__cu_to,	\
							  __cu_from,	\
							  __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})

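/*
 * Illustrative sketch (hypothetical): copy_in_user() moves data between two
 * user buffers without a kernel bounce buffer, e.g. when rewriting a compat
 * ioctl argument in place.
 *
 *	if (copy_in_user(new_ubuf, old_ubuf, len))
 *		return -EFAULT;
 */
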
/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n:	Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr, n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
					__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})

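/*
 * Illustrative sketch (hypothetical): zero the tail of a user buffer after
 * a short read, as filesystems do for holes.
 *
 *	if (clear_user(buf + copied, count - copied))
 *		return -EFAULT;
 */
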
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

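/*
 * Illustrative sketch (hypothetical): fetch a bounded string and
 * distinguish the three outcomes documented above.
 *
 *	char name[64];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;			// -EFAULT
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;		// truncated, no NUL copied
 *	// name[] now holds a NUL-terminated string of length len
 */
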
/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.	This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	}

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 * @n:	 The maximum valid length.
 *
 * Context: User context only.	This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

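/*
 * Illustrative sketch (hypothetical): size an untrusted user string before
 * allocating a kernel copy; a zero return means the probe faulted.
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (!len)
 *		return -EFAULT;
 *	// len includes the terminating NUL, so a kmalloc(len, GFP_KERNEL)
 *	// buffer is large enough for a copy
 */
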
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

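/*
 * Each __ex_table entry emitted by the macros above pairs the address of a
 * potentially faulting load/store (insn) with the address of its local
 * fixup code (nextinsn).  Roughly, when a user access faults, the exception
 * handler calls fixup_exception(), which looks up the faulting EPC in the
 * table and, on a match, resumes execution at the fixup so the accessor can
 * return -EFAULT instead of taking the kernel down.
 */
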
#endif /* _ASM_UACCESS_H */