arch/tile/lib/memcpy_user_64.c
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do memcpy(), but trap and return "n" when a load or store faults.
 *
 * Note: this idiom only works when memcpy() compiles to a leaf function.
 * Here "leaf function" means not only that it makes no calls, but also
 * that it performs no stack operations (sp, stack frame pointer) and
 * uses no callee-saved registers; otherwise "jrp lr" would be wrong,
 * since stack-frame unwinding is bypassed.  memcpy() is not complex,
 * so these conditions are satisfied here, but we need to be careful
 * when modifying this file.  This is not a clean solution, but it is
 * the best one so far.
 *
 * Also note that we are capturing "n" from the containing scope here.
 */

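/*
 * _ST() and _LD() wrap a single store/load in inline asm so the access
 * can fault and be recovered from.  Label 1 marks the access itself;
 * label 2, placed in .coldtext, is the fixup: it copies the current
 * value of "n" into r0 (the return-value register) and returns straight
 * out of the leaf function with "jrp lr", so a faulting copy reports
 * the bytes it did not transfer.  The (1b, 2b) pair recorded in
 * __ex_table is what lets the page-fault handler redirect the faulting
 * instruction to its fixup.  ST1(p, v), for instance, expands to one
 * "st1" instruction plus one such exception-table entry.
 */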
#define _ST(p, inst, v)						\
	({							\
		asm("1: " #inst " %0, %1;"			\
		    ".pushsection .coldtext,\"ax\";"	\
		    "2: { move r0, %2; jrp lr };"		\
		    ".section __ex_table,\"a\";"		\
		    ".align 8;"					\
		    ".quad 1b, 2b;"				\
		    ".popsection"				\
		    : "=m" (*(p)) : "r" (v), "r" (n));		\
	})

#define _LD(p, inst)						\
	({							\
		unsigned long __v;				\
		asm("1: " #inst " %0, %1;"			\
		    ".pushsection .coldtext,\"ax\";"	\
		    "2: { move r0, %2; jrp lr };"		\
		    ".section __ex_table,\"a\";"		\
		    ".align 8;"					\
		    ".quad 1b, 2b;"				\
		    ".popsection"				\
		    : "=r" (__v) : "m" (*(p)), "r" (n));	\
		__v;						\
	})

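/*
 * Each block below names USERCOPY_FUNC, picks which of the ST1-ST8 and
 * LD1-LD8 primitives must be the faulting variants, and re-includes
 * memcpy_64.c, which is expected to emit the copy loop under that name
 * and to undefine these macros again so the next inclusion can redefine
 * them:
 *
 *   __copy_to_user_inatomic:   stores may fault, loads are plain.
 *   __copy_from_user_inatomic: loads may fault, stores are plain.
 *   __copy_in_user_inatomic:   both loads and stores may fault.
 */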
#define USERCOPY_FUNC __copy_to_user_inatomic
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
#define ST8(p, v) _ST((p), st, (v))
#define LD1 LD
#define LD2 LD
#define LD4 LD
#define LD8 LD
#include "memcpy_64.c"

#define USERCOPY_FUNC __copy_from_user_inatomic
#define ST1 ST
#define ST2 ST
#define ST4 ST
#define ST8 ST
#define LD1(p) _LD((p), ld1u)
#define LD2(p) _LD((p), ld2u)
#define LD4(p) _LD((p), ld4u)
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"

#define USERCOPY_FUNC __copy_in_user_inatomic
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
#define ST8(p, v) _ST((p), st, (v))
#define LD1(p) _LD((p), ld1u)
#define LD2(p) _LD((p), ld2u)
#define LD4(p) _LD((p), ld4u)
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"

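/*
 * Like __copy_from_user_inatomic(), but on a partial copy the tail of
 * the destination that was never written is cleared, so callers cannot
 * end up reading leftover kernel data.  Returns the number of bytes not
 * copied (0 on success).  An illustrative caller (names are made up)
 * might do:
 *
 *	if (__copy_from_user_zeroing(kbuf, ubuf, len))
 *		return -EFAULT;
 */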
unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
				       unsigned long n)
{
	unsigned long rc = __copy_from_user_inatomic(to, from, n);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}