/*
 * Optimized user space access functions based on mvcos.
 *
 * Copyright IBM Corp. 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
9 #include <linux/jump_label.h>
10 #include <linux/errno.h>
11 #include <linux/init.h>
13 #include <asm/facility.h>
14 #include <asm/uaccess.h>
15 #include <asm/futex.h>
/*
 * True while the MVCOS fast path is usable.  Initialized true and
 * flipped off in uaccess_init() when facility bit 27 is absent, on
 * 32-bit, or when the "uaccesspt" parameter forces the page table
 * walk fallback (see uaccess_init() below).
 */
32 static struct static_key have_mvcos
= STATIC_KEY_INIT_TRUE
;
/*
 * copy_from_user_mvcos() - copy from user space to kernel space using
 * the MVCOS instruction (.insn ss,0xc80000000000).  On a fault, the
 * remainder of the destination buffer is zeroed (the xc/memset loop)
 * and the number of uncopied bytes is left in %0 (size).
 *
 * NOTE(review): this block is an incomplete extraction.  The trailing
 * size parameter, the function braces, the "asm volatile(" opener,
 * several asm lines (labels 1, 4, 6-10 are referenced by the EX_TABLE
 * entries below but not present) and the final return are missing.
 * Restore them from the original file; only comments were added here.
 */
34 static inline unsigned long copy_from_user_mvcos(void *x
, const void __user
*ptr
,
/* reg 0 carries the operand-access control; 0x81 presumably selects the
 * user address space for the source operand -- confirm against the POP. */
37 register unsigned long reg0
asm("0") = 0x81UL
;
38 unsigned long tmp1
, tmp2
;
/* fast path: bulk MVCOS copy */
42 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
/* fault path: retry with the copy limited to the current source page */
48 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
49 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
51 " "CLR
" %0,%4\n" /* copy crosses next page boundary? */
53 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
57 " "ALR
" %4,%0\n" /* copy remaining size, subtract 1 */
58 " bras %3,6f\n" /* memset loop */
/* clear the uncopied tail of the kernel buffer, 256 bytes per xc */
60 "5: xc 0(256,%2),0(%2)\n"
/* faults at 0b/9b resume at 2b; faults at 3b/10b resume at 4b */
68 EX_TABLE(0b
,2b
) EX_TABLE(3b
,4b
) EX_TABLE(9b
,2b
) EX_TABLE(10b
,4b
)
69 : "+a" (size
), "+a" (ptr
), "+a" (x
), "+a" (tmp1
), "=a" (tmp2
)
70 : "d" (reg0
) : "cc", "memory");
74 unsigned long __copy_from_user(void *to
, const void __user
*from
, unsigned long n
)
76 if (static_key_true(&have_mvcos
))
77 return copy_from_user_mvcos(to
, from
, n
);
78 return copy_from_user_pt(to
, from
, n
);
80 EXPORT_SYMBOL(__copy_from_user
);
/*
 * copy_to_user_mvcos() - copy from kernel space to user space using
 * the MVCOS instruction.  Leaves the number of uncopied bytes in %0.
 *
 * NOTE(review): incomplete extraction -- the trailing size parameter,
 * braces, the "asm volatile(" opener, asm lines (labels 1, 4-7 are
 * referenced by EX_TABLE below but missing) and the return statement
 * are absent.  Restore from the original file; only comments added.
 */
82 static inline unsigned long copy_to_user_mvcos(void __user
*ptr
, const void *x
,
/* 0x810000 presumably selects the user address space for the
 * destination operand -- confirm against the POP. */
85 register unsigned long reg0
asm("0") = 0x810000UL
;
86 unsigned long tmp1
, tmp2
;
/* fast path: bulk MVCOS copy */
90 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
/* fault path: retry limited to the current destination page */
96 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
97 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
99 " "CLR
" %0,%4\n" /* copy crosses next page boundary? */
101 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
/* faults at 0b/6b resume at 2b; faults at 3b/7b resume at 5b */
106 EX_TABLE(0b
,2b
) EX_TABLE(3b
,5b
) EX_TABLE(6b
,2b
) EX_TABLE(7b
,5b
)
107 : "+a" (size
), "+a" (ptr
), "+a" (x
), "+a" (tmp1
), "=a" (tmp2
)
108 : "d" (reg0
) : "cc", "memory");
112 unsigned long __copy_to_user(void __user
*to
, const void *from
, unsigned long n
)
114 if (static_key_true(&have_mvcos
))
115 return copy_to_user_mvcos(to
, from
, n
);
116 return copy_to_user_pt(to
, from
, n
);
118 EXPORT_SYMBOL(__copy_to_user
);
/*
 * copy_in_user_mvcos() - copy between two user space buffers with a
 * single MVCOS.  Unlike the from/to variants there is no page-wise
 * retry here (see the FIXME below).
 *
 * NOTE(review): incomplete extraction -- trailing size parameter,
 * braces, the "asm volatile(" opener, the EX_TABLE entries and the
 * return statement are missing.  Restore from the original file.
 */
120 static inline unsigned long copy_in_user_mvcos(void __user
*to
, const void __user
*from
,
/* 0x810081 presumably selects the user address space for both the
 * source and the destination operand -- confirm against the POP. */
123 register unsigned long reg0
asm("0") = 0x810081UL
;
124 unsigned long tmp1
, tmp2
;
127 /* FIXME: copy with reduced length. */
129 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
138 : "+a" (size
), "+a" (to
), "+a" (from
), "+a" (tmp1
), "=a" (tmp2
)
139 : "d" (reg0
) : "cc", "memory");
143 unsigned long __copy_in_user(void __user
*to
, const void __user
*from
, unsigned long n
)
145 if (static_key_true(&have_mvcos
))
146 return copy_in_user_mvcos(to
, from
, n
);
147 return copy_in_user_pt(to
, from
, n
);
149 EXPORT_SYMBOL(__copy_in_user
);
/*
 * clear_user_mvcos() - zero @size bytes of user memory at @to by
 * MVCOS-copying from empty_zero_page (input operand %4).
 *
 * NOTE(review): incomplete extraction -- the function braces, the
 * "asm volatile(" opener, several asm lines (labels 1, 4, 5; note 5b
 * is referenced by EX_TABLE below) and the return statement are
 * missing.  Restore from the original file; only comments added.
 */
151 static inline unsigned long clear_user_mvcos(void __user
*to
, unsigned long size
)
/* 0x810000 presumably selects the user address space for the
 * destination operand -- confirm against the POP. */
153 register unsigned long reg0
asm("0") = 0x810000UL
;
154 unsigned long tmp1
, tmp2
;
/* fast path: bulk copy of zeros into the user buffer */
158 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
/* fault path: retry limited to the current destination page */
163 "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
164 " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
166 " "CLR
" %0,%3\n" /* copy crosses next page boundary? */
168 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
/* faults at 0b resume at 2b; faults at 3b resume at 5b */
173 EX_TABLE(0b
,2b
) EX_TABLE(3b
,5b
)
174 : "+a" (size
), "+a" (to
), "+a" (tmp1
), "=a" (tmp2
)
175 : "a" (empty_zero_page
), "d" (reg0
) : "cc", "memory");
179 unsigned long __clear_user(void __user
*to
, unsigned long size
)
181 if (static_key_true(&have_mvcos
))
182 return clear_user_mvcos(to
, size
);
183 return clear_user_pt(to
, size
);
185 EXPORT_SYMBOL(__clear_user
);
/*
 * strnlen_user_mvcos() - determine the length of a user space string
 * by copying it page-wise (at most 256 bytes per round, never across
 * a page boundary) into a local buffer and running strnlen() on each
 * chunk, until a terminator is found or @count is reached.
 *
 * NOTE(review): incomplete extraction -- the count parameter, braces,
 * the local buffer declaration the copy targets (buf), initialization
 * of done, the do { opener, the loop's accumulation/advance statements
 * and the return statements are all missing.  Restore from the
 * original file; only comments were added here.
 */
187 static inline unsigned long strnlen_user_mvcos(const char __user
*src
,
190 unsigned long done
, len
, offset
, len_str
;
/* distance into the current user page */
195 offset
= (unsigned long)src
& ~PAGE_MASK
;
/* chunk = min(256, bytes left on this page) ... */
196 len
= min(256UL, PAGE_SIZE
- offset
);
/* ... further clamped to the bytes still allowed by count */
197 len
= min(count
- done
, len
);
/* non-zero return means a fault while copying the chunk */
198 if (copy_from_user_mvcos(buf
, src
, len
))
200 len_str
= strnlen(buf
, len
);
/* keep going while no terminator was seen and count not exhausted */
203 } while ((len_str
== len
) && (done
< count
));
207 unsigned long __strnlen_user(const char __user
*src
, unsigned long count
)
209 if (static_key_true(&have_mvcos
))
210 return strnlen_user_mvcos(src
, count
);
211 return strnlen_user_pt(src
, count
);
213 EXPORT_SYMBOL(__strnlen_user
);
/*
 * strncpy_from_user_mvcos() - copy a string from user space into @dst,
 * page-wise (never copying across a user page boundary), stopping at
 * the terminator or after @count bytes.
 *
 * NOTE(review): incomplete extraction -- the count parameter, braces,
 * the early-return body for count <= 0, initialization of done, the
 * do { opener, the loop's accumulation/advance statements and the
 * return statements are missing.  Restore from the original file;
 * only comments were added here.
 */
215 static inline long strncpy_from_user_mvcos(char *dst
, const char __user
*src
,
218 unsigned long done
, len
, offset
, len_str
;
/* nothing to do for a non-positive count (body of this if is missing) */
220 if (unlikely(count
<= 0))
/* distance into the current user page */
224 offset
= (unsigned long)src
& ~PAGE_MASK
;
/* chunk = min(bytes left by count, bytes left on this page) */
225 len
= min(count
- done
, PAGE_SIZE
- offset
);
/* non-zero return means a fault while copying the chunk */
226 if (copy_from_user_mvcos(dst
, src
, len
))
228 len_str
= strnlen(dst
, len
);
/* keep going while no terminator was seen and count not exhausted */
232 } while ((len_str
== len
) && (done
< count
));
236 long __strncpy_from_user(char *dst
, const char __user
*src
, long count
)
238 if (static_key_true(&have_mvcos
))
239 return strncpy_from_user_mvcos(dst
, src
, count
);
240 return strncpy_from_user_pt(dst
, src
, count
);
242 EXPORT_SYMBOL(__strncpy_from_user
);
/*
 * The uaccess page table walk variant can be enforced with the "uaccesspt"
 * kernel parameter. This is mainly for debugging purposes.
 */
/* Set by the "uaccesspt" early parameter; forces the page table walk path. */
248 static int force_uaccess_pt __initdata
;
250 static int __init
parse_uaccess_pt(char *__unused
)
252 force_uaccess_pt
= 1;
255 early_param("uaccesspt", parse_uaccess_pt
);
257 static int __init
uaccess_init(void)
259 if (IS_ENABLED(CONFIG_32BIT
) || force_uaccess_pt
|| !test_facility(27))
260 static_key_slow_dec(&have_mvcos
);
263 early_initcall(uaccess_init
);