/*
 * S390 version
 *   Copyright IBM Corp. 1999, 2000
 *   Author(s): Hartmut Penner (hp@de.ibm.com),
 *              Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })


#define KERNEL_DS       MAKE_MM_SEG(0)
#define USER_DS         MAKE_MM_SEG(1)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.mm_segment)

#define set_fs(x)                                                       \
({                                                                      \
        unsigned long __pto;                                            \
        current->thread.mm_segment = (x);                               \
        __pto = current->thread.mm_segment.ar4 ?                        \
                S390_lowcore.user_asce : S390_lowcore.kernel_asce;      \
        __ctl_load(__pto, 7, 7);                                        \
})

#define segment_eq(a, b) ((a).ar4 == (b).ar4)

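/*
 * Illustrative sketch (not part of the original header): set_fs() decides
 * which address space control element is loaded into control register 7,
 * so the uaccess routines resolve their "user" pointers in either the user
 * or the kernel address space. The classic pattern for temporarily letting
 * a kernel buffer pass through a user-access path looks roughly like this
 * (the helper name is made up for illustration):
 *
 *      mm_segment_t old_fs = get_fs();
 *      int rc;
 *
 *      set_fs(KERNEL_DS);
 *      rc = helper_taking_a_user_pointer((void __user *)kbuf, len);
 *      set_fs(old_fs);
 *
 * Saving and restoring the previous segment is essential; leaking KERNEL_DS
 * back to user context would bypass all later checking.
 */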
static inline int __range_ok(unsigned long addr, unsigned long size)
{
        return 1;
}

#define __access_ok(addr, size)                         \
({                                                      \
        __chk_user_ptr(addr);                           \
        __range_ok((unsigned long)(addr), (size));      \
})

#define access_ok(type, addr, size) __access_ok(addr, size)

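/*
 * Illustrative sketch (not part of the original header): on s390 user space
 * lives in its own address space, so __range_ok() unconditionally succeeds
 * and access_ok() only performs the sparse __chk_user_ptr() annotation check.
 * Generic code is still expected to call it before using the unchecked copy
 * routines, e.g. (hypothetical handler, for illustration only):
 *
 *      static int example_read(struct foo *dst, const void __user *uptr)
 *      {
 *              if (!access_ok(VERIFY_READ, uptr, sizeof(*dst)))
 *                      return -EFAULT;
 *              return __copy_from_user(dst, uptr, sizeof(*dst)) ? -EFAULT : 0;
 *      }
 */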
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        int insn, fixup;
};

static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
        return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
        return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

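/*
 * Illustrative sketch (not part of the original header): each entry stores
 * 32-bit offsets relative to its own fields, which extable_insn() and
 * extable_fixup() turn back into absolute addresses. With the entries kept
 * sorted by faulting address (ARCH_HAS_SORT_EXTABLE), the fault handler can
 * binary-search them; the real sort/search code lives elsewhere in the
 * architecture sources, this only sketches the idea:
 *
 *      static unsigned long example_search(const struct exception_table_entry *tbl,
 *                                          int num, unsigned long addr)
 *      {
 *              int lo = 0, hi = num - 1;
 *
 *              while (lo <= hi) {
 *                      int mid = (lo + hi) / 2;
 *
 *                      if (extable_insn(&tbl[mid]) == addr)
 *                              return extable_fixup(&tbl[mid]);
 *                      if (extable_insn(&tbl[mid]) < addr)
 *                              lo = mid + 1;
 *                      else
 *                              hi = mid - 1;
 *              }
 *              return 0;
 *      }
 *
 * A hit lets the fault handler resume execution at the fixup address instead
 * of treating the faulting instruction as a fatal kernel fault.
 */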
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
                                            unsigned long n);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
                                          unsigned long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

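/*
 * Illustrative sketch (not part of the original header): both low-level
 * routines return the number of bytes that could NOT be transferred, so the
 * usual pattern is "non-zero means -EFAULT". A hypothetical read-modify-write
 * helper, assuming the caller has already validated uptr with access_ok():
 *
 *      static int example_bump_counter(u32 __user *uptr)
 *      {
 *              u32 val;
 *
 *              if (__copy_from_user(&val, uptr, sizeof(val)))
 *                      return -EFAULT; // val is zero-padded on a partial copy
 *              val++;
 *              if (__copy_to_user(uptr, &val, sizeof(val)))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */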
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

#define __put_get_user_asm(to, from, size, spec)                \
({                                                              \
        register unsigned long __reg0 asm("0") = spec;          \
        int __rc;                                               \
                                                                \
        asm volatile(                                           \
                "0: mvcos %1,%3,%2\n"                           \
                "1: xr %0,%0\n"                                 \
                "2:\n"                                          \
                ".pushsection .fixup, \"ax\"\n"                 \
                "3: lhi %0,%5\n"                                \
                "   jg 2b\n"                                    \
                ".popsection\n"                                 \
                EX_TABLE(0b,3b) EX_TABLE(1b,3b)                 \
                : "=d" (__rc), "=Q" (*(to))                     \
                : "d" (size), "Q" (*(from)),                    \
                  "d" (__reg0), "K" (-EFAULT)                   \
                : "cc");                                        \
        __rc;                                                   \
})

#define __put_user_fn(x, ptr, size) __put_get_user_asm(ptr, x, size, 0x810000UL)
#define __get_user_fn(x, ptr, size) __put_get_user_asm(x, ptr, size, 0x81UL)

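/*
 * Note added for clarity (not in the original header): on z10 and newer the
 * single-value transfers are done with one MVCOS ("move with optional
 * specifications") instruction. Register 0 carries the operand-access
 * control word: the 0x81 pattern selects the secondary (user) address space
 * for one operand; it sits in the low half for the source operand of
 * __get_user_fn() and is shifted into the first-operand position (0x810000)
 * for the destination of __put_user_fn(). If the access faults, the
 * EX_TABLE entries route execution to the out-of-line fixup at label 3,
 * which returns -EFAULT instead of the zero set at label 1.
 */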
#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
        size = __copy_to_user(ptr, x, size);
        return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
        size = __copy_from_user(x, ptr, size);
        return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)                                      \
({                                                              \
        __typeof__(*(ptr)) __x = (x);                           \
        int __pu_err = -EFAULT;                                 \
        __chk_user_ptr(ptr);                                    \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
        case 2:                                                 \
        case 4:                                                 \
        case 8:                                                 \
                __pu_err = __put_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
                break;                                          \
        }                                                       \
        __pu_err;                                               \
})

#define put_user(x, ptr)                                        \
({                                                              \
        might_fault();                                          \
        __put_user(x, ptr);                                     \
})


int __put_user_bad(void) __attribute__((noreturn));

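/*
 * Illustrative sketch (not part of the original header): put_user() picks the
 * 1, 2, 4 or 8 byte transfer from the pointer type and returns 0 or -EFAULT,
 * so a typical store of a single value to user space looks like this
 * (hypothetical helper, for illustration only):
 *
 *      static int example_report_status(u32 status, u32 __user *uptr)
 *      {
 *              return put_user(status, uptr);  // 4-byte case of the switch
 *      }
 */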
#define __get_user(x, ptr)                                      \
({                                                              \
        int __gu_err = -EFAULT;                                 \
        __chk_user_ptr(ptr);                                    \
        switch (sizeof(*(ptr))) {                               \
        case 1: {                                               \
                unsigned char __x;                              \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        }                                                       \
        case 2: {                                               \
                unsigned short __x;                             \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        }                                                       \
        case 4: {                                               \
                unsigned int __x;                               \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        }                                                       \
        case 8: {                                               \
                unsigned long long __x;                         \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        }                                                       \
        default:                                                \
                __get_user_bad();                               \
                break;                                          \
        }                                                       \
        __gu_err;                                               \
})

#define get_user(x, ptr)                                        \
({                                                              \
        might_fault();                                          \
        __get_user(x, ptr);                                     \
})

int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

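/*
 * Illustrative sketch (not part of the original header): get_user() reads a
 * value whose width is taken from the pointer type into a properly typed
 * variable and returns 0 or -EFAULT (hypothetical helper, for illustration
 * only):
 *
 *      static int example_fetch_flags(unsigned long __user *uptr,
 *                                     unsigned long *flags)
 *      {
 *              return get_user(*flags, uptr);  // 8-byte case on 64-bit kernels
 *      }
 *
 * Because get_user()/put_user() may fault and sleep, they must not be used
 * in atomic context; might_fault() documents and checks that requirement.
 */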
/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        return __copy_to_user(to, from, n);
}

void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned int sz = __compiletime_object_size(to);

        might_fault();
        if (unlikely(sz != -1 && sz < n)) {
                copy_from_user_overflow();
                return n;
        }
        return __copy_from_user(to, from, n);
}

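/*
 * Illustrative sketch (not part of the original header): the checked wrappers
 * are what ordinary kernel code calls; copy_from_user() additionally compares
 * the compile-time size of the destination object against @n and rejects the
 * copy (returning @n, i.e. "nothing copied") when the buffer is provably too
 * small. A typical ioctl-style round trip (hypothetical, for illustration):
 *
 *      struct example_args {
 *              u64 in;
 *              u64 out;
 *      };
 *
 *      static long example_ioctl(void __user *argp)
 *      {
 *              struct example_args args;
 *
 *              if (copy_from_user(&args, argp, sizeof(args)))
 *                      return -EFAULT;
 *              args.out = args.in * 2;
 *              if (copy_to_user(argp, &args, sizeof(args)))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */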
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_in_user(to, from, n);
}

/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
        might_fault();
        return __strncpy_from_user(dst, src, count);
}

unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
        might_fault();
        return __strnlen_user(src, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)

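/*
 * Illustrative sketch (not part of the original header): strncpy_from_user()
 * returns the length of the copied string (or a negative error), while
 * strnlen_user()/strlen_user() return the string size including the NUL, or
 * 0 on an exception. A hypothetical bounded name fetch, for illustration:
 *
 *      static long example_get_name(char *buf, long buflen,
 *                                   const char __user *uname)
 *      {
 *              long len = strncpy_from_user(buf, uname, buflen);
 *
 *              if (len < 0)
 *                      return len;             // e.g. -EFAULT
 *              if (len == buflen)
 *                      return -ENAMETOOLONG;   // truncated, no room for NUL
 *              return len;
 *      }
 */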
/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        might_fault();
        return __clear_user(to, n);
}

int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void s390_kernel_write(void *dst, const void *src, size_t size);

#endif /* __S390_UACCESS_H */