arch/s390/include/asm/uaccess.h
/*
 * S390 version
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com),
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, and
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })


#define KERNEL_DS       MAKE_MM_SEG(0)
#define USER_DS         MAKE_MM_SEG(1)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.mm_segment)

#define set_fs(x)                                                       \
({                                                                      \
        unsigned long __pto;                                            \
        current->thread.mm_segment = (x);                               \
        __pto = current->thread.mm_segment.ar4 ?                        \
                S390_lowcore.user_asce : S390_lowcore.kernel_asce;      \
        __ctl_load(__pto, 7, 7);                                        \
})
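
/*
 * Illustrative usage sketch (not part of the original header): the
 * historical get_fs()/set_fs() save-and-restore idiom used when a kernel
 * buffer has to be handed to a routine that expects a __user pointer.
 * The vfs_read() call and the variable names are only an example.
 *
 *	mm_segment_t old_fs = get_fs();
 *	ssize_t ret;
 *
 *	set_fs(KERNEL_DS);
 *	ret = vfs_read(file, (char __user *)kbuf, len, &pos);
 *	set_fs(old_fs);
 */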

#define segment_eq(a,b) ((a).ar4 == (b).ar4)

static inline int __range_ok(unsigned long addr, unsigned long size)
{
        return 1;
}

#define __access_ok(addr, size)                         \
({                                                      \
        __chk_user_ptr(addr);                           \
        __range_ok((unsigned long)(addr), (size));      \
})

#define access_ok(type, addr, size) __access_ok(addr, size)
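
/*
 * Illustrative usage sketch (not part of the original header): the
 * conventional access_ok() check before calling an unchecked __copy
 * helper.  On s390 __range_ok() always returns 1, since user and kernel
 * code run in separate address spaces and faults are caught through the
 * exception table instead; portable callers still perform the check.
 * The buffer names are hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */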

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        int insn, fixup;
};

static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
        return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
        return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
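
/*
 * Illustrative sketch (not part of the original header): how the relative
 * extable entries above can be resolved during fault handling.  The real
 * search_extable() implementation is more involved; this linear walk only
 * demonstrates extable_insn()/extable_fixup().  The function and parameter
 * names are hypothetical.
 *
 *	static unsigned long example_find_fixup(const struct exception_table_entry *start,
 *						const struct exception_table_entry *end,
 *						unsigned long fault_ip)
 *	{
 *		const struct exception_table_entry *e;
 *
 *		for (e = start; e < end; e++)
 *			if (extable_insn(e) == fault_ip)
 *				return extable_fixup(e);
 *		return 0;
 *	}
 */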

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
                                            unsigned long n);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
                                          unsigned long n);
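
/*
 * Illustrative usage sketch (not part of the original header): the
 * unchecked copy routines return the number of bytes that could NOT be
 * copied, so any non-zero result is normally turned into -EFAULT.  On a
 * partial __copy_from_user() the destination is zero-padded up to @n.
 * The argument names are hypothetical.
 *
 *	if (__copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	...
 *	if (__copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */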

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

#define __put_get_user_asm(to, from, size, spec)                \
({                                                              \
        register unsigned long __reg0 asm("0") = spec;          \
        int __rc;                                               \
                                                                \
        asm volatile(                                           \
                "0:     mvcos   %1,%3,%2\n"                     \
                "1:     xr      %0,%0\n"                        \
                "2:\n"                                          \
                ".pushsection .fixup, \"ax\"\n"                 \
                "3:     lhi     %0,%5\n"                        \
                "       jg      2b\n"                           \
                ".popsection\n"                                 \
                EX_TABLE(0b,3b) EX_TABLE(1b,3b)                 \
                : "=d" (__rc), "=Q" (*(to))                     \
                : "d" (size), "Q" (*(from)),                    \
                  "d" (__reg0), "K" (-EFAULT)                   \
                : "cc");                                        \
        __rc;                                                   \
})

#define __put_user_fn(x, ptr, size) __put_get_user_asm(ptr, x, size, 0x810000UL)
#define __get_user_fn(x, ptr, size) __put_get_user_asm(x, ptr, size, 0x81UL)

#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
        size = __copy_to_user(ptr, x, size);
        return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
        size = __copy_from_user(x, ptr, size);
        return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)                                      \
({                                                              \
        __typeof__(*(ptr)) __x = (x);                           \
        int __pu_err = -EFAULT;                                 \
        __chk_user_ptr(ptr);                                    \
        switch (sizeof (*(ptr))) {                              \
        case 1:                                                 \
        case 2:                                                 \
        case 4:                                                 \
        case 8:                                                 \
                __pu_err = __put_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
                break;                                          \
        }                                                       \
        __pu_err;                                               \
})

#define put_user(x, ptr)                                        \
({                                                              \
        might_fault();                                          \
        __put_user(x, ptr);                                     \
})


int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)                                      \
({                                                              \
        int __gu_err = -EFAULT;                                 \
        __chk_user_ptr(ptr);                                    \
        switch (sizeof(*(ptr))) {                               \
        case 1: {                                               \
                unsigned char __x;                              \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 2: {                                               \
                unsigned short __x;                             \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 4: {                                               \
                unsigned int __x;                               \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 8: {                                               \
                unsigned long long __x;                         \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        default:                                                \
                __get_user_bad();                               \
                break;                                          \
        }                                                       \
        __gu_err;                                               \
})

#define get_user(x, ptr)                                        \
({                                                              \
        might_fault();                                          \
        __get_user(x, ptr);                                     \
})

int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
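
/*
 * Illustrative usage sketch (not part of the original header): single-value
 * transfers with get_user()/put_user().  The access size is taken from the
 * pointer type, so only 1, 2, 4 and 8 byte objects compile.  The function
 * and variable names are hypothetical.
 *
 *	static int example_increment(int __user *uval)
 *	{
 *		int val;
 *
 *		if (get_user(val, uval))
 *			return -EFAULT;
 *		return put_user(val + 1, uval);
 *	}
 */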

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        return __copy_to_user(to, from, n);
}
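
/*
 * Illustrative usage sketch (not part of the original header): returning a
 * kernel value to user space, e.g. from an ioctl handler.  A short copy is
 * reported as -EFAULT.  The variable names are hypothetical.
 *
 *	u32 version = 1;
 *
 *	if (copy_to_user(uptr, &version, sizeof(version)))
 *		return -EFAULT;
 *	return 0;
 */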

void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned int sz = __compiletime_object_size(to);

        might_fault();
        if (unlikely(sz != -1 && sz < n)) {
                copy_from_user_overflow();
                return n;
        }
        return __copy_from_user(to, from, n);
}
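
/*
 * Illustrative usage sketch (not part of the original header): copying a
 * user buffer into a fixed-size kernel object.  If the destination size is
 * known at compile time and is smaller than @n, copy_from_user_overflow()
 * warns and the copy is refused.  The variable names are hypothetical.
 *
 *	char kbuf[64];
 *
 *	if (len > sizeof(kbuf))
 *		return -EINVAL;
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */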

unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_in_user(to, from, n);
}
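
/*
 * Illustrative usage sketch (not part of the original header): moving data
 * between two user-space buffers without a kernel bounce buffer, as a
 * compat layer might do.  The buffer names are hypothetical.
 *
 *	if (copy_in_user(new_ubuf, old_ubuf, len))
 *		return -EFAULT;
 */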

/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
        might_fault();
        return __strncpy_from_user(dst, src, count);
}
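
/*
 * Illustrative usage sketch (not part of the original header): fetching a
 * NUL-terminated string from user space into a bounded kernel buffer.  A
 * negative return value indicates a fault; a return value equal to the
 * buffer size may indicate truncation.  The names are hypothetical.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */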

unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
        might_fault();
        return __strnlen_user(src, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)
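
/*
 * Illustrative usage sketch (not part of the original header): sizing a
 * user string before copying it.  strnlen_user() counts the terminating
 * NUL and returns 0 on exception.  The buffer names are hypothetical.
 *
 *	unsigned long len = strnlen_user(ustr, sizeof(kbuf));
 *
 *	if (!len || len > sizeof(kbuf))
 *		return -EFAULT;
 */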

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        might_fault();
        return __clear_user(to, n);
}
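
/*
 * Illustrative usage sketch (not part of the original header): zeroing the
 * unwritten tail of a user buffer, e.g. after a short read.  clear_user()
 * returns the number of bytes that could not be cleared.  The variable
 * names are hypothetical.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */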

int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void s390_kernel_write(void *dst, const void *src, size_t size);

#endif /* __S390_UACCESS_H */