Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * include/asm-s390/system.h | |
3 | * | |
4 | * S390 version | |
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | |
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | |
7 | * | |
8 | * Derived from "include/asm-i386/system.h" | |
9 | */ | |
10 | ||
11 | #ifndef __ASM_SYSTEM_H | |
12 | #define __ASM_SYSTEM_H | |
13 | ||
1da177e4 | 14 | #include <linux/kernel.h> |
320c04c0 | 15 | #include <linux/errno.h> |
1da177e4 LT |
16 | #include <asm/types.h> |
17 | #include <asm/ptrace.h> | |
18 | #include <asm/setup.h> | |
77fa2245 | 19 | #include <asm/processor.h> |
484875b1 | 20 | #include <asm/lowcore.h> |
1da177e4 LT |
21 | |
22 | #ifdef __KERNEL__ | |
23 | ||
24 | struct task_struct; | |
25 | ||
26 | extern struct task_struct *__switch_to(void *, void *); | |
27 | ||
1da177e4 LT |
28 | static inline void save_fp_regs(s390_fp_regs *fpregs) |
29 | { | |
94c12cc7 MS |
30 | asm volatile( |
31 | " std 0,8(%1)\n" | |
32 | " std 2,24(%1)\n" | |
33 | " std 4,40(%1)\n" | |
34 | " std 6,56(%1)" | |
35 | : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); | |
1da177e4 LT |
36 | if (!MACHINE_HAS_IEEE) |
37 | return; | |
38 | asm volatile( | |
94c12cc7 MS |
39 | " stfpc 0(%1)\n" |
40 | " std 1,16(%1)\n" | |
41 | " std 3,32(%1)\n" | |
42 | " std 5,48(%1)\n" | |
43 | " std 7,64(%1)\n" | |
44 | " std 8,72(%1)\n" | |
45 | " std 9,80(%1)\n" | |
46 | " std 10,88(%1)\n" | |
47 | " std 11,96(%1)\n" | |
48 | " std 12,104(%1)\n" | |
49 | " std 13,112(%1)\n" | |
50 | " std 14,120(%1)\n" | |
51 | " std 15,128(%1)\n" | |
52 | : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); | |
1da177e4 LT |
53 | } |
54 | ||
55 | static inline void restore_fp_regs(s390_fp_regs *fpregs) | |
56 | { | |
94c12cc7 MS |
57 | asm volatile( |
58 | " ld 0,8(%0)\n" | |
59 | " ld 2,24(%0)\n" | |
60 | " ld 4,40(%0)\n" | |
61 | " ld 6,56(%0)" | |
62 | : : "a" (fpregs), "m" (*fpregs)); | |
1da177e4 LT |
63 | if (!MACHINE_HAS_IEEE) |
64 | return; | |
65 | asm volatile( | |
94c12cc7 MS |
66 | " lfpc 0(%0)\n" |
67 | " ld 1,16(%0)\n" | |
68 | " ld 3,32(%0)\n" | |
69 | " ld 5,48(%0)\n" | |
70 | " ld 7,64(%0)\n" | |
71 | " ld 8,72(%0)\n" | |
72 | " ld 9,80(%0)\n" | |
73 | " ld 10,88(%0)\n" | |
74 | " ld 11,96(%0)\n" | |
75 | " ld 12,104(%0)\n" | |
76 | " ld 13,112(%0)\n" | |
77 | " ld 14,120(%0)\n" | |
78 | " ld 15,128(%0)\n" | |
79 | : : "a" (fpregs), "m" (*fpregs)); | |
1da177e4 LT |
80 | } |
81 | ||
82 | static inline void save_access_regs(unsigned int *acrs) | |
83 | { | |
94c12cc7 | 84 | asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); |
1da177e4 LT |
85 | } |
86 | ||
87 | static inline void restore_access_regs(unsigned int *acrs) | |
88 | { | |
94c12cc7 | 89 | asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); |
1da177e4 LT |
90 | } |
91 | ||
92 | #define switch_to(prev,next,last) do { \ | |
93 | if (prev == next) \ | |
94 | break; \ | |
95 | save_fp_regs(&prev->thread.fp_regs); \ | |
96 | restore_fp_regs(&next->thread.fp_regs); \ | |
97 | save_access_regs(&prev->thread.acrs[0]); \ | |
98 | restore_access_regs(&next->thread.acrs[0]); \ | |
99 | prev = __switch_to(prev,next); \ | |
100 | } while (0) | |
101 | ||
aa5e97ce | 102 | extern void account_vtime(struct task_struct *, struct task_struct *); |
1f1c12af | 103 | extern void account_tick_vtime(struct task_struct *); |
1da177e4 | 104 | extern void account_system_vtime(struct task_struct *); |
1da177e4 | 105 | |
29b08d2b HC |
106 | #ifdef CONFIG_PFAULT |
107 | extern void pfault_irq_init(void); | |
108 | extern int pfault_init(void); | |
109 | extern void pfault_fini(void); | |
110 | #else /* CONFIG_PFAULT */ | |
111 | #define pfault_irq_init() do { } while (0) | |
112 | #define pfault_init() ({-1;}) | |
113 | #define pfault_fini() do { } while (0) | |
114 | #endif /* CONFIG_PFAULT */ | |
115 | ||
45e576b1 MS |
116 | #ifdef CONFIG_PAGE_STATES |
117 | extern void cmma_init(void); | |
118 | #else | |
119 | static inline void cmma_init(void) { } | |
120 | #endif | |
121 | ||
5ee24d95 | 122 | #define finish_arch_switch(prev) do { \ |
1da177e4 | 123 | set_fs(current->thread.mm_segment); \ |
aa5e97ce | 124 | account_vtime(prev, current); \ |
1da177e4 LT |
125 | } while (0) |
126 | ||
94c12cc7 | 127 | #define nop() asm volatile("nop") |
1da177e4 | 128 | |
5a651c93 HC |
129 | #define xchg(ptr,x) \ |
130 | ({ \ | |
131 | __typeof__(*(ptr)) __ret; \ | |
132 | __ret = (__typeof__(*(ptr))) \ | |
133 | __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \ | |
134 | __ret; \ | |
135 | }) | |
1da177e4 | 136 | |
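The xchg() macro above returns the previous contents of *ptr. A minimal, purely illustrative use is a test-and-set spin loop; the names below are made up and the sketch assumes this header is already available in kernel context.

```c
/* Illustrative only: a naive test-and-set lock built on xchg(). */
static int lock_word;

static void lock_sketch(void)
{
	/* Atomically store 1; loop until the previous value was 0. */
	while (xchg(&lock_word, 1) != 0)
		;
}

static void unlock_sketch(void)
{
	xchg(&lock_word, 0);	/* release: put the lock word back to 0 */
}
```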
210d3a90 HC |
137 | extern void __xchg_called_with_bad_pointer(void); |
138 | ||
1da177e4 LT |
139 | static inline unsigned long __xchg(unsigned long x, void * ptr, int size) |
140 | { | |
141 | unsigned long addr, old; | |
142 | int shift; | |
143 | ||
144 | switch (size) { | |
145 | case 1: | |
146 | addr = (unsigned long) ptr; | |
147 | shift = (3 ^ (addr & 3)) << 3; | |
148 | addr ^= addr & 3; | |
149 | asm volatile( | |
94c12cc7 MS |
150 | " l %0,0(%4)\n" |
151 | "0: lr 0,%0\n" | |
152 | " nr 0,%3\n" | |
153 | " or 0,%2\n" | |
154 | " cs %0,0,0(%4)\n" | |
155 | " jl 0b\n" | |
1da177e4 LT |
156 | : "=&d" (old), "=m" (*(int *) addr) |
157 | : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), | |
94c12cc7 | 158 | "m" (*(int *) addr) : "memory", "cc", "0"); |
210d3a90 | 159 | return old >> shift; |
1da177e4 LT |
160 | case 2: |
161 | addr = (unsigned long) ptr; | |
162 | shift = (2 ^ (addr & 2)) << 3; | |
163 | addr ^= addr & 2; | |
164 | asm volatile( | |
94c12cc7 MS |
165 | " l %0,0(%4)\n" |
166 | "0: lr 0,%0\n" | |
167 | " nr 0,%3\n" | |
168 | " or 0,%2\n" | |
169 | " cs %0,0,0(%4)\n" | |
170 | " jl 0b\n" | |
1da177e4 LT |
171 | : "=&d" (old), "=m" (*(int *) addr) |
172 | : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), | |
94c12cc7 | 173 | "m" (*(int *) addr) : "memory", "cc", "0"); |
210d3a90 | 174 | return old >> shift; |
1da177e4 | 175 | case 4: |
94c12cc7 MS |
176 | asm volatile( |
177 | " l %0,0(%3)\n" | |
178 | "0: cs %0,%2,0(%3)\n" | |
179 | " jl 0b\n" | |
1da177e4 LT |
180 | : "=&d" (old), "=m" (*(int *) ptr) |
181 | : "d" (x), "a" (ptr), "m" (*(int *) ptr) | |
94c12cc7 | 182 | : "memory", "cc"); |
210d3a90 | 183 | return old; |
1da177e4 LT |
184 | #ifdef __s390x__ |
185 | case 8: | |
94c12cc7 MS |
186 | asm volatile( |
187 | " lg %0,0(%3)\n" | |
188 | "0: csg %0,%2,0(%3)\n" | |
189 | " jl 0b\n" | |
1da177e4 LT |
190 | : "=&d" (old), "=m" (*(long *) ptr) |
191 | : "d" (x), "a" (ptr), "m" (*(long *) ptr) | |
94c12cc7 | 192 | : "memory", "cc"); |
210d3a90 | 193 | return old; |
1da177e4 | 194 | #endif /* __s390x__ */ |
210d3a90 HC |
195 | } |
196 | __xchg_called_with_bad_pointer(); | |
197 | return x; | |
1da177e4 LT |
198 | } |
199 | ||
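The 1- and 2-byte cases of __xchg() (and of __cmpxchg() below) emulate the operation with a word-sized compare-and-swap: the address is rounded down to a word boundary and the operand is shifted into the right byte lane, which on big-endian s390 means byte offset 0 lands in the most significant bits. A small user-space sketch of that index arithmetic, for illustration only:

```c
#include <stdio.h>

int main(void)
{
	unsigned long addr;

	/* For each byte offset within a 4-byte word, show the aligned
	 * word address and the bit shift used by the byte case above. */
	for (addr = 0x1000; addr < 0x1004; addr++) {
		int shift = (3 ^ (addr & 3)) << 3;	/* 24, 16, 8, 0 */
		unsigned long aligned = addr ^ (addr & 3);

		printf("addr=%#lx aligned=%#lx shift=%d\n",
		       addr, aligned, shift);
	}
	return 0;
}
```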
200 | /* | |
201 | * Atomic compare and exchange. Compare OLD with MEM, if identical, | |
202 | * store NEW in MEM. Return the initial value in MEM. Success is | |
203 | * indicated by comparing RETURN with OLD. | |
204 | */ | |
205 | ||
206 | #define __HAVE_ARCH_CMPXCHG 1 | |
207 | ||
fe413013 MD |
208 | #define cmpxchg(ptr, o, n) \ |
209 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | |
210 | (unsigned long)(n), sizeof(*(ptr)))) | |
1da177e4 | 211 | |
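As the comment above states, success is detected by comparing the returned value with the expected old value, so callers typically retry in a loop. A hedged sketch of that pattern (the function and variable names are hypothetical, not part of this header):

```c
/* Illustrative lock-free add built on cmpxchg(). */
static void add_sketch(int *counter, int delta)
{
	int old, new;

	do {
		old = *counter;
		new = old + delta;
		/* cmpxchg() returns the value found in memory; the store
		 * took effect only if that value equals 'old'. */
	} while (cmpxchg(counter, old, new) != old);
}
```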
210d3a90 HC |
212 | extern void __cmpxchg_called_with_bad_pointer(void); |
213 | ||
1da177e4 LT |
214 | static inline unsigned long |
215 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | |
216 | { | |
217 | unsigned long addr, prev, tmp; | |
218 | int shift; | |
219 | ||
220 | switch (size) { | |
221 | case 1: | |
222 | addr = (unsigned long) ptr; | |
223 | shift = (3 ^ (addr & 3)) << 3; | |
224 | addr ^= addr & 3; | |
225 | asm volatile( | |
94c12cc7 MS |
226 | " l %0,0(%4)\n" |
227 | "0: nr %0,%5\n" | |
228 | " lr %1,%0\n" | |
229 | " or %0,%2\n" | |
230 | " or %1,%3\n" | |
231 | " cs %0,%1,0(%4)\n" | |
232 | " jnl 1f\n" | |
233 | " xr %1,%0\n" | |
234 | " nr %1,%5\n" | |
235 | " jnz 0b\n" | |
1da177e4 LT |
236 | "1:" |
237 | : "=&d" (prev), "=&d" (tmp) | |
238 | : "d" (old << shift), "d" (new << shift), "a" (ptr), | |
239 | "d" (~(255 << shift)) | |
94c12cc7 | 240 | : "memory", "cc"); |
1da177e4 LT |
241 | return prev >> shift; |
242 | case 2: | |
243 | addr = (unsigned long) ptr; | |
244 | shift = (2 ^ (addr & 2)) << 3; | |
245 | addr ^= addr & 2; | |
246 | asm volatile( | |
94c12cc7 MS |
247 | " l %0,0(%4)\n" |
248 | "0: nr %0,%5\n" | |
249 | " lr %1,%0\n" | |
250 | " or %0,%2\n" | |
251 | " or %1,%3\n" | |
252 | " cs %0,%1,0(%4)\n" | |
253 | " jnl 1f\n" | |
254 | " xr %1,%0\n" | |
255 | " nr %1,%5\n" | |
256 | " jnz 0b\n" | |
1da177e4 LT |
257 | "1:" |
258 | : "=&d" (prev), "=&d" (tmp) | |
259 | : "d" (old << shift), "d" (new << shift), "a" (ptr), | |
260 | "d" (~(65535 << shift)) | |
94c12cc7 | 261 | : "memory", "cc"); |
1da177e4 LT |
262 | return prev >> shift; |
263 | case 4: | |
94c12cc7 MS |
264 | asm volatile( |
265 | " cs %0,%2,0(%3)\n" | |
1da177e4 | 266 | : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) |
94c12cc7 | 267 | : "memory", "cc"); |
1da177e4 LT |
268 | return prev; |
269 | #ifdef __s390x__ | |
270 | case 8: | |
94c12cc7 MS |
271 | asm volatile( |
272 | " csg %0,%2,0(%3)\n" | |
1da177e4 | 273 | : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) |
94c12cc7 | 274 | : "memory", "cc"); |
1da177e4 LT |
275 | return prev; |
276 | #endif /* __s390x__ */ | |
277 | } | |
210d3a90 HC |
278 | __cmpxchg_called_with_bad_pointer(); |
279 | return old; | |
1da177e4 LT |
280 | } |
281 | ||
282 | /* | |
283 | * Force strict CPU ordering. | |
284 | * And yes, this is required on UP too when we're talking | |
285 | * to devices. | |
286 | * | |
287 | * This is very similar to the ppc eieio/sync instruction in that it |
288 | * does a checkpoint synchronisation and makes sure that |
289 | * all memory ops have completed wrt other CPUs (see 7-15 POP DJB). |
290 | */ | |
291 | ||
94c12cc7 MS |
292 | #define eieio() asm volatile("bcr 15,0" : : : "memory") |
293 | #define SYNC_OTHER_CORES(x) eieio() | |
1da177e4 LT |
294 | #define mb() eieio() |
295 | #define rmb() eieio() | |
296 | #define wmb() eieio() | |
297 | #define read_barrier_depends() do { } while(0) | |
298 | #define smp_mb() mb() | |
299 | #define smp_rmb() rmb() | |
300 | #define smp_wmb() wmb() | |
301 | #define smp_read_barrier_depends() read_barrier_depends() | |
302 | #define smp_mb__before_clear_bit() smp_mb() | |
303 | #define smp_mb__after_clear_bit() smp_mb() | |
304 | ||
305 | ||
306 | #define set_mb(var, value) do { var = value; mb(); } while (0) | |
1da177e4 | 307 | |
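A hedged sketch of the ordering these barriers provide (the variable names are made up for illustration): a writer publishes data before setting a flag, and a reader orders the flag check before the data read.

```c
/* Illustrative only; 'payload' and 'ready' are hypothetical variables. */
static int payload;
static int ready;

static void producer_sketch(void)
{
	payload = 42;
	smp_wmb();		/* order the data store before the flag store */
	ready = 1;
}

static int consumer_sketch(void)
{
	if (!ready)
		return -1;	/* not published yet */
	smp_rmb();		/* order the flag load before the data load */
	return payload;
}
```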
1da177e4 LT |
308 | #ifdef __s390x__ |
309 | ||
94c12cc7 MS |
310 | #define __ctl_load(array, low, high) ({ \ |
311 | typedef struct { char _[sizeof(array)]; } addrtype; \ | |
312 | asm volatile( \ | |
313 | " lctlg %1,%2,0(%0)\n" \ | |
314 | : : "a" (&array), "i" (low), "i" (high), \ | |
b57838ea | 315 | "m" (*(addrtype *)(&array))); \ |
1da177e4 LT |
316 | }) |
317 | ||
94c12cc7 MS |
318 | #define __ctl_store(array, low, high) ({ \ |
319 | typedef struct { char _[sizeof(array)]; } addrtype; \ | |
320 | asm volatile( \ | |
321 | " stctg %2,%3,0(%1)\n" \ | |
b57838ea | 322 | : "=m" (*(addrtype *)(&array)) \ |
94c12cc7 | 323 | : "a" (&array), "i" (low), "i" (high)); \ |
1da177e4 LT |
324 | }) |
325 | ||
1da177e4 LT |
326 | #else /* __s390x__ */ |
327 | ||
94c12cc7 MS |
328 | #define __ctl_load(array, low, high) ({ \ |
329 | typedef struct { char _[sizeof(array)]; } addrtype; \ | |
330 | asm volatile( \ | |
331 | " lctl %1,%2,0(%0)\n" \ | |
332 | : : "a" (&array), "i" (low), "i" (high), \ | |
b57838ea | 333 | "m" (*(addrtype *)(&array))); \ |
94c12cc7 | 334 | }) |
1da177e4 | 335 | |
94c12cc7 MS |
336 | #define __ctl_store(array, low, high) ({ \ |
337 | typedef struct { char _[sizeof(array)]; } addrtype; \ | |
338 | asm volatile( \ | |
339 | " stctl %2,%3,0(%1)\n" \ | |
b57838ea | 340 | : "=m" (*(addrtype *)(&array)) \ |
94c12cc7 | 341 | : "a" (&array), "i" (low), "i" (high)); \ |
1da177e4 LT |
342 | }) |
343 | ||
1da177e4 LT |
344 | #endif /* __s390x__ */ |
345 | ||
94c12cc7 MS |
346 | #define __ctl_set_bit(cr, bit) ({ \ |
347 | unsigned long __dummy; \ | |
348 | __ctl_store(__dummy, cr, cr); \ | |
349 | __dummy |= 1UL << (bit); \ | |
350 | __ctl_load(__dummy, cr, cr); \ | |
351 | }) | |
352 | ||
353 | #define __ctl_clear_bit(cr, bit) ({ \ | |
354 | unsigned long __dummy; \ | |
355 | __ctl_store(__dummy, cr, cr); \ | |
356 | __dummy &= ~(1UL << (bit)); \ | |
357 | __ctl_load(__dummy, cr, cr); \ | |
358 | }) | |
359 | ||
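A short sketch of how the control-register macros are typically used, assuming kernel context and a single register range; the helper name is hypothetical.

```c
/* Illustrative: read control register 0 into a local variable. */
static unsigned long read_cr0_sketch(void)
{
	unsigned long cr0;

	__ctl_store(cr0, 0, 0);		/* store CR0..CR0 into cr0 */
	return cr0;
}
```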
1f194a4c | 360 | #include <linux/irqflags.h> |
1da177e4 | 361 | |
fe413013 MD |
362 | #include <asm-generic/cmpxchg-local.h> |
363 | ||
364 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | |
365 | unsigned long old, | |
366 | unsigned long new, int size) | |
367 | { | |
368 | switch (size) { | |
369 | case 1: | |
370 | case 2: | |
371 | case 4: | |
372 | #ifdef __s390x__ | |
373 | case 8: | |
374 | #endif | |
375 | return __cmpxchg(ptr, old, new, size); | |
376 | default: | |
377 | return __cmpxchg_local_generic(ptr, old, new, size); | |
378 | } | |
379 | ||
380 | return old; | |
381 | } | |
382 | ||
383 | /* | |
384 | * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make | |
385 | * them available. | |
386 | */ | |
387 | #define cmpxchg_local(ptr, o, n) \ | |
388 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | |
389 | (unsigned long)(n), sizeof(*(ptr)))) | |
390 | #ifdef __s390x__ | |
391 | #define cmpxchg64_local(ptr, o, n) \ | |
392 | ({ \ | |
393 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | |
394 | cmpxchg_local((ptr), (o), (n)); \ | |
395 | }) | |
396 | #else | |
397 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) | |
398 | #endif | |
399 | ||
77fa2245 HC |
400 | /* |
401 | * Used to set the psw mask, except for the first byte, which |
402 | * won't be changed by this function. |
403 | */ | |
404 | static inline void | |
405 | __set_psw_mask(unsigned long mask) | |
406 | { | |
94c12cc7 | 407 | __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); |
77fa2245 HC |
408 | } |
409 | ||
c1821c2e GS |
410 | #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) |
411 | #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) | |
77fa2245 | 412 | |
1da177e4 LT |
413 | #ifdef CONFIG_SMP |
414 | ||
415 | extern void smp_ctl_set_bit(int cr, int bit); | |
416 | extern void smp_ctl_clear_bit(int cr, int bit); | |
417 | #define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) | |
418 | #define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) | |
419 | ||
420 | #else | |
421 | ||
422 | #define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit) | |
423 | #define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit) | |
424 | ||
425 | #endif /* CONFIG_SMP */ | |
426 | ||
484875b1 HC |
427 | static inline unsigned int stfl(void) |
428 | { | |
429 | asm volatile( | |
430 | " .insn s,0xb2b10000,0(0)\n" /* stfl */ | |
431 | "0:\n" | |
432 | EX_TABLE(0b,0b)); | |
433 | return S390_lowcore.stfl_fac_list; | |
434 | } | |
435 | ||
320c04c0 HC |
436 | static inline int __stfle(unsigned long long *list, int doublewords) |
437 | { | |
438 | typedef struct { unsigned long long _[doublewords]; } addrtype; | |
439 | register unsigned long __nr asm("0") = doublewords - 1; | |
440 | ||
441 | asm volatile(".insn s,0xb2b00000,%0" /* stfle */ | |
442 | : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc"); | |
443 | return __nr + 1; | |
444 | } | |
445 | ||
446 | static inline int stfle(unsigned long long *list, int doublewords) | |
447 | { | |
448 | if (!(stfl() & (1UL << 24))) | |
449 | return -EOPNOTSUPP; | |
450 | return __stfle(list, doublewords); | |
451 | } | |
452 | ||
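stfle() stores the extended facility list and returns how many doublewords the machine reports. A hedged sketch of testing a facility bit in the stored list; the helper name is hypothetical, and the code assumes the architecture's left-to-right facility bit numbering (bit 0 is the leftmost bit of the first doubleword).

```c
/* Illustrative: test facility bit 'nr' using a 4-doubleword buffer. */
static int test_facility_sketch(int nr)
{
	unsigned long long list[4] = { 0, 0, 0, 0 };

	if (stfle(list, 4) < 0)
		return 0;		/* stfle facility not available */
	if (nr >= 4 * 64)
		return 0;		/* outside the buffer we provided */
	/* Facility bits are numbered from the leftmost bit of the list. */
	return (list[nr / 64] >> (63 - (nr % 64))) & 1;
}
```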
2e5061e4 HC |
453 | static inline unsigned short stap(void) |
454 | { | |
455 | unsigned short cpu_address; | |
456 | ||
457 | asm volatile("stap %0" : "=m" (cpu_address)); | |
458 | return cpu_address; | |
459 | } | |
460 | ||
1da177e4 LT |
461 | extern void (*_machine_restart)(char *command); |
462 | extern void (*_machine_halt)(void); | |
463 | extern void (*_machine_power_off)(void); | |
464 | ||
465 | #define arch_align_stack(x) (x) | |
466 | ||
411788ea HC |
467 | #ifdef CONFIG_TRACE_IRQFLAGS |
468 | extern psw_t sysc_restore_trace_psw; | |
469 | extern psw_t io_restore_trace_psw; | |
470 | #endif | |
471 | ||
1da177e4 LT |
472 | #endif /* __KERNEL__ */ |
473 | ||
474 | #endif |