[S390] kernel: Add z/VM LGR detection
arch/s390/include/asm/system.h
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

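/*
 * Save the floating point registers to the given save area. The four
 * basic registers (0, 2, 4, 6) are always stored; the floating point
 * control register and the remaining twelve registers are stored only
 * when the machine has the IEEE floating point facility.
 */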
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                " std 0,%O0+8(%R0)\n"
                " std 2,%O0+24(%R0)\n"
                " std 4,%O0+40(%R0)\n"
                " std 6,%O0+56(%R0)"
                : "=Q" (*fpregs) : "Q" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                " stfpc %0\n"
                " std 1,%O0+16(%R0)\n"
                " std 3,%O0+32(%R0)\n"
                " std 5,%O0+48(%R0)\n"
                " std 7,%O0+64(%R0)\n"
                " std 8,%O0+72(%R0)\n"
                " std 9,%O0+80(%R0)\n"
                " std 10,%O0+88(%R0)\n"
                " std 11,%O0+96(%R0)\n"
                " std 12,%O0+104(%R0)\n"
                " std 13,%O0+112(%R0)\n"
                " std 14,%O0+120(%R0)\n"
                " std 15,%O0+128(%R0)\n"
                : "=Q" (*fpregs) : "Q" (*fpregs));
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                " ld 0,%O0+8(%R0)\n"
                " ld 2,%O0+24(%R0)\n"
                " ld 4,%O0+40(%R0)\n"
                " ld 6,%O0+56(%R0)"
                : : "Q" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                " lfpc %0\n"
                " ld 1,%O0+16(%R0)\n"
                " ld 3,%O0+32(%R0)\n"
                " ld 5,%O0+48(%R0)\n"
                " ld 7,%O0+64(%R0)\n"
                " ld 8,%O0+72(%R0)\n"
                " ld 9,%O0+80(%R0)\n"
                " ld 10,%O0+88(%R0)\n"
                " ld 11,%O0+96(%R0)\n"
                " ld 12,%O0+104(%R0)\n"
                " ld 13,%O0+112(%R0)\n"
                " ld 14,%O0+120(%R0)\n"
                " ld 15,%O0+128(%R0)\n"
                : : "Q" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
        asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
        asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

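/*
 * Kernel threads have no mm and do not use the floating point or
 * access registers from user context, so switch_to saves and restores
 * this state only for tasks that own an mm.
 */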
#define switch_to(prev,next,last) do {                          \
        if (prev->mm) {                                         \
                save_fp_regs(&prev->thread.fp_regs);            \
                save_access_regs(&prev->thread.acrs[0]);        \
        }                                                       \
        if (next->mm) {                                         \
                restore_fp_regs(&next->thread.fp_regs);         \
                restore_access_regs(&next->thread.acrs[0]);     \
                update_per_regs(next);                          \
        }                                                       \
        prev = __switch_to(prev,next);                          \
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

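/*
 * pfault_init/pfault_fini enable and disable the z/VM pseudo-page-fault
 * handshake for the calling CPU when the kernel is built with
 * CONFIG_PFAULT; without it, pfault_init simply reports failure.
 */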
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_init()   ({-1;})
#define pfault_fini()   do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);
extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);

#define finish_arch_switch(prev) do {                           \
        set_fs(current->thread.mm_segment);                     \
        account_vtime(prev, current);                           \
} while (0)

#define nop() asm volatile("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio() asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()

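/*
 * Illustrative pairing (data, flag, compute and use are hypothetical
 * names for a value published by one CPU and consumed by another):
 *
 *      CPU 0                           CPU 1
 *      data = compute();               while (!flag)
 *      smp_wmb();                              cpu_relax();
 *      flag = 1;                       smp_rmb();
 *                                      use(data);
 */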

#define set_mb(var, value)      do { var = value; mb(); } while (0)

#ifdef __s390x__

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                " lctlg %1,%2,%0\n"                             \
                : : "Q" (*(addrtype *)(&array)),                \
                    "i" (low), "i" (high));                     \
        })

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                " stctg %1,%2,%0\n"                             \
                : "=Q" (*(addrtype *)(&array))                  \
                : "i" (low), "i" (high));                       \
        })

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                " lctl %1,%2,%0\n"                              \
                : : "Q" (*(addrtype *)(&array)),                \
                    "i" (low), "i" (high));                     \
})

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                " stctl %1,%2,%0\n"                             \
                : "=Q" (*(addrtype *)(&array))                  \
                : "i" (low), "i" (high));                       \
        })

#endif /* __s390x__ */

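/*
 * __ctl_set_bit and __ctl_clear_bit modify a single bit in a control
 * register of the current CPU: the register is stored into a scratch
 * word, the bit (counted from the least significant bit, as in
 * 1UL << bit) is changed, and the word is loaded back.
 */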
#define __ctl_set_bit(cr, bit) ({       \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy |= 1UL << (bit);        \
        __ctl_load(__dummy, cr, cr);    \
})

#define __ctl_clear_bit(cr, bit) ({     \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy &= ~(1UL << (bit));     \
        __ctl_load(__dummy, cr, cr);    \
})

/*
 * Set the PSW mask, except for the first byte, which is
 * left unchanged by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
        __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

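/*
 * local_mcck_enable/local_mcck_disable rewrite the PSW mask of the
 * current CPU to allow or suppress machine-check interruptions while
 * keeping DAT enabled.
 */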
#define local_mcck_enable() \
        __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
        __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

#define MAX_FACILITY_BIT (256*8)  /* stfle_fac_list has 256 bytes */

/*
 * The test_facility function uses the bit ordering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
 */
static inline int test_facility(unsigned long nr)
{
        unsigned char *ptr;

        if (nr >= MAX_FACILITY_BIT)
                return 0;
        ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
        return (*ptr & (0x80 >> (nr & 7))) != 0;
}

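/*
 * Illustrative usage, with N standing for a facility number as listed
 * in the Principles of Operation and enable_optional_code_path() as a
 * placeholder for the caller's own code:
 *
 *      if (test_facility(N))
 *              enable_optional_code_path();
 */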
/**
 * stfle - Store facility list extended
 * @stfle_fac_list: array where facility list can be stored
 * @size: size of passed in array in double words
 */
static inline void stfle(u64 *stfle_fac_list, int size)
{
        unsigned long nr;

        preempt_disable();
        S390_lowcore.stfl_fac_list = 0;
        asm volatile(
                " .insn s,0xb2b10000,0(0)\n" /* stfl */
                "0:\n"
                EX_TABLE(0b, 0b)
                : "=m" (S390_lowcore.stfl_fac_list));
        nr = 4; /* bytes stored by stfl */
        memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
        if (S390_lowcore.stfl_fac_list & 0x01000000) {
                /* More facility bits available with stfle */
                register unsigned long reg0 asm("0") = size - 1;

                asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
                        : "+d" (reg0)
                        : "a" (stfle_fac_list)
                        : "memory", "cc");
                nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
        }
        memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
        preempt_enable();
}

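/*
 * A caller passes a buffer sized in double words, for example
 * (illustrative only):
 *
 *      u64 fac_list[4];
 *
 *      stfle(fac_list, ARRAY_SIZE(fac_list));
 */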
static inline unsigned short stap(void)
{
        unsigned short cpu_address;

        asm volatile("stap %0" : "=m" (cpu_address));
        return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

extern unsigned long arch_align_stack(unsigned long sp);

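/*
 * tprot tests the protection of the page at the given address. It
 * returns the condition code of the tprot instruction (0-3), or
 * -EFAULT if the test itself raises an exception for that address.
 */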
static inline int tprot(unsigned long addr)
{
        int rc = -EFAULT;

        asm volatile(
                " tprot 0(%1),0\n"
                "0: ipm %0\n"
                " srl %0,28\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+d" (rc) : "a" (addr) : "cc");
        return rc;
}

#endif /* __KERNEL__ */

#endif