[SPARC64]: More fully work around Spitfire Errata 51.
[deliverable/linux.git] include/asm-sparc64/system.h
/* $Id: system.h,v 1.69 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <linux/config.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__
/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c nor sun4 :) That's just history. */
#define ARCH_SUN4C_SUN4 0
#define ARCH_SUN4 0
extern void mb(void);
extern void rmb(void);
extern void wmb(void);
extern void membar_storeload(void);
extern void membar_storeload_storestore(void);
extern void membar_storeload_loadload(void);
extern void membar_storestore_loadstore(void);

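/*
 * Editor's note: these barriers are real out-of-line functions rather
 * than inline asm, which (per the commit title above) keeps the
 * Spitfire Errata 51 workaround in one implementation instead of at
 * every call site.  As an illustrative sketch only -- assuming no
 * errata handling -- a plain definition of mb() would look like this:
 */
#if 0
void mb(void)
{
	/* Full barrier: order loads and stores in both directions. */
	__asm__ __volatile__("membar	#LoadLoad | #LoadStore | "
			     "#StoreStore | #StoreLoad"
			     : : : "memory");
}
#endif
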
#endif

#define setipl(__new_ipl) \
	__asm__ __volatile__("wrpr	%0, %%pil" : : "r" (__new_ipl) : "memory")

#define local_irq_disable() \
	__asm__ __volatile__("wrpr	15, %%pil" : : : "memory")

#define local_irq_enable() \
	__asm__ __volatile__("wrpr	0, %%pil" : : : "memory")

#define getipl() \
({ unsigned long retval; __asm__ __volatile__("rdpr	%%pil, %0" : "=r" (retval)); retval; })

#define swap_pil(__new_pil) \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
			     "wrpr	%1, %%pil" \
			     : "=&r" (retval) \
			     : "r" (__new_pil) \
			     : "memory"); \
	retval; \
})

#define read_pil_and_cli() \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
			     "wrpr	15, %%pil" \
			     : "=r" (retval) \
			     : : "memory"); \
	retval; \
})

#define local_save_flags(flags)		((flags) = getipl())
#define local_irq_save(flags)		((flags) = read_pil_and_cli())
#define local_irq_restore(flags)	setipl((flags))

/* On sparc64 IRQ flags are the PIL register.  A value of zero
 * means all interrupt levels are enabled, any other value means
 * only IRQ levels greater than that value will be received.
 * Consequently this means that the lowest IRQ level is one.
 */
#define irqs_disabled()		\
({	unsigned long flags;	\
	local_save_flags(flags);\
	(flags > 0);		\
})

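/*
 * Illustrative sketch (hypothetical function, not part of this
 * header): a caller protects a critical section by saving the PIL
 * and raising it to 15 via local_irq_save(), then restoring the old
 * level afterwards.
 */
#if 0
static void example_pil_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* old PIL in flags, PIL now 15 */
	/* ... per-cpu state may be touched without interruption ... */
	local_irq_restore(flags);	/* put the saved PIL back */
}
#endif
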
#define nop() 		__asm__ __volatile__ ("nop")

#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_storeload_storestore(); } while(0)
#define set_wmb(__var, __value) \
	do { __var = __value; wmb(); } while(0)
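
/*
 * Illustrative sketch (hypothetical 'work' structure and compute()
 * helper): set_mb() is for a store that must be globally visible
 * before any later access, e.g. publishing a flag that another CPU
 * checks after its own read barrier.
 */
#if 0
	work.payload = compute();	/* fill in the data first */
	set_mb(work.ready, 1);		/* then publish, with the barrier */
#endif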

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)  __asm__ __volatile__("rd	%%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr	%0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)  __asm__ __volatile__("rd	%%pic, %0" : "=r" (__p))

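/*
 * Illustrative sketch (hypothetical helper): %pic holds two 32-bit
 * event counts packed into one 64-bit register, exactly as the
 * switch_to() accounting below assumes (low word accumulated into
 * kernel_cntd0, high word into kernel_cntd1).
 */
#if 0
static void example_read_counters(unsigned int *c0, unsigned int *c1)
{
	unsigned long pic;

	read_pic(pic);				/* both counters in one read */
	*c0 = (unsigned int) pic;		/* counter 0: low 32 bits */
	*c1 = (unsigned int) (pic >> 32);	/* counter 1: high 32 bits */
}
#endif
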
/* Blackbird errata workaround.  See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define reset_pic()						\
	__asm__ __volatile__("ba,pt	%xcc, 99f\n\t"		\
			     ".align	64\n"			\
			  "99:wr	%g0, 0x0, %pic\n\t"	\
			     "rd	%pic, %g0")

#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next)	\
do {					\
	flushw_all();			\
} while (0)

	/* See what happens when you design the chip correctly?
	 *
	 * We tell gcc we clobber all non-fixed-usage registers except
	 * for l0/l1.  It will use one for 'next' and the other to hold
	 * the output value of 'last'.  'next' is not referenced again
	 * past the invocation of switch_to in the scheduler, so we need
	 * not preserve its value.  Hairy, but it lets us remove 2 loads
	 * and 2 stores in this critical code path.  -DaveM
	 */
#if __GNUC__ >= 3
#define EXTRA_CLOBBER ,"%l1"
#else
#define EXTRA_CLOBBER
#endif
#define switch_to(prev, next, last)					\
do {	if (test_thread_flag(TIF_PERFCTR)) {				\
		unsigned long __tmp;					\
		read_pcr(__tmp);					\
		current_thread_info()->pcr_reg = __tmp;			\
		read_pic(__tmp);					\
		current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
		current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
	}								\
	flush_tlb_pending();						\
	save_and_clear_fpu();						\
	/* If you are tempted to conditionalize the following */	\
	/* so that ASI is only written if it changes, think again. */	\
	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
	: : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
	__asm__ __volatile__(						\
	"mov	%%g4, %%g7\n\t"						\
	"wrpr	%%g0, 0x95, %%pstate\n\t"				\
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t"				\
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t"				\
	"rdpr	%%wstate, %%o5\n\t"					\
	"stx	%%o6, [%%g6 + %3]\n\t"					\
	"stb	%%o5, [%%g6 + %2]\n\t"					\
	"rdpr	%%cwp, %%o5\n\t"					\
	"stb	%%o5, [%%g6 + %5]\n\t"					\
	"mov	%1, %%g6\n\t"						\
	"ldub	[%1 + %5], %%g1\n\t"					\
	"wrpr	%%g1, %%cwp\n\t"					\
	"ldx	[%%g6 + %3], %%o6\n\t"					\
	"ldub	[%%g6 + %2], %%o5\n\t"					\
	"ldub	[%%g6 + %4], %%o7\n\t"					\
	"mov	%%g6, %%l2\n\t"						\
	"wrpr	%%o5, 0x0, %%wstate\n\t"				\
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t"				\
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t"				\
	"wrpr	%%g0, 0x94, %%pstate\n\t"				\
	"mov	%%l2, %%g6\n\t"						\
	"ldx	[%%g6 + %6], %%g4\n\t"					\
	"wrpr	%%g0, 0x96, %%pstate\n\t"				\
	"brz,pt %%o7, 1f\n\t"						\
	" mov	%%g7, %0\n\t"						\
	"b,a ret_from_syscall\n\t"					\
	"1:\n\t"							\
	: "=&r" (last)							\
	: "0" (next->thread_info),					\
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),		\
	  "i" (TI_CWP), "i" (TI_TASK)					\
	: "cc",								\
	        "g1", "g2", "g3",       "g7",				\
	              "l2", "l3", "l4", "l5", "l6", "l7",		\
	  "i0", "i1", "i2", "i3", "i4", "i5",				\
	  "o0", "o1", "o2", "o3", "o4", "o5",       "o7" EXTRA_CLOBBER);\
	/* If you fuck with this, update ret_from_syscall code too. */	\
	if (test_thread_flag(TIF_PERFCTR)) {				\
		write_pcr(current_thread_info()->pcr_reg);		\
		reset_pic();						\
	}								\
} while(0)

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	};
	__xchg_called_with_bad_pointer();
	return x;
}

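/*
 * Illustrative sketch (hypothetical lock variable and function, not
 * part of this header): tas() is the classic test-and-set primitive,
 * so a naive spin-lock acquire can be built directly on top of it.
 * A real unlock would also need a memory barrier before the store.
 */
#if 0
static unsigned int example_lock;

static void example_spin_lock(void)
{
	while (tas(&example_lock))	/* atomically set to 1; old value */
		while (example_lock)	/* nonzero means it was held, so */
			;		/* spin on plain reads until free */
}
#endif
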
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "cas [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "casx [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
  ({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
  })

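/*
 * Illustrative sketch (hypothetical counter and function): the usual
 * cmpxchg() retry loop.  Success is detected exactly as the comment
 * above describes, by comparing the returned value with the 'old'
 * that was passed in.
 */
#if 0
static void example_atomic_add(volatile int *counter, int delta)
{
	int old, new;

	do {
		old = *counter;		/* snapshot the current value */
		new = old + delta;	/* compute the desired update */
	} while (cmpxchg(counter, old, new) != old);
}
#endif
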
#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */