/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>

#include <asm/hw_irq.h>
#include <asm/ppc_asm.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We have to use the sync instruction for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.  Note that lwsync is interpreted as sync by
 * 32-bit and older 64-bit CPUs.
 *
 * For wmb(), we use sync, since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP, since it is only used to order updates to system memory.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()  do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
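
/*
 * Example (illustration only, hence #if 0): a minimal smp_wmb()/smp_rmb()
 * pairing.  The example_* names are hypothetical.  The producer orders
 * its payload store before the flag store; the consumer orders the flag
 * load before the payload load, so a reader that sees the flag set also
 * sees the payload.
 */
#if 0
static int example_buf;
static int example_ready;

static void example_produce(int v)
{
	example_buf = v;	/* store the payload ... */
	smp_wmb();		/* ... and order it before the flag store */
	example_ready = 1;
}

static int example_consume(void)
{
	if (example_ready) {
		smp_rmb();	/* order the flag load before the payload load */
		return example_buf;
	}
	return -1;		/* nothing published yet */
}
#endif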

#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#ifdef CONFIG_XMON
extern void xmon_init(int enable);
#endif

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
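
/*
 * Example (illustration only, hence #if 0): a debugger installs its
 * handlers by assigning the __debugger_* hooks; the trap code then calls
 * the wrappers generated above, which return non-zero when the hook
 * claimed the exception.  The my_* names are hypothetical.
 */
#if 0
static int my_debugger_bpt(struct pt_regs *regs)
{
	/* inspect regs and decide whether this breakpoint is ours */
	return 1;	/* non-zero: exception handled, skip normal handling */
}

static void my_debugger_init(void)
{
	__debugger_bpt = my_debugger_bpt;
}
#endif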

extern int set_dabr(unsigned long dabr);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs *regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
		struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __inline__ unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	/* load-and-reserve / store-conditional loop: bne- retries the
	 * stwcx. until no other CPU has touched the word in between */
	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "=m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))
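
/*
 * Example (illustration only, hence #if 0): tas() as a test-and-set
 * primitive.  xchg() returns the previous value, so a return of 0 means
 * the lock was free and we now own it.  The example_* names are
 * hypothetical; real code should use the spinlock API instead.
 */
#if 0
static unsigned int example_lock;

static int example_trylock(void)
{
	return tas(&example_lock) == 0;
}

static void example_unlock(void)
{
	smp_mb();		/* order the critical section before the release */
	example_lock = 0;
}
#endif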

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
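
/*
 * Example (illustration only, hence #if 0): the canonical cmpxchg()
 * retry loop, here adding to a counter without a lock.  The example_*
 * names are hypothetical.  The loop re-reads the current value and
 * retries until no other CPU has changed it between the read and the
 * cmpxchg().
 */
#if 0
static unsigned int example_counter;

static void example_add(unsigned int n)
{
	unsigned int old;

	do {
		old = example_counter;
	} while (cmpxchg(&example_counter, old, old + n) != old);
}
#endif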

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware.  On the other hand,
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers-of-2 writes until it reaches sufficient alignment).
 *
 * Based on this, we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
#endif

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */