/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/ptrace.h>
#include <asm/war.h>
#include <asm/interrupt.h>

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */
75 | ||
76 | #define read_barrier_depends() do { } while(0) | |
77 | ||
78 | #ifdef CONFIG_CPU_HAS_SYNC | |
79 | #define __sync() \ | |
80 | __asm__ __volatile__( \ | |
81 | ".set push\n\t" \ | |
82 | ".set noreorder\n\t" \ | |
83 | ".set mips2\n\t" \ | |
84 | "sync\n\t" \ | |
85 | ".set pop" \ | |
86 | : /* no output */ \ | |
87 | : /* no input */ \ | |
88 | : "memory") | |
89 | #else | |
90 | #define __sync() do { } while(0) | |
91 | #endif | |
92 | ||
93 | #define __fast_iob() \ | |
94 | __asm__ __volatile__( \ | |
95 | ".set push\n\t" \ | |
96 | ".set noreorder\n\t" \ | |
97 | "lw $0,%0\n\t" \ | |
98 | "nop\n\t" \ | |
99 | ".set pop" \ | |
100 | : /* no output */ \ | |
101 | : "m" (*(int *)CKSEG1) \ | |
102 | : "memory") | |
103 | ||
104 | #define fast_wmb() __sync() | |
105 | #define fast_rmb() __sync() | |
106 | #define fast_mb() __sync() | |
107 | #define fast_iob() \ | |
108 | do { \ | |
109 | __sync(); \ | |
110 | __fast_iob(); \ | |
111 | } while (0) | |
112 | ||
113 | #ifdef CONFIG_CPU_HAS_WB | |
114 | ||
115 | #include <asm/wbflush.h> | |
116 | ||
117 | #define wmb() fast_wmb() | |
118 | #define rmb() fast_rmb() | |
119 | #define mb() wbflush() | |
120 | #define iob() wbflush() | |
121 | ||
122 | #else /* !CONFIG_CPU_HAS_WB */ | |
123 | ||
124 | #define wmb() fast_wmb() | |
125 | #define rmb() fast_rmb() | |
126 | #define mb() fast_mb() | |
127 | #define iob() fast_iob() | |
128 | ||
129 | #endif /* !CONFIG_CPU_HAS_WB */ | |
130 | ||
131 | #ifdef CONFIG_SMP | |
132 | #define smp_mb() mb() | |
133 | #define smp_rmb() rmb() | |
134 | #define smp_wmb() wmb() | |
135 | #define smp_read_barrier_depends() read_barrier_depends() | |
136 | #else | |
137 | #define smp_mb() barrier() | |
138 | #define smp_rmb() barrier() | |
139 | #define smp_wmb() barrier() | |
140 | #define smp_read_barrier_depends() do { } while(0) | |
141 | #endif | |
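
/*
 * Illustrative sketch only (not part of the original header): one common
 * way the barriers above pair up, following the read_barrier_depends()
 * documentation earlier in this file.  A writer publishes a pointer to
 * freshly initialised data; a reader dereferences it.  The names
 * "struct example_msg", "example_buf" and "example_ptr" are hypothetical
 * and exist only for this sketch.
 */
#if 0	/* example only, never compiled */
struct example_msg {
	int payload;
};

static struct example_msg example_buf;
static struct example_msg *example_ptr;

/* Writer: make the payload visible before the pointer that leads to it. */
static inline void example_publish(int value)
{
	example_buf.payload = value;
	smp_wmb();			/* order payload store before pointer store */
	example_ptr = &example_buf;
}

/* Reader: the data dependency plus this barrier orders the two loads. */
static inline int example_consume(void)
{
	struct example_msg *p = example_ptr;

	smp_read_barrier_depends();	/* pairs with smp_wmb() in example_publish() */
	return p ? p->payload : -1;
}
#endif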
142 | ||
143 | #define set_mb(var, value) \ | |
144 | do { var = value; mb(); } while (0) | |
145 | ||
146 | #define set_wmb(var, value) \ | |
147 | do { var = value; wmb(); } while (0) | |
148 | ||
149 | /* | |
150 | * switch_to(n) should switch tasks to task nr n, first | |
151 | * checking that n isn't the current task, in which case it does nothing. | |
152 | */ | |
153 | extern asmlinkage void *resume(void *last, void *next, void *next_ti); | |
154 | ||
155 | struct task_struct; | |
156 | ||
f088fc84 RB |
157 | #ifdef CONFIG_MIPS_MT_FPAFF |
158 | ||
159 | /* | |
160 | * Handle the scheduler resume end of FPU affinity management. We do this | |
161 | * inline to try to keep the overhead down. If we have been forced to run on | |
162 | * a "CPU" with an FPU because of a previous high level of FP computation, | |
163 | * but did not actually use the FPU during the most recent time-slice (CU1 | |
164 | * isn't set), we undo the restriction on cpus_allowed. | |
165 | * | |
166 | * We're not calling set_cpus_allowed() here, because we have no need to | |
167 | * force prompt migration - we're already switching the current CPU to a | |
168 | * different thread. | |
169 | */ | |
170 | ||
171 | #define switch_to(prev,next,last) \ | |
172 | do { \ | |
173 | if (cpu_has_fpu && \ | |
174 | (prev->thread.mflags & MF_FPUBOUND) && \ | |
175 | (!(KSTK_STATUS(prev) & ST0_CU1))) { \ | |
176 | prev->thread.mflags &= ~MF_FPUBOUND; \ | |
177 | prev->cpus_allowed = prev->thread.user_cpus_allowed; \ | |
178 | } \ | |
179 | if (cpu_has_dsp) \ | |
180 | __save_dsp(prev); \ | |
181 | next->thread.emulated_fp = 0; \ | |
182 | (last) = resume(prev, next, next->thread_info); \ | |
183 | if (cpu_has_dsp) \ | |
184 | __restore_dsp(current); \ | |
185 | } while(0) | |
186 | ||
187 | #else | |
e50c0a8f RB |
188 | #define switch_to(prev,next,last) \ |
189 | do { \ | |
190 | if (cpu_has_dsp) \ | |
191 | __save_dsp(prev); \ | |
40bc9c67 | 192 | (last) = resume(prev, next, task_thread_info(next)); \ |
e50c0a8f RB |
193 | if (cpu_has_dsp) \ |
194 | __restore_dsp(current); \ | |
1da177e4 | 195 | } while(0) |
f088fc84 | 196 | #endif |
1da177e4 | 197 | |
4dc7a0bb IM |
198 | /* |
199 | * On SMP systems, when the scheduler does migration-cost autodetection, | |
200 | * it needs a way to flush as much of the CPU's caches as possible. | |
201 | * | |
202 | * TODO: fill this in! | |
203 | */ | |
204 | static inline void sched_cacheflush(void) | |
205 | { | |
206 | } | |
207 | ||
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif
312 | ||
313 | /* This function doesn't exist, so you'll get a linker error | |
314 | if something tries to do an invalid xchg(). */ | |
315 | extern void __xchg_called_with_bad_pointer(void); | |
316 | ||
317 | static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) | |
318 | { | |
319 | switch (size) { | |
0cea043b RB |
320 | case 4: |
321 | return __xchg_u32(ptr, x); | |
322 | case 8: | |
323 | return __xchg_u64(ptr, x); | |
1da177e4 LT |
324 | } |
325 | __xchg_called_with_bad_pointer(); | |
326 | return x; | |
327 | } | |
328 | ||
329 | #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) | |
330 | #define tas(ptr) (xchg((ptr),1)) | |
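
/*
 * Illustrative sketch only (not part of the original header): xchg()/tas()
 * used as a trivial test-and-set busy-wait lock.  The variable name
 * "example_lock" is hypothetical; real code should use the kernel spinlock
 * API rather than open-coding anything like this.
 */
#if 0	/* example only, never compiled */
static volatile int example_lock;

static inline void example_acquire(void)
{
	/* tas() atomically stores 1 and returns the previous value. */
	while (tas(&example_lock) != 0)
		;			/* spin until the old value was 0 */
}

static inline void example_release(void)
{
	example_lock = 0;		/* a real lock would also order preceding stores */
}
#endif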
331 | ||
332 | #define __HAVE_ARCH_CMPXCHG 1 | |
333 | ||
334 | static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, | |
335 | unsigned long new) | |
336 | { | |
337 | __u32 retval; | |
338 | ||
339 | if (cpu_has_llsc && R10000_LLSC_WAR) { | |
340 | __asm__ __volatile__( | |
aac8aa77 | 341 | " .set push \n" |
1da177e4 | 342 | " .set noat \n" |
c4559f67 | 343 | " .set mips3 \n" |
1da177e4 LT |
344 | "1: ll %0, %2 # __cmpxchg_u32 \n" |
345 | " bne %0, %z3, 2f \n" | |
f99d3023 | 346 | " .set mips0 \n" |
1da177e4 | 347 | " move $1, %z4 \n" |
f99d3023 | 348 | " .set mips3 \n" |
1da177e4 LT |
349 | " sc $1, %1 \n" |
350 | " beqzl $1, 1b \n" | |
1da177e4 LT |
351 | #ifdef CONFIG_SMP |
352 | " sync \n" | |
353 | #endif | |
354 | "2: \n" | |
aac8aa77 | 355 | " .set pop \n" |
3e6cb2d3 | 356 | : "=&r" (retval), "=R" (*m) |
1da177e4 LT |
357 | : "R" (*m), "Jr" (old), "Jr" (new) |
358 | : "memory"); | |
359 | } else if (cpu_has_llsc) { | |
360 | __asm__ __volatile__( | |
aac8aa77 | 361 | " .set push \n" |
1da177e4 | 362 | " .set noat \n" |
c4559f67 | 363 | " .set mips3 \n" |
1da177e4 LT |
364 | "1: ll %0, %2 # __cmpxchg_u32 \n" |
365 | " bne %0, %z3, 2f \n" | |
f99d3023 | 366 | " .set mips0 \n" |
1da177e4 | 367 | " move $1, %z4 \n" |
f99d3023 | 368 | " .set mips3 \n" |
1da177e4 LT |
369 | " sc $1, %1 \n" |
370 | " beqz $1, 1b \n" | |
371 | #ifdef CONFIG_SMP | |
372 | " sync \n" | |
373 | #endif | |
374 | "2: \n" | |
aac8aa77 | 375 | " .set pop \n" |
3e6cb2d3 | 376 | : "=&r" (retval), "=R" (*m) |
1da177e4 LT |
377 | : "R" (*m), "Jr" (old), "Jr" (new) |
378 | : "memory"); | |
379 | } else { | |
380 | unsigned long flags; | |
381 | ||
382 | local_irq_save(flags); | |
383 | retval = *m; | |
384 | if (retval == old) | |
385 | *m = new; | |
386 | local_irq_restore(flags); /* implies memory barrier */ | |
387 | } | |
388 | ||
389 | return retval; | |
390 | } | |

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif
451 | ||
452 | /* This function doesn't exist, so you'll get a linker error | |
453 | if something tries to do an invalid cmpxchg(). */ | |
454 | extern void __cmpxchg_called_with_bad_pointer(void); | |
455 | ||
456 | static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, | |
457 | unsigned long new, int size) | |
458 | { | |
459 | switch (size) { | |
460 | case 4: | |
461 | return __cmpxchg_u32(ptr, old, new); | |
462 | case 8: | |
463 | return __cmpxchg_u64(ptr, old, new); | |
464 | } | |
465 | __cmpxchg_called_with_bad_pointer(); | |
466 | return old; | |
467 | } | |
468 | ||
469 | #define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr)))) | |
470 | ||
extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern NORET_TYPE void die(const char *, struct pt_regs *);

static inline void die_if_kernel(const char *str, struct pt_regs *regs)
{
	if (unlikely(!user_mode(regs)))
		die(str, regs);
}

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */