/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/watch.h>
#include <asm/war.h>


/*
 * switch_to(prev, next, last) switches from task `prev' to task `next',
 * storing the task we actually switched away from in `last'.  The
 * low-level register save/restore is done by resume().
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

extern unsigned int ll_bit;
extern struct task_struct *ll_task;
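/*
 * ll_bit and ll_task back the software emulation of the ll/sc instruction
 * pair on CPUs that lack it: the trap handlers record here which task holds
 * an active load-link, and __clear_software_ll_bit() below breaks that link
 * on every context switch.
 */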

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down.  If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);		\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while(0)
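
/*
 * Note: the body above also references `next'.  That is safe only because
 * this macro is expanded exclusively from switch_to() below, where both
 * `prev' and `next' are in scope.
 */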

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

#define __clear_software_ll_bit()					\
do {									\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)
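
/*
 * Clearing the software ll_bit on every task switch makes a subsequently
 * emulated sc in the incoming task fail, mirroring the way a hardware
 * ll/sc pair loses its link across a context switch.
 */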

#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	__clear_software_ll_bit();					\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)
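
/*
 * For orientation (scheduler side, not part of this file): the core
 * scheduler's context_switch() expands switch_to(prev, next, prev), and
 * finish_task_switch() then runs finish_arch_switch() below once the new
 * task is executing on its own stack.
 */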

#define finish_arch_switch(prev)					\
do {									\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
	__restore_watch();						\
} while (0)

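/*
 * Atomically store val into *m, returning the previous value.  Three
 * variants: a branch-likely ll/sc loop for CPUs that need R10000_LLSC_WAR,
 * a plain ll/sc sequence retried from C, and an interrupt-disabling
 * fallback for CPUs without ll/sc.
 */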
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long dummy;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	ll	%0, %3		# xchg_u32	\n"
			"	.set	mips0				\n"
			"	move	%2, %z4				\n"
			"	.set	mips3				\n"
			"	sc	%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
			: "R" (*m), "Jr" (val)
			: "memory");
		} while (unlikely(!dummy));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier */
	}

	smp_llsc_mb();

	return retval;
}
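
/*
 * The smp_mb__before_llsc() / smp_llsc_mb() pair above is what gives
 * xchg() the full memory-barrier semantics the kernel's atomic-operation
 * rules require on SMP.
 */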

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long dummy;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	lld	%0, %3		# xchg_u64	\n"
			"	move	%2, %z4				\n"
			"	scd	%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
			: "R" (*m), "Jr" (val)
			: "memory");
		} while (unlikely(!dummy));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier */
	}

	smp_llsc_mb();

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif
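
/*
 * On 32-bit kernels __xchg_u64 resolves to a deliberately undefined
 * extern, so an 8-byte xchg() becomes a link-time error instead of
 * silently producing wrong code.
 */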

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}

	return x;
}

#define xchg(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc);				\
									\
	((__typeof__(*(ptr)))						\
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))));	\
})
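
/*
 * Illustrative use (hypothetical variable, not from this file):
 *
 *	unsigned long old = xchg(&pending_flags, 0UL);
 *
 * atomically publishes the new value and returns the previous one.  The
 * BUILD_BUG_ON above accepts only operand sizes whose bits lie within 0xc
 * (in practice 4 and 8 bytes), so anything else fails at compile time
 * before reaching __xchg().
 */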

extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);

typedef void (*vi_handler_t)(void);
extern void *set_vi_handler(int n, vi_handler_t addr);

extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW
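
/*
 * __ARCH_WANT_UNLOCKED_CTXSW tells the core scheduler to drop the runqueue
 * lock across the context switch; the ia64 header referenced above carries
 * the original discussion of the deadlock this avoids.
 */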

extern unsigned long arch_align_stack(unsigned long sp);

#endif /* _ASM_SYSTEM_H */