[MIPS] MT: Improved multithreading support.
[deliverable/linux.git] / include/asm-mips/hazards.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/config.h>

#ifdef __ASSEMBLY__
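
/*
 * Note on encodings: "sll $0, $0, 1" is SSNOP (the superscalar no-op) and
 * "sll $0, $0, 3" is EHB, the MIPS32R2 execution hazard barrier.  Both are
 * NOP-class encodings, so CPUs that do not know the special meaning simply
 * execute them as ordinary no-ops.
 */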
	.macro	_ssnop
	sll	$0, $0, 1
	.endm

	.macro	_ehb
	sll	$0, $0, 3
	.endm

/*
 * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#ifdef CONFIG_CPU_RM9000

	.macro	mtc0_tlbw_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

	.macro	tlbw_eret_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

#else

/*
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single cycle
 * hazard so this is a nice trick to get optimal code for a range of
 * processors.
 */
	.macro	mtc0_tlbw_hazard
	b	. + 8
	.endm

	.macro	tlbw_eret_hazard
	.endm
#endif
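
/*
 * Rough usage sketch (illustrative only, not copied from a real handler):
 * in a .S TLB write sequence the hazard macros sit between the CP0 update,
 * the TLB write and the subsequent eret, along the lines of
 *
 *	mtc0	k1, CP0_ENTRYHI
 *	mtc0_tlbw_hazard
 *	tlbwr
 *	tlbw_eret_hazard
 *	eret
 */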

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */

#ifdef CONFIG_CPU_MIPSR2
/*
 * Use the _ehb macro (the sll encoding of ehb) so this assembles even
 * without explicit MIPSR2 support in the assembler.
 */

#define irq_enable_hazard						\
	_ehb

#define irq_disable_hazard						\
	_ehb

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */

#define irq_enable_hazard

#define irq_disable_hazard

#else

/*
 * Classic MIPS needs 1 - 3 nops or ssnops
 */
#define irq_enable_hazard
#define irq_disable_hazard						\
	_ssnop; _ssnop; _ssnop

#endif

#else /* __ASSEMBLY__ */

__asm__(
	"	.macro	_ssnop					\n"
	"	sll	$0, $0, 1				\n"
	"	.endm						\n"
	"							\n"
	"	.macro	_ehb					\n"
	"	sll	$0, $0, 3				\n"
	"	.endm						\n");

#ifdef CONFIG_CPU_RM9000

/*
 * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */

#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
	"	.set	mips32					\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	.set	mips0					\n")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
	"	.set	mips32					\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	.set	mips0					\n")

#else

/*
 * Overkill warning ...
 */
#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
	"	.set	noreorder				\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	.set	reorder					\n")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
	"	.set	noreorder				\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	.set	reorder					\n")

#endif
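
/*
 * Rough usage sketch (illustrative only, not a verbatim copy of
 * arch/mips/mm/tlb-r4k.c): the hazard macros bracket the actual TLB write
 * when an entry is updated from C code, along the lines of
 *
 *	write_c0_entryhi(entryhi);
 *	write_c0_entrylo0(entrylo0);
 *	write_c0_entrylo1(entrylo1);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 *
 * where entryhi/entrylo0/entrylo1 stand for the fields of the new entry.
 */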

/*
 * Interrupt enable/disable hazards
 * Some processors have hazards when modifying
 * the status register to change the interrupt state
 */

#ifdef CONFIG_CPU_MIPSR2

__asm__("	.macro	irq_enable_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n"
	"							\n"
	"	.macro	irq_disable_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n");

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */

__asm__(
	"	.macro	irq_enable_hazard			\n"
	"	.endm						\n"
	"							\n"
	"	.macro	irq_disable_hazard			\n"
	"	.endm						\n");

#else

/*
 * Default for classic MIPS processors.  Assume worst case hazards but don't
 * care about the irq_enable_hazard - sooner or later the interrupt enable
 * will take effect and we don't care exactly when.
 */

__asm__(
	"	#						\n"
	"	# There is a hazard but we do not care		\n"
	"	#						\n"
	"	.macro\tirq_enable_hazard			\n"
	"	.endm						\n"
	"							\n"
	"	.macro\tirq_disable_hazard			\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	.endm						\n");

#endif

#define irq_enable_hazard()						\
	__asm__ __volatile__("irq_enable_hazard")
#define irq_disable_hazard()						\
	__asm__ __volatile__("irq_disable_hazard")
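
/*
 * Rough usage sketch (modelled on the MIPS local_irq_disable() code, whose
 * exact wording may differ): the barrier goes after the write to the CP0
 * Status register and before code that relies on interrupts actually being
 * off, e.g.
 *
 *	clear_c0_status(ST0_IE);
 *	irq_disable_hazard();
 *	... code that must run with interrupts disabled ...
 */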


/*
 * Back-to-back hazards -
 *
 * What is needed to separate a move to cp0 from a subsequent read from the
 * same cp0 register?
 */
#ifdef CONFIG_CPU_MIPSR2

__asm__("	.macro	back_to_back_c0_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n");

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
	defined(CONFIG_CPU_SB1)

__asm__("	.macro	back_to_back_c0_hazard			\n"
	"	.endm						\n");

#else

__asm__("	.macro	back_to_back_c0_hazard			\n"
	"	.set	noreorder				\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	.set	reorder					\n"
	"	.endm						\n");

#endif

#define back_to_back_c0_hazard()					\
	__asm__ __volatile__("back_to_back_c0_hazard")
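
/*
 * Rough usage sketch (illustrative only): the barrier separates a CP0 write
 * from a dependent read of the same register, e.g.
 *
 *	write_c0_status(new_status);
 *	back_to_back_c0_hazard();
 *	status = read_c0_status();
 *
 * so the read is guaranteed to observe the value just written.
 */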


/*
 * Instruction execution hazard
 */
#ifdef CONFIG_CPU_MIPSR2
/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit and 64-bit code respectively, so neither can be used
 * without conditional compilation.  The alternative is switching the
 * assembler to 64-bit code, which happens to work right even for 32-bit
 * code ...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#else
#define instruction_hazard() do { } while (0)
#endif
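
/*
 * Rough usage sketch (illustrative only): after instructions have been
 * modified, e.g. dynamically generated code, a caller would do something
 * along the lines of
 *
 *	flush_icache_range(start, end);
 *	instruction_hazard();
 *
 * so that no stale instructions are fetched past this point.
 */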
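
/* C-callable instruction hazard barrier, implemented in the MT/SMTC support code. */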
extern void mips_ihb(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */