Sparseify MIPS.
deliverable/linux.git: include/asm-mips/hazards.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004 Ralf Baechle
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/config.h>

#ifdef __ASSEMBLY__

	.macro	_ssnop
	sll	$0, $0, 1
	.endm

	.macro	_ehb
	sll	$0, $0, 3
	.endm

/*
 * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#ifdef CONFIG_CPU_RM9000

	.macro	mtc0_tlbw_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

	.macro	tlbw_eret_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

#else

/*
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400. Other processors only have a single cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors. The branch target ". + 8" is simply the next instruction
 * after the delay slot, so execution just falls straight through.
 */
	.macro	mtc0_tlbw_hazard
	b	. + 8
	.endm

	.macro	tlbw_eret_hazard
	.endm
#endif
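
/*
 * Illustrative sketch (not part of the original file): a TLB write sequence
 * in a low-level handler would typically invoke the macros above like this.
 * The register and CP0 names below are only an assumed example.
 *
 *	mtc0	k0, CP0_ENTRYLO0	# set up the new TLB entry
 *	mtc0	k1, CP0_ENTRYLO1
 *	mtc0_tlbw_hazard		# cover the mtc0 -> tlbwr hazard
 *	tlbwr				# write a random TLB entry
 *	tlbw_eret_hazard		# cover the tlbwr -> eret hazard
 *	eret
 */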

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */

#ifdef CONFIG_CPU_MIPSR2
/*
 * Use a macro for ehb unless explicit support for MIPSR2 is enabled
 */

#define irq_enable_hazard						\
	_ehb

#define irq_disable_hazard						\
	_ehb

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
 */

#define irq_enable_hazard

#define irq_disable_hazard

#else

/*
 * Classic MIPS needs 1 - 3 nops or ssnops
 */
#define irq_enable_hazard
#define irq_disable_hazard						\
	_ssnop; _ssnop; _ssnop

#endif
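
/*
 * Illustrative sketch (not part of the original file): these macros are
 * meant to follow a CP0 Status update that changes the interrupt enable
 * bit, e.g. (register names assumed for the example):
 *
 *	mfc0	t0, CP0_STATUS
 *	ori	t0, 1
 *	xori	t0, 1			# clear IE
 *	mtc0	t0, CP0_STATUS
 *	irq_disable_hazard		# interrupts are really off after this
 */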

#else /* __ASSEMBLY__ */

__asm__(
	"	.macro	_ssnop					\n\t"
	"	sll	$0, $0, 1				\n\t"
	"	.endm						\n\t"
	"							\n\t"
	"	.macro	_ehb					\n\t"
	"	sll	$0, $0, 3				\n\t"
	"	.endm						\n\t");

#ifdef CONFIG_CPU_RM9000
/*
 * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */

#define mtc0_tlbw_hazard() \
	__asm__ __volatile__( \
		".set\tmips32\n\t" \
		"_ssnop; _ssnop; _ssnop; _ssnop\n\t" \
		".set\tmips0")

#define tlbw_use_hazard() \
	__asm__ __volatile__( \
		".set\tmips32\n\t" \
		"_ssnop; _ssnop; _ssnop; _ssnop\n\t" \
		".set\tmips0")

#define back_to_back_c0_hazard()	do { } while (0)

#else

/*
 * Overkill warning ...
 */
#define mtc0_tlbw_hazard() \
	__asm__ __volatile__( \
		".set noreorder\n\t" \
		"nop; nop; nop; nop; nop; nop;\n\t" \
		".set reorder\n\t")

#define tlbw_use_hazard() \
	__asm__ __volatile__( \
		".set noreorder\n\t" \
		"nop; nop; nop; nop; nop; nop;\n\t" \
		".set reorder\n\t")

#define back_to_back_c0_hazard() \
	__asm__ __volatile__( \
	"	.set noreorder				\n" \
	"	nop; nop; nop				\n" \
	"	.set reorder				\n")

#endif
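
/*
 * Illustrative sketch (not part of the original file): C code updating the
 * TLB would typically wrap the actual write with these hazard barriers,
 * e.g. (the CP0 accessor helpers are an assumed example):
 *
 *	write_c0_entryhi(entryhi);
 *	mtc0_tlbw_hazard();
 *	tlb_write_random();
 *	tlbw_use_hazard();
 */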

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */

#ifdef CONFIG_CPU_MIPSR2
/*
 * Use a macro for ehb unless explicit support for MIPSR2 is enabled
 */
__asm__(
	"	.macro\tirq_enable_hazard			\n\t"
	"	_ehb						\n\t"
	"	.endm						\n\t"
	"							\n\t"
	"	.macro\tirq_disable_hazard			\n\t"
	"	_ehb						\n\t"
	"	.endm");

#define irq_enable_hazard() \
	__asm__ __volatile__( \
	"_ehb\t\t\t\t# irq_enable_hazard")

#define irq_disable_hazard() \
	__asm__ __volatile__( \
	"_ehb\t\t\t\t# irq_disable_hazard")

#define back_to_back_c0_hazard() \
	__asm__ __volatile__( \
	"_ehb\t\t\t\t# back_to_back_c0_hazard")

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
 */

__asm__(
	"	.macro\tirq_enable_hazard			\n\t"
	"	.endm						\n\t"
	"							\n\t"
	"	.macro\tirq_disable_hazard			\n\t"
	"	.endm");

#define irq_enable_hazard()	do { } while (0)
#define irq_disable_hazard()	do { } while (0)

#define back_to_back_c0_hazard()	do { } while (0)

#else

/*
 * Default for classic MIPS processors. Assume worst case hazards, but don't
 * care about the irq_enable_hazard - sooner or later the interrupts will be
 * enabled anyway and we don't care exactly when.
 */

__asm__(
	"	#						\n\t"
	"	# There is a hazard but we do not care		\n\t"
	"	#						\n\t"
	"	.macro\tirq_enable_hazard			\n\t"
	"	.endm						\n\t"
	"							\n\t"
	"	.macro\tirq_disable_hazard			\n\t"
	"	_ssnop; _ssnop; _ssnop				\n\t"
	"	.endm");

#define irq_enable_hazard()	do { } while (0)
#define irq_disable_hazard() \
	__asm__ __volatile__( \
	"_ssnop; _ssnop; _ssnop;\t\t# irq_disable_hazard")

#define back_to_back_c0_hazard() \
	__asm__ __volatile__( \
	"	.set noreorder				\n" \
	"	nop; nop; nop				\n" \
	"	.set reorder				\n")

#endif
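
/*
 * Illustrative sketch (not part of the original file): callers are expected
 * to place these barriers right after the CP0 access that creates the
 * hazard, e.g. (accessor and bit names assumed for the example):
 *
 *	write_c0_status(read_c0_status() & ~ST0_IE);
 *	irq_disable_hazard();		// interrupts are really masked from here
 *
 *	write_c0_compare(cnt);
 *	back_to_back_c0_hazard();	// before the next back-to-back CP0 op
 */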

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */