arch/mips/kernel/bmips_vec.S
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
 *
 * Reset/NMI/re-entry vectors for BMIPS processors
 */


#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
#include <asm/hazards.h>
#include <asm/bmips.h>

	.macro	BARRIER
	.set	mips32
	_ssnop
	_ssnop
	_ssnop
	.set	mips0
	.endm

/***********************************************************************
 * Alternate CPU1 startup vector for BMIPS4350
 *
 * On some systems the bootloader has already started CPU1 and configured
 * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
 * triggered by the SW1 interrupt. If that is the case we try to move
 * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
 ***********************************************************************/

LEAF(bmips_smp_movevec)
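	/* jump to the uncached (KSEG1) alias of the 1: label below */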
	la	k0, 1f
	li	k1, CKSEG1
	or	k0, k1
	jr	k0

1:
	/* clear IV, pending IPIs */
	mtc0	zero, CP0_CAUSE

	/* re-enable IRQs to wait for SW1 */
	li	k0, ST0_IE | ST0_BEV | STATUSF_IP1
	mtc0	k0, CP0_STATUS

	/* set up CPU1 CBR; move BASE to 0xa000_0000 */
	li	k0, 0xff400000
	mtc0	k0, $22, 6
	/* set up relocation vector address based on thread ID */
	mfc0	k1, $22, 3
	srl	k1, 16
	andi	k1, 0x8000
	or	k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
	or	k0, k1
	li	k1, 0xa0080000
	sw	k1, 0(k0)

	/* wait here for SW1 interrupt from bmips_boot_secondary() */
	wait

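	/* woken by SW1: enter the relocated reset/NMI vector via its KSEG1 alias */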
	la	k0, bmips_reset_nmi_vec
	li	k1, CKSEG1
	or	k0, k1
	jr	k0
END(bmips_smp_movevec)

/***********************************************************************
 * Reset/NMI vector
 * For BMIPS processors that can relocate their exception vectors, this
 * entire function gets copied to 0x8000_0000.
 ***********************************************************************/

NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
	.set	push
	.set	noat
	.align	4

#ifdef CONFIG_SMP
	/* if the NMI bit is clear, assume this is a CPU1 reset instead */
	li	k1, (1 << 19)
	mfc0	k0, CP0_STATUS
	and	k0, k1
	beqz	k0, soft_reset

#if defined(CONFIG_CPU_BMIPS5000)
	mfc0	k0, CP0_PRID
	li	k1, PRID_IMP_BMIPS5000
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	andi	k0, PRID_IMP_BMIPS5000
	bne	k0, k1, 1f

	/* if we're not on core 0, this must be the SMP boot signal */
	li	k1, (3 << 25)
	mfc0	k0, $22
	and	k0, k1
	bnez	k0, bmips_smp_entry
1:
#endif /* CONFIG_CPU_BMIPS5000 */
#endif /* CONFIG_SMP */

	/* nope, it's just a regular NMI */
	SAVE_ALL
	move	a0, sp

	/* clear EXL, ERL, BEV so that TLB refills still work */
	mfc0	k0, CP0_STATUS
	li	k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
	or	k0, k1
	xor	k0, k1
	mtc0	k0, CP0_STATUS
	BARRIER

	/* jump to the NMI handler function */
	la	k0, nmi_handler
	jr	k0

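	/* restore the saved context and return from the exception */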
	RESTORE_ALL
	.set	arch=r4000
	eret

#ifdef CONFIG_SMP
soft_reset:

#if defined(CONFIG_CPU_BMIPS5000)
	mfc0	k0, CP0_PRID
	andi	k0, 0xff00
	li	k1, PRID_IMP_BMIPS5200
	bne	k0, k1, bmips_smp_entry

	/* if running on TP 1, jump to bmips_smp_entry */
	mfc0	k0, $22
	li	k1, (1 << 24)
	and	k1, k0
	bnez	k1, bmips_smp_entry
	nop

	/*
	 * Running on TP0, which cannot be core 0 (the boot core).
	 * Check for a soft reset, which indicates a warm boot.
	 */
	mfc0	k0, $12
	li	k1, (1 << 20)
	and	k0, k1
	beqz	k0, bmips_smp_entry

	/*
	 * Warm boot.
	 * Cache init is only done on TP0
	 */
	la	k0, bmips_5xxx_init
	jalr	k0
	nop

	b	bmips_smp_entry
	nop
#endif

/***********************************************************************
 * CPU1 reset vector (used for the initial boot only)
 * This is still part of bmips_reset_nmi_vec().
 ***********************************************************************/

bmips_smp_entry:

	/* set up CP0 STATUS; enable FPU */
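	/* 0x30000000 = ST0_CU1 | ST0_CU0 */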
	li	k0, 0x30000000
	mtc0	k0, CP0_STATUS
	BARRIER

	/* set local CP0 CONFIG to make kseg0 cacheable, write-back */
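	/* the ori/xori pair leaves the K0 field (Config[2:0]) at 3 (write-back) */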
	mfc0	k0, CP0_CONFIG
	ori	k0, 0x07
	xori	k0, 0x04
	mtc0	k0, CP0_CONFIG

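	/* dispatch on the PRId implementation field (bits 15:8) */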
	mfc0	k0, CP0_PRID
	andi	k0, 0xff00
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
	li	k1, PRID_IMP_BMIPS43XX
	bne	k0, k1, 2f

	/* initialize CPU1's local I-cache */
	li	k0, 0x80000000
	li	k1, 0x80010000
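	/* zero the CP0 TagLo registers, then index-store zero tags over 64KB */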
	mtc0	zero, $28
	mtc0	zero, $28, 1
	BARRIER

1:	cache	Index_Store_Tag_I, 0(k0)
	addiu	k0, 16
	bne	k0, k1, 1b

	b	3f
2:
#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	li	k1, PRID_IMP_BMIPS5000
	andi	k0, PRID_IMP_BMIPS5000
	bne	k0, k1, 3f

	/* set exception vector base */
	la	k0, ebase
	lw	k0, 0(k0)
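	/* $15, 1 is the CP0 EBase register */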
	mtc0	k0, $15, 1
	BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */
3:
	/* jump back to kseg0 in case we need to remap the kseg1 area */
	la	k0, 1f
	jr	k0
1:
	la	k0, bmips_enable_xks01
	jalr	k0

	/* use temporary stack to set up upper memory TLB */
	li	sp, BMIPS_WARM_RESTART_VEC
	la	k0, plat_wired_tlb_setup
	jalr	k0

	/* switch to permanent stack and continue booting */

	.global bmips_secondary_reentry
bmips_secondary_reentry:
	la	k0, bmips_smp_boot_sp
	lw	sp, 0(k0)
	la	k0, bmips_smp_boot_gp
	lw	gp, 0(k0)
	la	k0, start_secondary
	jr	k0

#endif /* CONFIG_SMP */

	.align	4
	.global bmips_reset_nmi_vec_end
bmips_reset_nmi_vec_end:

END(bmips_reset_nmi_vec)

	.set	pop

/***********************************************************************
 * CPU1 warm restart vector (used for second and subsequent boots).
 * Also used for S2 standby recovery (PM).
 * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
 ***********************************************************************/

LEAF(bmips_smp_int_vec)

	.align	4
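	/* clear ST0_IE and return from the exception */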
	mfc0	k0, CP0_STATUS
	ori	k0, 0x01
	xori	k0, 0x01
	mtc0	k0, CP0_STATUS
	eret

	.align	4
	.global bmips_smp_int_vec_end
bmips_smp_int_vec_end:

END(bmips_smp_int_vec)

/***********************************************************************
 * XKS01 support
 * Certain CPUs support extending kseg0 to 1024MB.
 ***********************************************************************/

LEAF(bmips_enable_xks01)

#if defined(CONFIG_XKS01)
	mfc0	t0, CP0_PRID
	andi	t2, t0, 0xff00
#if defined(CONFIG_CPU_BMIPS4380)
	li	t1, PRID_IMP_BMIPS43XX
	bne	t2, t1, 1f

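	/* only revisions PRID_REV_BMIPS4380_LO..HI take this path */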
	andi	t0, 0xff
	addiu	t1, t0, -PRID_REV_BMIPS4380_HI
	bgtz	t1, 2f
	addiu	t0, -PRID_REV_BMIPS4380_LO
	bltz	t0, 2f

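	/* clear the bits in 0x1ff0, then set bits 12 and 9 to enable XKS01 */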
	mfc0	t0, $22, 3
	li	t1, 0x1ff0
	li	t2, (1 << 12) | (1 << 9)
	or	t0, t1
	xor	t0, t1
	or	t0, t2
	mtc0	t0, $22, 3
	BARRIER
	b	2f
1:
#endif /* CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
	li	t1, PRID_IMP_BMIPS5000
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	andi	t2, PRID_IMP_BMIPS5000
	bne	t2, t1, 2f

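	/* same clear-then-set sequence, in CP0 $22 sel 5, for BMIPS5000 */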
	mfc0	t0, $22, 5
	li	t1, 0x01ff
	li	t2, (1 << 8) | (1 << 5)
	or	t0, t1
	xor	t0, t1
	or	t0, t2
	mtc0	t0, $22, 5
	BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */
2:
#endif /* defined(CONFIG_XKS01) */

	jr	ra

END(bmips_enable_xks01)