MIPS: BMIPS: Clear MIPS_CACHE_ALIASES earlier
arch/mips/kernel/cps-vec.S
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>

#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_RUN_OFS	0x2028

	.extern mips_cm_base

	.set noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	 nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

/*
 * Set dest to non-zero if the core supports the MT ASE, else zero. If
 * MT is not supported then branch to nomt.
 */
	.macro	has_mt	dest, nomt
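	/*
	 * Bit 31 of each Config register (the M bit) indicates whether the
	 * next Config register in the sequence exists; the bgez instructions
	 * below bail out to \nomt as soon as it reads clear.
	 */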
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm

/*
 * Set dest to non-zero if the core supports MIPSr6 multithreading
 * (i.e. VPs), else zero. If MIPSr6 multithreading is not supported then
 * branch to nomt.
 */
	.macro	has_vp	dest, nomt
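	/* As in has_mt above, follow the Config chain via each register's M bit */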
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm

/* Calculate an uncached address for the CM GCRs */
	.macro	cmgcrb	dest
	.set	push
	.set	noat
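	/*
	 * CMGCRBase holds the GCR base physical address shifted right by 4;
	 * shift it back up and offset it into the uncached segment.
	 */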
	MFC0	$1, CP0_CMGCRBASE
	PTR_SLL	$1, $1, 4
	PTR_LI	\dest, UNCAC_BASE
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm

	.section .text.cps-vec
	.balign 0x1000

LEAF(mips_cps_core_entry)
	/*
	 * These first 4 bytes will be patched by cps_smp_setup to load the
	 * CCA to use into register s0.
	 */
	.word	0

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	 nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	 nop

not_nmi:
	/* Setup Cause */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/* Skip cache & coherence setup if we're already coherent */
	cmgcrb	v1
	lw	s7, GCR_CL_COHERENCE_OFS(v1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
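	/* Writing 0xff to GCR_CL_COHERENCE enables coherence with all domains */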
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(v1)
	ehb

	/* Set Kseg0 CCA to that in s0 */
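	/* The ori/xori pair clears the existing Config.K0 (CCA) field first */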
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
END(mips_cps_core_entry)

	.org	0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
END(excep_tlbfill)

	.org	0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
END(excep_xtlbfill)

	.org	0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
END(excep_cache)

	.org	0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
END(excep_genex)

	.org	0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
END(excep_intex)

	.org	0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
END(excep_ejtag)

LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
END(mips_cps_core_init)

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this core's struct core_boot_config */
	cmgcrb	t0
	lw	t0, GCR_CL_ID_OFS(t0)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPE's ID. If the core doesn't support MT, use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, $3, 1
	andi	t9, t9, 0xff
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
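	/* mask = (1 << log2(nvpes)) - 1, with log2 computed via clz below */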
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPE's struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
END(mips_cps_get_bootcfg)

LEAF(mips_cps_boot_vpes)
	PTR_L	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
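	/*
	 * GCR_CPC_BASE holds the CPC physical base in its upper bits; mask
	 * off the low control bits and map the result through the uncached
	 * segment.
	 */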
	cmgcrb	t3
	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Set VC_RUN to the VPE mask */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	.set	push
	.set	mt

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC */
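	/* ori/xori clears the TargTC field before inserting the TC index */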
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPE's struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:	.set	pop

#endif /* CONFIG_CPU_MIPSR6 || CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	 nop
END(mips_cps_boot_vpes)

LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writes to TagLo/TagHi selects 0 and 2 should be valid
	 * for all MIPS32 CPUs, even those for which such writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
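	/* Config1.IL encodes the line size as 2 << IL bytes; zero means no I-cache */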
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size */
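	/* As above: Config1.DL encodes the line size as 2 << DL bytes; zero means no D-cache */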
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
END(mips_cps_cache_init)

#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

/* Calculate a pointer to this CPU's struct mips_static_suspend_state */
	.macro	psstate	dest
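	/*
	 * dest = &cps_cpu_state + __per_cpu_offset[cpu], where cpu is read
	 * from this thread's thread_info.
	 */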
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1
	.set	pop
	.endm

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */