5de99b498738a71066ae162cacdc56005e00d9ec
[deliverable/linux.git] / arch / sh / kernel / cpu / sh3 / entry.S
1 /*
2 * arch/sh/kernel/entry.S
3 *
4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
5 * Copyright (C) 2003 - 2006 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11 #include <linux/sys.h>
12 #include <linux/errno.h>
13 #include <linux/linkage.h>
14 #include <asm/asm-offsets.h>
15 #include <asm/thread_info.h>
16 #include <asm/unistd.h>
17 #include <asm/cpu/mmu_context.h>
18 #include <asm/pgtable.h>
19 #include <asm/page.h>
20
21 ! NOTE:
22 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the target
23 ! address is too far away, but this causes an illegal slot exception.
24
25 /*
26 * entry.S contains the system-call and fault low-level handling routines.
27 * This also contains the timer-interrupt handler, as well as all interrupts
28 * and faults that can result in a task-switch.
29 *
30 * NOTE: This code handles signal-recognition, which happens every time
31 * after a timer-interrupt and after each system call.
32 *
33 * NOTE: This code uses a convention that instructions in the delay slot
34 * of a transfer-control instruction are indented by an extra space, thus:
35 *
36 * jmp @k0 ! control-transfer instruction
37 * ldc k1, ssr ! delay slot
38 *
39 * Stack layout in 'ret_from_syscall':
40 * ptrace needs to have all regs on the stack.
41 * if the order here is changed, it needs to be
42 * updated in ptrace.c and ptrace.h
43 *
44 * r0
45 * ...
46 * r15 = stack pointer
47 * spc
48 * pr
49 * ssr
50 * gbr
51 * mach
52 * macl
53 * syscall #
54 *
55 */
56 #if defined(CONFIG_KGDB_NMI)
57 NMI_VEC = 0x1c0 ! Must catch early for debounce
58 #endif
59
60 /* Offsets to the stack */
/* These mirror the pt_regs save order described in the header
 * comment above; keep in sync with ptrace.c / ptrace.h if the
 * save order ever changes. */
61 OFF_R0 = 0 /* Return value. New ABI also arg4 */
62 OFF_R1 = 4 /* New ABI: arg5 */
63 OFF_R2 = 8 /* New ABI: arg6 */
64 OFF_R3 = 12 /* New ABI: syscall_nr */
65 OFF_R4 = 16 /* New ABI: arg0 */
66 OFF_R5 = 20 /* New ABI: arg1 */
67 OFF_R6 = 24 /* New ABI: arg2 */
68 OFF_R7 = 28 /* New ABI: arg3 */
69 OFF_SP = (15*4)
70 OFF_PC = (16*4) /* spc slot */
71 OFF_SR = (16*4+8) /* ssr slot (spc, pr, then ssr) */
72 OFF_TRA = (16*4+6*4) /* syscall # slot */
73
74
/* Short aliases for the BANK1 copies of r0-r4, used as scratch
 * while SR.RB=1 during exception entry/exit (see the register
 * usage table below). */
75 #define k0 r0
76 #define k1 r1
77 #define k2 r2
78 #define k3 r3
79 #define k4 r4
80
81 #define g_imask r6 /* r6_bank1 */
82 #define k_g_imask r6_bank /* r6_bank1 */
83 #define current r7 /* r7_bank1 */
84
85 #include <asm/entry-macros.S>
86
87 /*
88 * Kernel mode register usage:
89 * k0 scratch
90 * k1 scratch
91 * k2 scratch (Exception code)
92 * k3 scratch (Return address)
93 * k4 scratch
94 * k5 reserved
95 * k6 Global Interrupt Mask (0--15 << 4)
96 * k7 CURRENT_THREAD_INFO (pointer to current thread info)
97 */
98
99 !
100 ! TLB Miss / Initial Page write exception handling
101 ! _and_
102 ! TLB hits, but the access violates the protection.
103 ! It can be a valid access, such as stack growth and/or C-O-W.
104 !
105 !
106 ! Find the pmd/pte entry and loadtlb
107 ! If it's not found, cause address error (SEGV)
108 !
109 ! Although this could be written in assembly language (and it'd be faster),
110 ! this first version depends *much* on C implementation.
111 !
112
113 #if defined(CONFIG_MMU)
! The five entry stubs below are reached from the exception
! dispatch. Each selects the fault type in r5 in the delay slot of
! its branch (0 = read access, 1 = write access), then funnels into
! the common dispatcher call_dpf, which calls do_page_fault().
114 .align 2
115 ENTRY(tlb_miss_load)
116 bra call_dpf
117 mov #0, r5 ! writeaccess = 0 (delay slot)
118
119 .align 2
120 ENTRY(tlb_miss_store)
121 bra call_dpf
122 mov #1, r5 ! writeaccess = 1 (delay slot)
123
124 .align 2
125 ENTRY(initial_page_write)
126 bra call_dpf
127 mov #1, r5 ! writeaccess = 1 (delay slot)
128
129 .align 2
130 ENTRY(tlb_protection_violation_load)
131 bra call_dpf
132 mov #0, r5 ! writeaccess = 0 (delay slot)
133
134 .align 2
135 ENTRY(tlb_protection_violation_store)
136 bra call_dpf
137 mov #1, r5 ! writeaccess = 1 (delay slot)
138
! Common tail: build the argument list for
! do_page_fault(regs=r4, writeaccess=r5, address=r6) and jump to it
! with interrupts re-enabled (sti).
139 call_dpf:
140 mov.l 1f, r0
141 mov.l @r0, r6 ! address (faulting address from MMU_TEA)
142 mov.l 3f, r0
143 sti ! re-enable interrupts before entering C code
144 jmp @r0
145 mov r15, r4 ! regs (delay slot)
146
147 .align 2
148 1: .long MMU_TEA
149 3: .long do_page_fault
150
151 .align 2
152 ENTRY(address_error_load)
153 bra call_dae
154 mov #0,r5 ! writeaccess = 0
155
156 .align 2
157 ENTRY(address_error_store)
158 bra call_dae
159 mov #1,r5 ! writeaccess = 1
160
161 .align 2
! Common tail for address-error faults: call
! do_address_error(regs=r4, writeaccess=r5, address=r6).
! Unlike call_dpf, interrupts are left as they are (no sti here).
162 call_dae:
163 mov.l 1f, r0
164 mov.l @r0, r6 ! address (faulting address from MMU_TEA)
165 mov.l 2f, r0
166 jmp @r0
167 mov r15, r4 ! regs (delay slot)
168
169 .align 2
170 1: .long MMU_TEA
171 2: .long do_address_error
172 #endif /* CONFIG_MMU */
173
174 #if defined(CONFIG_SH_STANDARD_BIOS)
175 /* Unwind the stack and jmp to the debug entry */
! Pops the full register frame pushed by handle_exception (same
! order as restore_all), then vectors into the boot firmware's
! debugger through the pointer stored at gdb_vbr_vector.
176 debug_kernel_fw:
177 mov.l @r15+, r0
178 mov.l @r15+, r1
179 mov.l @r15+, r2
180 mov.l @r15+, r3
181 mov.l @r15+, r4
182 mov.l @r15+, r5
183 mov.l @r15+, r6
184 mov.l @r15+, r7
185 stc sr, r8
186 mov.l 1f, r9 ! BL =1, RB=1, IMASK=0x0F
187 or r9, r8
188 ldc r8, sr ! here, change the register bank
! r8-r14 are not banked; switching to BANK1 protects the r0-r7
! values just restored while k0/k1 (BANK1 r0/r1) serve as scratch.
189 mov.l @r15+, r8
190 mov.l @r15+, r9
191 mov.l @r15+, r10
192 mov.l @r15+, r11
193 mov.l @r15+, r12
194 mov.l @r15+, r13
195 mov.l @r15+, r14
196 mov.l @r15+, k0 ! saved original stack pointer
197 ldc.l @r15+, spc
198 lds.l @r15+, pr
199 mov.l @r15+, k1 ! saved SR; becomes SSR below
200 ldc.l @r15+, gbr
201 lds.l @r15+, mach
202 lds.l @r15+, macl
203 mov k0, r15 ! switch back to the original stack
204 !
205 mov.l 2f, k0
206 mov.l @k0, k0
207 jmp @k0 ! enter the firmware debug handler
208 ldc k1, ssr ! delay slot
209 .align 2
210 1: .long 0x300000f0
211 2: .long gdb_vbr_vector
212 #endif /* CONFIG_SH_STANDARD_BIOS */
213
! restore_all: common return path. Unwinds the pt_regs frame
! pushed by handle_exception (order must mirror the save exactly),
! rebuilds SSR's interrupt-mask field, and returns via rte.
214 restore_all:
215 mov.l @r15+, r0
216 mov.l @r15+, r1
217 mov.l @r15+, r2
218 mov.l @r15+, r3
219 mov.l @r15+, r4
220 mov.l @r15+, r5
221 mov.l @r15+, r6
222 mov.l @r15+, r7
223 !
224 stc sr, r8
225 mov.l 7f, r9
226 or r9, r8 ! BL =1, RB=1
227 ldc r8, sr ! here, change the register bank
! r8-r14 are not banked; switching to BANK1 protects the r0-r7
! values just restored while k0-k4 (BANK1 r0-r4) serve as scratch.
228 !
229 mov.l @r15+, r8
230 mov.l @r15+, r9
231 mov.l @r15+, r10
232 mov.l @r15+, r11
233 mov.l @r15+, r12
234 mov.l @r15+, r13
235 mov.l @r15+, r14
236 mov.l @r15+, k4 ! original stack pointer
237 ldc.l @r15+, spc
238 lds.l @r15+, pr
239 mov.l @r15+, k3 ! original SR
240 ldc.l @r15+, gbr
241 lds.l @r15+, mach
242 lds.l @r15+, macl
243 add #4, r15 ! Skip syscall number
244 !
245 #ifdef CONFIG_SH_DSP
246 mov.l @r15+, k0 ! DSP mode marker
247 mov.l 5f, k1
248 cmp/eq k0, k1 ! Do we have a DSP stack frame?
249 bf skip_restore
250
251 stc sr, k0 ! Enable CPU DSP mode
252 or k1, k0 ! (within kernel it may be disabled)
253 ldc k0, sr
254 mov r2, k0 ! Backup r2
255
256 ! Restore DSP registers from stack
257 mov r15, r2
258 movs.l @r2+, a1
259 movs.l @r2+, a0g
260 movs.l @r2+, a1g
261 movs.l @r2+, m0
262 movs.l @r2+, m1
263 mov r2, r15
264
265 lds.l @r15+, a0
266 lds.l @r15+, x0
267 lds.l @r15+, x1
268 lds.l @r15+, y0
269 lds.l @r15+, y1
270 lds.l @r15+, dsr
271 ldc.l @r15+, rs
272 ldc.l @r15+, re
273 ldc.l @r15+, mod
274
275 mov k0, r2 ! Restore r2
276 skip_restore:
277 #endif
278 !
279 ! Calculate new SR value
280 mov k3, k2 ! original SR value
281 mov #0xf0, k1 ! IMASK field (bits 4-7)
282 extu.b k1, k1 ! zero-extend: k1 = 0x000000f0
283 not k1, k1
284 and k1, k2 ! Mask original SR value (clear IMASK)
285 !
286 mov k3, k0 ! Calculate IMASK-bits
287 shlr2 k0
288 and #0x3c, k0 ! k0 = original IMASK << 2
289 cmp/eq #0x3c, k0 ! was IMASK fully masked (0xf)?
290 bt/s 6f ! yes: keep the original mask...
291 shll2 k0 ! (delay slot) k0 = original IMASK << 4
292 mov g_imask, k0 ! no: use the global interrupt mask
293 !
294 6: or k0, k2 ! Set the IMASK-bits
295 ldc k2, ssr
296 !
297 #if defined(CONFIG_KGDB_NMI)
298 ! Clear in_nmi
! NOTE(review): "6f" is a forward reference; there is no numeric
! label 6: between here and the rte in this block -- verify it
! resolves to a ".long in_nmi" literal and not an unrelated label.
299 mov.l 6f, k0
300 mov #0, k1
301 mov.b k1, @k0
302 #endif
303 mov.l @r15+, k2 ! restore EXPEVT
304 mov k4, r15 ! back to the original stack pointer
305 rte
306 nop ! (delay slot)
307
308 .align 2
309 5: .long 0x00001000 ! DSP
310 7: .long 0x30000000 ! SR.RB | SR.BL
311
312 ! common exception handler
313 #include "../../entry-common.S"
314
315 ! Exception Vector Base
316 !
317 ! Should be aligned page boundary.
318 !
319 .balign 4096,0,4096
! VBR points at vbr_base; the .balign directives below place the
! handlers at the hardware's fixed vector offsets from VBR
! (general exception at +0x100, TLB miss at +0x400 via the
! 1024-byte align, interrupt at +0x600 via the 512-byte align).
320 ENTRY(vbr_base)
321 .long 0
322 !
323 .balign 256,0,256
! VBR + 0x100: load the exception code from EXPEVT into k2 and the
! post-handler return address (ret_from_exception) into k3, then
! build a full frame in handle_exception.
324 general_exception:
325 mov.l 1f, k2
326 mov.l 2f, k3
327 bra handle_exception
328 mov.l @k2, k2 ! (delay slot) k2 = EXPEVT value
329 .align 2
330 1: .long EXPEVT
331 2: .long ret_from_exception
332 !
333 !
334
335 /* This code makes some assumptions to improve performance.
336 * Make sure they are still true. */
337 #if PTRS_PER_PGD != PTRS_PER_PTE
338 #error PDG and PTE sizes don't match
339 #endif
340
341 /* gas doesn't flag impossible values for mov #immediate as an error */
342 #if (_PAGE_PRESENT >> 2) > 0x7f
343 #error cannot load PAGE_PRESENT as an immediate
344 #endif
345 #if _PAGE_DIRTY > 0x7f
346 #error cannot load PAGE_DIRTY as an immediate
347 #endif
348 #if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
349 #error cannot derive PAGE_ACCESSED from PAGE_PRESENT
350 #endif
351
! On SH-4 the MMU_PTEH address does not fit mov's signed 8-bit
! immediate, so load it from the literal pool entry 8f instead.
352 #if defined(CONFIG_CPU_SH4)
353 #define ldmmupteh(r) mov.l 8f, r
354 #else
355 #define ldmmupteh(r) mov #MMU_PTEH, r
356 #endif
357
358 .balign 1024,0,1024
! VBR + 0x400: TLB miss fast path. Walks the two-level page table
! in software using only the banked k0-k4 registers, loads the TLB
! with ldtlb and returns with rte -- no stack frame is built unless
! the walk fails, in which case it falls back to handle_exception
! via label 20 (do_page_fault path). The trailing "! nn XX"
! comments are existing pipeline/latency annotations.
359 tlb_miss:
360 #ifdef COUNT_EXCEPTIONS
361 ! Increment the counts
362 mov.l 9f, k1
363 mov.l @k1, k2
364 add #1, k2
365 mov.l k2, @k1
366 #endif
367
368 ! k0 scratch
369 ! k1 pgd and pte pointers
370 ! k2 faulting address
371 ! k3 pgd and pte index masks
372 ! k4 shift
373
374 ! Load up the pgd entry (k1)
375
376 ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH
377
378 mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2
379 mov #-(PGDIR_SHIFT-2), k4 ! 6 EX
380
381 mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2) faulting address
382
383 mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2) pgd base from TTB
384
385 mov k2, k0 ! 5 MT (latency=0)
386 shld k4, k0 ! 99 EX k0 = addr >> (PGDIR_SHIFT-2)
387
388 and k3, k0 ! 78 EX k0 = pgd index * 4
389
390 mov.l @(k0, k1), k1 ! 21 LS (latency=2) k1 = pgd entry
391 mov #-(PAGE_SHIFT-2), k4 ! 6 EX
392
393 ! Load up the pte entry (k2)
394
395 mov k2, k0 ! 5 MT (latency=0)
396 shld k4, k0 ! 99 EX
397
398 tst k1, k1 ! 86 MT pgd entry present?
399
400 bt 20f ! 110 BR no: slow path
401
402 and k3, k0 ! 78 EX k0 = pte index * 4
403 mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT
404
405 mov.l @(k0, k1), k2 ! 21 LS (latency=2) k2 = pte value
406 add k0, k1 ! 49 EX k1 = &pte (for ACCESSED write-back)
407
408 #ifdef CONFIG_CPU_HAS_PTEA
409 ! Test the entry for present and _PAGE_ACCESSED
410
411 mov #-28, k3 ! 6 EX
412 mov k2, k0 ! 5 MT (latency=0)
413
414 tst k4, k2 ! 68 MT pte present?
415 shld k3, k0 ! 99 EX
416
417 bt 20f ! 110 BR not present: slow path
418
419 ! Set PTEA register
420 ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
421 !
422 ! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
423
424 and #0xe, k0 ! 79 EX
425
426 mov k0, k3 ! 5 MT (latency=0)
427 mov k2, k0 ! 5 MT (latency=0)
428
429 and #1, k0 ! 79 EX
430
431 or k0, k3 ! 82 EX
432
433 ldmmupteh(k0) ! 9 LS (latency=2)
434 shll2 k4 ! 101 EX _PAGE_ACCESSED
435
436 tst k4, k2 ! 68 MT pte already marked accessed?
437
438 mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS
439
440 mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
441
442 ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
443 #else
444
445 ! Test the entry for present and _PAGE_ACCESSED
446
447 mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
448 tst k4, k2 ! 68 MT pte present?
449
450 shll2 k4 ! 101 EX _PAGE_ACCESSED
451 ldmmupteh(k0) ! 9 LS (latency=2)
452
453 bt 20f ! 110 BR not present: slow path
454 tst k4, k2 ! 68 MT pte already marked accessed?
455
456 ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
457
458 #endif
459
460 ! Set up the entry
461
462 and k2, k3 ! 78 EX keep only hardware PTE bits
463 bt/s 10f ! 108 BR T set => ACCESSED not yet set: go set it
464
465 mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS (delay slot, both paths)
466
467 ldtlb ! 128 CO
468
469 ! At least one instruction between ldtlb and rte
470 nop ! 119 NOP
471
472 rte ! 126 CO
473
474 nop ! 119 NOP
475
476
477 10: or k4, k2 ! 82 EX set _PAGE_ACCESSED in the pte
478
479 ldtlb ! 128 CO
480
481 ! At least one instruction between ldtlb and rte
482 mov.l k2, @k1 ! 27 LS write updated pte back to the page table
483
484 rte ! 126 CO
485
486 ! Note we cannot execute mov here, because it is executed after
487 ! restoring SSR, so would be executed in user space.
488 nop ! 119 NOP
489
490
491 .align 5
492 ! One cache line if possible...
493 1: .long swapper_pg_dir
494 4: .short (PTRS_PER_PGD-1) << 2
495 5: .short _PAGE_PRESENT
496 7: .long _PAGE_FLAGS_HARDWARE_MASK
497 8: .long MMU_PTEH
498 #ifdef COUNT_EXCEPTIONS
499 9: .long exception_count_miss
500 #endif
501
502 ! Either pgd or pte not present
! Slow path for tlb_miss: fetch the exception code from EXPEVT
! into k2, set ret_from_exception as the return address in k3, and
! build a full frame in handle_exception so C code can resolve the
! fault (do_page_fault via the exception table).
503 20: mov.l 1f, k2
504 mov.l 4f, k3
505 bra handle_exception
506 mov.l @k2, k2 ! (delay slot) k2 = EXPEVT value
507 !
508 .balign 512,0,512
! VBR + 0x600: hardware interrupt entry. The INTEVT read itself is
! deferred to do_IRQ(); only its address is staged in k2 here.
509 interrupt:
510 mov.l 2f, k2 ! &INTEVT
511 mov.l 3f, k3 ! return address: ret_from_irq
512 #if defined(CONFIG_KGDB_NMI)
513 ! Debounce (filter nested NMI)
514 mov.l @k2, k0
515 mov.l 5f, k1
516 cmp/eq k1, k0 ! is this the NMI vector?
517 bf 0f ! no: normal interrupt handling
518 mov.l 6f, k1
519 tas.b @k1 ! atomically test-and-set in_nmi
520 bt 0f ! was clear: proceed with this NMI
521 rte ! already inside an NMI: drop the nested one
522 nop ! (delay slot)
523 .align 2
524 5: .long NMI_VEC
525 6: .long in_nmi
526 0:
527 #endif /* defined(CONFIG_KGDB_NMI) */
528 bra handle_exception
529 mov #-1, k2 ! interrupt exception marker (delay slot)
530
531 .align 2
532 1: .long EXPEVT
533 2: .long INTEVT
534 3: .long ret_from_irq
535 4: .long ret_from_exception
536
537 !
538 !
539 .align 2
! handle_exception: common frame builder for every exception and
! interrupt entry above.
! On entry (still on BANK1):
!   k2 = exception code (EXPEVT value), or -1 for an interrupt
!   k3 = address the C handler returns to (loaded into pr below)
! Builds the pt_regs frame described in the file header, switches
! back to BANK0 with interrupts blocked, then dispatches through
! exception_handling_table (or tail-calls do_IRQ for interrupts).
540 ENTRY(handle_exception)
541 ! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
542 ! save all registers onto stack.
543 !
544 stc ssr, k0 ! Is it from kernel space?
545 shll k0 ! Check MD bit (bit30) by shifting it into...
546 shll k0 ! ...the T bit
547 bt/s 1f ! It's a kernel to kernel transition.
548 mov r15, k0 ! save original stack to k0 (delay slot, both paths)
549 /* User space to kernel */
550 mov #(THREAD_SIZE >> 8), k1
551 shll8 k1 ! k1 := THREAD_SIZE
552 add current, k1 ! k1 = top of this task's kernel stack
553 mov k1, r15 ! change to kernel stack
554 !
555 1: mov.l 2f, k1 ! SR bits used below to block interrupts
556 !
557 #ifdef CONFIG_SH_DSP
558 mov.l r2, @-r15 ! Save r2, we need another reg
559 stc sr, k4
560 mov.l 1f, r2
561 tst r2, k4 ! Check if in DSP mode
562 mov.l @r15+, r2 ! Restore r2 now
563 bt/s skip_save
564 mov #0, k4 ! Set marker for no stack frame (delay slot)
565
566 mov r2, k4 ! Backup r2 (in k4) for later
567
568 ! Save DSP registers on stack
569 stc.l mod, @-r15
570 stc.l re, @-r15
571 stc.l rs, @-r15
572 sts.l dsr, @-r15
573 sts.l y1, @-r15
574 sts.l y0, @-r15
575 sts.l x1, @-r15
576 sts.l x0, @-r15
577 sts.l a0, @-r15
578
579 ! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
580
581 ! FIXME: Make sure that this is still the case with newer toolchains,
582 ! as we're not at all interested in supporting ancient toolchains at
583 ! this point. -- PFM.
584
585 mov r15, r2
586 .word 0xf653 ! movs.l a1, @-r2
587 .word 0xf6f3 ! movs.l a0g, @-r2
588 .word 0xf6d3 ! movs.l a1g, @-r2
589 .word 0xf6c3 ! movs.l m0, @-r2
590 .word 0xf6e3 ! movs.l m1, @-r2
591 mov r2, r15
592
593 mov k4, r2 ! Restore r2
594 mov.l 1f, k4 ! Force DSP stack frame
595 skip_save:
596 mov.l k4, @-r15 ! Push DSP mode marker onto stack
597 #endif
598 ! Save the user registers on the stack.
599 mov.l k2, @-r15 ! EXPEVT
600
601 mov #-1, k4
602 mov.l k4, @-r15 ! set TRA (default: -1)
603 !
604 sts.l macl, @-r15
605 sts.l mach, @-r15
606 stc.l gbr, @-r15
607 stc.l ssr, @-r15
608 sts.l pr, @-r15
609 stc.l spc, @-r15
610 !
611 lds k3, pr ! Set the return address to pr
612 !
613 mov.l k0, @-r15 ! save original stack
614 mov.l r14, @-r15
615 mov.l r13, @-r15
616 mov.l r12, @-r15
617 mov.l r11, @-r15
618 mov.l r10, @-r15
619 mov.l r9, @-r15
620 mov.l r8, @-r15
621 !
622 stc sr, r8 ! Back to normal register bank, and
623 or k1, r8 ! Block all interrupts
624 mov.l 3f, k1
625 and k1, r8 ! ... (clear RB and BL)
626 ldc r8, sr ! ...changed here.
627 !
628 mov.l r7, @-r15
629 mov.l r6, @-r15
630 mov.l r5, @-r15
631 mov.l r4, @-r15
632 mov.l r3, @-r15
633 mov.l r2, @-r15
634 mov.l r1, @-r15
635 mov.l r0, @-r15
636
637 /*
638 * This gets a bit tricky.. in the INTEVT case we don't want to use
639 * the VBR offset as a destination in the jump call table, since all
640 * of the destinations are the same. In this case, (interrupt) sets
641 * a marker in r2 (now r2_bank since SR.RB changed), which we check
642 * to determine the exception type. For all other exceptions, we
643 * forcibly read EXPEVT from memory and fix up the jump address, in
644 * the interrupt exception case we jump to do_IRQ() and defer the
645 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
646 * checks that do_IRQ() was doing..
647 */
648 stc r2_bank, r8 ! r8 = k2 = exception code or -1 marker
649 cmp/pz r8
650 bf interrupt_exception ! negative marker => hardware interrupt
651 shlr2 r8
652 shlr r8 ! r8 = EXPEVT >> 3: byte offset into the long table
653
654 #ifdef COUNT_EXCEPTIONS
655 mov.l 5f, r9
656 add r8, r9
657 mov.l @r9, r10
658 add #1, r10
659 mov.l r10, @r9
660 #endif
661
662 mov.l 4f, r9
663 add r8, r9
664 mov.l @r9, r9
665 jmp @r9 ! tail-call the handler; it returns via pr (k3)
666 nop ! (delay slot)
667 rts ! not reached
668 nop
669
670 .align 2
671 1: .long 0x00001000 ! DSP=1
672 2: .long 0x000080f0 ! FD=1, IMASK=15
673 3: .long 0xcfffffff ! RB=0, BL=0
674 4: .long exception_handling_table
675 #ifdef COUNT_EXCEPTIONS
676 5: .long exception_count_table
677 #endif
678
! Hardware interrupt path: tail-call do_IRQ(); it reads INTEVT
! itself and returns via pr (ret_from_irq, set in handle_exception).
679 interrupt_exception:
680 mov.l 1f, r9
681 jmp @r9
682 nop ! (delay slot)
683 rts ! not reached
684 nop
685
686 .align 2
687 1: .long do_IRQ
688
689 .align 2
! Default stub for unpopulated exception table slots: just return.
690 ENTRY(exception_none)
691 rts
692 nop ! (delay slot)
This page took 0.0434 seconds and 4 git commands to generate.