! Commit: "sh: Fixup various PAGE_SIZE == 4096 assumptions."
! Source: deliverable/linux.git — arch/sh/kernel/cpu/sh3/entry.S
! (gitweb "CommitLineData" blame-annotated listing follows)
baf4326e 1/*
de398406 2 * arch/sh/kernel/entry.S
1da177e4
LT
3 *
4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
baf4326e 5 * Copyright (C) 2003 - 2006 Paul Mundt
1da177e4
LT
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
1da177e4 10 */
1da177e4 11#include <linux/sys.h>
711fa809 12#include <linux/errno.h>
1da177e4 13#include <linux/linkage.h>
1da177e4
LT
14#include <asm/asm-offsets.h>
15#include <asm/thread_info.h>
16#include <asm/unistd.h>
9b3a53ab
SM
17#include <asm/cpu/mmu_context.h>
18#include <asm/pgtable.h>
19#include <asm/page.h>
1da177e4 20
1da177e4
LT
21! NOTE:
22! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
23! to be jumped is too far, but it causes illegal slot exception.
24
25/*
26 * entry.S contains the system-call and fault low-level handling routines.
27 * This also contains the timer-interrupt handler, as well as all interrupts
28 * and faults that can result in a task-switch.
29 *
30 * NOTE: This code handles signal-recognition, which happens every time
31 * after a timer-interrupt and after each system call.
32 *
33 * NOTE: This code uses a convention that instructions in the delay slot
34 * of a transfer-control instruction are indented by an extra space, thus:
35 *
36 * jmp @k0 ! control-transfer instruction
37 * ldc k1, ssr ! delay slot
38 *
39 * Stack layout in 'ret_from_syscall':
40 * ptrace needs to have all regs on the stack.
41 * if the order here is changed, it needs to be
42 * updated in ptrace.c and ptrace.h
43 *
44 * r0
45 * ...
46 * r15 = stack pointer
47 * spc
48 * pr
49 * ssr
50 * gbr
51 * mach
52 * macl
53 * syscall #
54 *
55 */
1da177e4
LT
56#if defined(CONFIG_KGDB_NMI)
57NMI_VEC = 0x1c0			! Must catch early for debounce
58#endif
59
! Trap-frame layout constants. These offsets mirror the register save
! order documented in the header comment above and must stay in sync
! with ptrace.c / ptrace.h (see that comment).
60/* Offsets to the stack */
61OFF_R0 = 0 /* Return value. New ABI also arg4 */
62OFF_R1 = 4 /* New ABI: arg5 */
63OFF_R2 = 8 /* New ABI: arg6 */
64OFF_R3 = 12 /* New ABI: syscall_nr */
65OFF_R4 = 16 /* New ABI: arg0 */
66OFF_R5 = 20 /* New ABI: arg1 */
67OFF_R6 = 24 /* New ABI: arg2 */
68OFF_R7 = 28 /* New ABI: arg3 */
69OFF_SP = (15*4)
70OFF_PC = (16*4)
71OFF_SR = (16*4+8)
72OFF_TRA = (16*4+6*4)
73
74
! k0-k4 alias r0-r4; while SR.RB=1 (exception context) these are the
! bank-1 scratch registers described in the usage table further below.
75#define k0 r0
76#define k1 r1
77#define k2 r2
78#define k3 r3
79#define k4 r4
80
1da177e4
LT
81#define g_imask r6 /* r6_bank1 */
82#define k_g_imask r6_bank /* r6_bank1 */
83#define current r7 /* r7_bank1 */
84
de398406
YS
85#include <asm/entry-macros.S>
86
1da177e4
LT
87/*
88 * Kernel mode register usage:
89 * k0 scratch
90 * k1 scratch
91 * k2 scratch (Exception code)
92 * k3 scratch (Return address)
93 * k4 scratch
94 * k5 reserved
95 * k6 Global Interrupt Mask (0--15 << 4)
96 * k7 CURRENT_THREAD_INFO (pointer to current thread info)
97 */
98
99!
100! TLB Miss / Initial Page write exception handling
101! _and_
102! TLB hits, but the access violates the protection.
103! It can be valid access, such as stack grow and/or C-O-W.
104!
105!
106! Find the pmd/pte entry and loadtlb
107! If it's not found, cause address error (SEGV)
108!
109! Although this could be written in assembly language (and it'd be faster),
110! this first version depends *much* on C implementation.
111!
112
1da177e4
LT
! Data-fault trampolines. Each entry point loads the writeaccess flag
! into r5 in the branch delay slot (0 = read fault, 1 = write fault),
! then the shared tail builds the C call:
!   call_dpf -> do_page_fault(r4 = regs, r5 = writeaccess,
!                             r6 = faulting address read from MMU_TEA)
! Note call_dpf re-enables interrupts (sti) before jumping.
113#if defined(CONFIG_MMU)
114	.align	2
115ENTRY(tlb_miss_load)
116	bra	call_dpf
117	 mov	#0, r5
118
119	.align	2
120ENTRY(tlb_miss_store)
121	bra	call_dpf
122	 mov	#1, r5
123
124	.align	2
125ENTRY(initial_page_write)
126	bra	call_dpf
127	 mov	#1, r5
128
129	.align	2
130ENTRY(tlb_protection_violation_load)
131	bra	call_dpf
132	 mov	#0, r5
133
134	.align	2
135ENTRY(tlb_protection_violation_store)
136	bra	call_dpf
137	 mov	#1, r5
138
139call_dpf:
140	mov.l	1f, r0
9b3a53ab 141 	mov.l	@r0, r6		! address
1da177e4 142	mov.l	3f, r0
9b3a53ab 143	sti
1da177e4 144	jmp	@r0
9b3a53ab 145	 mov	r15, r4		! regs
1da177e4
LT
146
147	.align 2
1481:	.long	MMU_TEA
1da177e4
LT
1493:	.long	do_page_fault
150
! Address-error trampolines: same r5 convention, shared tail calls
!   do_address_error(r4 = regs, r5 = writeaccess, r6 = MMU_TEA address).
! Unlike call_dpf, interrupts are left as they were (no sti here).
151	.align	2
152ENTRY(address_error_load)
153	bra	call_dae
154	 mov	#0,r5		! writeaccess = 0
155
156	.align	2
157ENTRY(address_error_store)
158	bra	call_dae
159	 mov	#1,r5		! writeaccess = 1
160
161	.align	2
162call_dae:
163	mov.l	1f, r0
164	mov.l	@r0, r6		! address
165	mov.l	2f, r0
166	jmp	@r0
167	 mov	r15, r4		! regs
168
169	.align 2
1701:	.long	MMU_TEA
1712:	.long	do_address_error
172#endif /* CONFIG_MMU */
173
1da177e4
LT
! Hand control to the on-board debug firmware (SH standard BIOS / gdb
! stub): pop the entire trap frame back into the CPU registers, then
! jump through the saved gdb_vbr_vector pointer, restoring SSR in the
! jmp delay slot. The bank switch (RB=1) happens after r0-r7 are
! restored so k0/k1 (bank-1 r0/r1) can carry the original r15 and SSR
! across the remaining pops without clobbering user registers.
174#if defined(CONFIG_SH_STANDARD_BIOS)
175	/* Unwind the stack and jmp to the debug entry */
176debug_kernel_fw:
177	mov.l	@r15+, r0
178	mov.l	@r15+, r1
179	mov.l	@r15+, r2
180	mov.l	@r15+, r3
181	mov.l	@r15+, r4
182	mov.l	@r15+, r5
183	mov.l	@r15+, r6
184	mov.l	@r15+, r7
185	stc	sr, r8
186	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
187	or	r9, r8
188	ldc	r8, sr			! here, change the register bank
189	mov.l	@r15+, r8
190	mov.l	@r15+, r9
191	mov.l	@r15+, r10
192	mov.l	@r15+, r11
193	mov.l	@r15+, r12
194	mov.l	@r15+, r13
195	mov.l	@r15+, r14
196	mov.l	@r15+, k0		! original (pre-exception) r15
197	ldc.l	@r15+, spc
198	lds.l	@r15+, pr
199	mov.l	@r15+, k1		! original SSR
200	ldc.l	@r15+, gbr
201	lds.l	@r15+, mach
202	lds.l	@r15+, macl
203	mov	k0, r15
204	!
205	mov.l	2f, k0
206	mov.l	@k0, k0
207	jmp	@k0
208	 ldc	k1, ssr			! delay slot: restore saved SR
209	.align	2
2101:	.long	0x300000f0
2112:	.long	gdb_vbr_vector
212#endif /* CONFIG_SH_STANDARD_BIOS */
213
1da177e4
LT
! restore_all: common exception/syscall exit. Pops the full trap frame
! (layout per the header comment), optionally restores a DSP frame,
! recomputes the IMASK field of the to-be-restored SR, and returns to
! the interrupted context with rte. k3/k4 (bank-1) carry the original
! SR and stack pointer across the bank switch.
214restore_all:
215	mov.l	@r15+, r0
216	mov.l	@r15+, r1
217	mov.l	@r15+, r2
218	mov.l	@r15+, r3
219	mov.l	@r15+, r4
220	mov.l	@r15+, r5
221	mov.l	@r15+, r6
222	mov.l	@r15+, r7
223	!
224	stc	sr, r8
225	mov.l	7f, r9
226	or	r9, r8			! BL =1, RB=1
227	ldc	r8, sr			! here, change the register bank
228	!
229	mov.l	@r15+, r8
230	mov.l	@r15+, r9
231	mov.l	@r15+, r10
232	mov.l	@r15+, r11
233	mov.l	@r15+, r12
234	mov.l	@r15+, r13
235	mov.l	@r15+, r14
236	mov.l	@r15+, k4		! original stack pointer
237	ldc.l	@r15+, spc
238	lds.l	@r15+, pr
239	mov.l	@r15+, k3		! original SR
240	ldc.l	@r15+, gbr
241	lds.l	@r15+, mach
242	lds.l	@r15+, macl
243	add	#4, r15			! Skip syscall number
244	!
245#ifdef CONFIG_SH_DSP
246	mov.l	@r15+, k0		! DSP mode marker
247	mov.l	5f, k1
248	cmp/eq	k0, k1			! Do we have a DSP stack frame?
249	bf	skip_restore
250
251	stc	sr, k0			! Enable CPU DSP mode
252	or	k1, k0			! (within kernel it may be disabled)
253	ldc	k0, sr
254	mov	r2, k0			! Backup r2
255
256	! Restore DSP registers from stack
257	mov	r15, r2
258	movs.l	@r2+, a1
259	movs.l	@r2+, a0g
260	movs.l	@r2+, a1g
261	movs.l	@r2+, m0
262	movs.l	@r2+, m1
263	mov	r2, r15
264
265	lds.l	@r15+, a0
266	lds.l	@r15+, x0
267	lds.l	@r15+, x1
268	lds.l	@r15+, y0
269	lds.l	@r15+, y1
270	lds.l	@r15+, dsr
271	ldc.l	@r15+, rs
272	ldc.l	@r15+, re
273	ldc.l	@r15+, mod
274
275	mov	k0, r2			! Restore r2
276skip_restore:
277#endif
278	!
279	! Calculate new SR value
280	mov	k3, k2			! original SR value
de398406
YS
281	mov	#0xf0, k1
282	extu.b	k1, k1
283	not	k1, k1
1da177e4
LT
284	and	k1, k2			! Mask original SR value (clear IMASK)
285	!
	! If the saved IMASK was 0xf (all blocked), keep it; otherwise use
	! the global mask in g_imask (r6_bank1).
286	mov	k3, k0			! Calculate IMASK-bits
287	shlr2	k0
288	and	#0x3c, k0
289	cmp/eq	#0x3c, k0
290	bt/s	6f
291	 shll2	k0
292	mov	g_imask, k0
293	!
2946:	or	k0, k2			! Set the IMASK-bits
295	ldc	k2, ssr
296	!
297#if defined(CONFIG_KGDB_NMI)
298	! Clear in_nmi
	! NOTE(review): 6f here is a *forward* reference past label 6: above,
	! so it resolves to the "6: .long in_nmi" literal in the interrupt
	! section below — confirm it stays within mov.l displacement range.
6ae5e8d7 299	mov.l	6f, k0
1da177e4
LT
300	mov	#0, k1
301	mov.b	k1, @k0
302#endif
303	mov.l	@r15+, k2		! restore EXPEVT
304	mov	k4, r15
305	rte
306	 nop
307
308	.align	2
1da177e4
LT
3095:	.long	0x00001000	! DSP
3107:	.long	0x30000000
1da177e4 311
de398406 312! common exception handler
716067f2 313#include "../../entry-common.S"
de398406 314
1da177e4
LT
315! Exception Vector Base
316!
317! Should be aligned page boundary.
! NOTE(review): the 4096 here is one of the PAGE_SIZE == 4096
! assumptions this file carries — confirm against asm/page.h if
! PAGE_SIZE ever changes.
318!
319	.balign 	4096,0,4096
320ENTRY(vbr_base)
321	.long	0
322!
! General-exception vector (reached at a fixed offset from vbr_base,
! hence the .balign 256): pass the EXPEVT exception code in k2 and the
! common return address (ret_from_exception) in k3 to handle_exception.
323	.balign 	256,0,256
324general_exception:
325	mov.l	1f, k2
326	mov.l	2f, k3
327	bra	handle_exception
328	 mov.l	@k2, k2			! delay slot: k2 = *EXPEVT
329	.align	2
3301:	.long	EXPEVT
3312:	.long	ret_from_exception
332!
333!
9b3a53ab
SM
334
! Compile-time guards for the hand-scheduled tlb_miss fast path below:
! the code reuses one index mask for both pgd and pte lookups, loads
! _PAGE_PRESENT/_PAGE_DIRTY as 8-bit immediates, and derives
! _PAGE_ACCESSED as _PAGE_PRESENT << 2 (two shll2's). Break any of
! these invariants and the build must fail here rather than miscompile.
335/* This code makes some assumptions to improve performance.
336 * Make sure they are still true. */
337#if PTRS_PER_PGD != PTRS_PER_PTE
510c72ad 338#error PGD and PTE sizes don't match
9b3a53ab
SM
339#endif
340
341/* gas doesn't flag impossible values for mov #immediate as an error */
342#if (_PAGE_PRESENT >> 2) > 0x7f
343#error cannot load PAGE_PRESENT as an immediate
344#endif
345#if _PAGE_DIRTY > 0x7f
346#error cannot load PAGE_DIRTY as an immediate
347#endif
348#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
349#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
350#endif
351
! Load the MMU_PTEH register address: on SH-4 it is fetched from the
! literal pool (8f below) — presumably the address does not fit the
! signed 8-bit mov immediate used on other CPUs; TODO confirm.
352#if defined(CONFIG_CPU_SH4)
353#define ldmmupteh(r)	mov.l	8f, r
354#else
355#define ldmmupteh(r)	mov	#MMU_PTEH, r
356#endif
357
1da177e4
LT
! tlb_miss: hand-scheduled fast TLB refill.
!   1. Read the faulting address from MMU_TEA and the page-table base
!      from MMU_TTB, index the pgd, then the pte.
!   2. If the pgd or pte entry is absent, or the pte lacks
!      _PAGE_PRESENT (or, with PTEA, fails the early present test),
!      branch to 20f which funnels into the C page-fault path.
!   3. Otherwise write MMU_PTEL (and MMU_PTEA where the CPU has it),
!      set _PAGE_ACCESSED in the in-memory pte if it was clear, and
!      reload the TLB with ldtlb before rte.
! The trailing "! NN XX" comments are pipeline-unit/latency
! annotations from the original author's scheduling notes.
358	.balign 	1024,0,1024
359tlb_miss:
9b3a53ab
SM
360#ifdef COUNT_EXCEPTIONS
361	! Increment the counts
362	mov.l	9f, k1
363	mov.l	@k1, k2
364	add	#1, k2
365	mov.l	k2, @k1
366#endif
367
368	! k0 scratch
369	! k1 pgd and pte pointers
370	! k2 faulting address
371	! k3 pgd and pte index masks
372	! k4 shift
373
374	! Load up the pgd entry (k1)
375
376	ldmmupteh(k0)			!  9 LS (latency=2)	MMU_PTEH
377
378	mov.w	4f, k3			!  8 LS (latency=2)	(PTRS_PER_PGD-1) << 2
379	mov	#-(PGDIR_SHIFT-2), k4	!  6 EX
380
381	mov.l	@(MMU_TEA-MMU_PTEH,k0), k2	! 18 LS (latency=2)
382
383	mov.l	@(MMU_TTB-MMU_PTEH,k0), k1	! 18 LS (latency=2)
384
385	mov	k2, k0			!   5 MT (latency=0)
386	shld	k4, k0			!  99 EX
387
388	and	k3, k0			!  78 EX
389
390	mov.l	@(k0, k1), k1		!  21 LS (latency=2)
391	mov	#-(PAGE_SHIFT-2), k4	!   6 EX
392
393	! Load up the pte entry (k2)
394
395	mov	k2, k0			!   5 MT (latency=0)
396	shld	k4, k0			!  99 EX
397
398	tst	k1, k1			!  86 MT
399
400	bt	20f			! 110 BR	! pgd entry empty -> slow path
401
402	and	k3, k0			!  78 EX
403	mov.w	5f, k4			!   8 LS (latency=2)	_PAGE_PRESENT
404
405	mov.l	@(k0, k1), k2		!  21 LS (latency=2)
406	add	k0, k1			!  49 EX	! k1 = &pte (for write-back)
407
408#ifdef CONFIG_CPU_HAS_PTEA
409	! Test the entry for present and _PAGE_ACCESSED
410
411	mov	#-28, k3		!   6 EX
412	mov	k2, k0			!   5 MT (latency=0)
413
414	tst	k4, k2			!  68 MT
415	shld	k3, k0			!  99 EX
416
417	bt	20f			! 110 BR	! not present -> slow path
418
419	! Set PTEA register
420	! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
421	!
422	! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
423
424	and	#0xe, k0		!  79 EX
425
426	mov	k0, k3			!   5 MT (latency=0)
427	mov	k2, k0			!   5 MT (latency=0)
428
429	and	#1, k0			!  79 EX
430
431	or	k0, k3			!  82 EX
432
433	ldmmupteh(k0)			!   9 LS (latency=2)
434	shll2	k4			! 101 EX		_PAGE_ACCESSED
435
436	tst	k4, k2			!  68 MT
437
438	mov.l	k3, @(MMU_PTEA-MMU_PTEH,k0)	! 27 LS
439
440	mov.l	7f, k3			!   9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
441
442	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
443#else
444
445	! Test the entry for present and _PAGE_ACCESSED
446
447	mov.l	7f, k3			!   9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
448	tst	k4, k2			!  68 MT
449
450	shll2	k4			! 101 EX		_PAGE_ACCESSED
451	ldmmupteh(k0)			!   9 LS (latency=2)
452
453	bt	20f			! 110 BR	! not present -> slow path
454	tst	k4, k2			!  68 MT
455
456	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
457
458#endif
459
460	! Set up the entry
	! T is set iff _PAGE_ACCESSED was clear. The MMU_PTEL store below is
	! the bt/s delay slot, so it executes on BOTH paths.

461
462	and	k2, k3			!  78 EX
463	bt/s	10f			! 108 BR
464
465	 mov.l	k3, @(MMU_PTEL-MMU_PTEH,k0)	! 27 LS
466
467	ldtlb				! 128 CO
468
469	! At least one instruction between ldtlb and rte
470	nop				! 119 NOP
471
472	rte				! 126 CO
473
474	 nop				! 119 NOP
475
476
! Accessed bit was clear: set it in the in-memory pte and write back,
! then reload the TLB.
47710:	or	k4, k2			!  82 EX
478
479	ldtlb				! 128 CO
480
481	! At least one instruction between ldtlb and rte
482	mov.l	k2, @k1			!  27 LS
483
484	rte				! 126 CO
485
486	! Note we cannot execute mov here, because it is executed after
487	! restoring SSR, so would be executed in user space.
488	 nop				! 119 NOP
489
490
491	.align 5
492	! Once cache line if possible...
4931:	.long	swapper_pg_dir
4944:	.short	(PTRS_PER_PGD-1) << 2
4955:	.short	_PAGE_PRESENT
4967:	.long	_PAGE_FLAGS_HARDWARE_MASK
4978:	.long	MMU_PTEH
498#ifdef COUNT_EXCEPTIONS
4999:	.long	exception_count_miss
500#endif
501
! Slow-path exit from tlb_miss: hand off to the generic exception
! machinery with k2 = EXPEVT value and k3 = ret_from_exception, which
! ends up calling the C fault handlers via exception_handling_table.
502	! Either pgd or pte not present
50320:	mov.l	1f, k2
1da177e4
LT
504	mov.l	4f, k3
505	bra	handle_exception
506	 mov.l	@k2, k2			! delay slot: k2 = *EXPEVT
507!
! Hardware interrupt vector. Sets k2 = -1 as the "this is an IRQ"
! marker (handle_exception tests its sign via cmp/pz) and
! k3 = ret_from_irq. The optional KGDB block debounces nested NMIs
! with an atomic tas.b on in_nmi, returning immediately if one is
! already in flight.
508	.balign 	512,0,512
509interrupt:
510	mov.l	2f, k2
511	mov.l	3f, k3
512#if defined(CONFIG_KGDB_NMI)
513	! Debounce (filter nested NMI)
514	mov.l	@k2, k0
515	mov.l	5f, k1
516	cmp/eq	k1, k0
517	bf	0f
518	mov.l	6f, k1
519	tas.b	@k1
520	bt	0f
521	rte
522	 nop
523	.align	2
5245:	.long	NMI_VEC
5256:	.long	in_nmi
5260:
527#endif /* defined(CONFIG_KGDB_NMI) */
528	bra	handle_exception
baf4326e 529	 mov	#-1, k2		! interrupt exception marker
1da177e4
LT
530
531	.align	2
5321:	.long	EXPEVT
5332:	.long	INTEVT
5343:	.long	ret_from_irq
5354:	.long	ret_from_exception
536
537
538!
! handle_exception: common save path for every exception/interrupt.
! On entry (still in register bank 1): k2 = exception code from EXPEVT
! (or -1 for interrupts), k3 = return address to install in pr.
! Builds the full trap frame (layout per the file header), switching
! to the kernel stack first if we came from user mode, then dispatches
! either through exception_handling_table (indexed by EXPEVT >> 5 << 2,
! i.e. the shlr2/shlr below) or to do_IRQ for the interrupt marker.
539	.align	2
3aa770e7 540ENTRY(handle_exception)
1da177e4
LT
541	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
542	! save all registers onto stack.
543	!
544	stc	ssr, k0		! Is it from kernel space?
545	shll	k0		! Check MD bit (bit30) by shifting it into...
546	shll	k0		!       ...the T bit
547	bt/s	1f		! It's a kernel to kernel transition.
548	 mov	r15, k0		! save original stack to k0
549	/* User space to kernel */
	! Kernel stack top = current thread_info + THREAD_SIZE; the three
	! shifts rebuild THREAD_SIZE from the 8-bit immediate (<< 10).
510c72ad 550	mov	#(THREAD_SIZE >> 10), k1
a6a31139 551	shll8	k1		! k1 := THREAD_SIZE
510c72ad 552	shll2	k1
1da177e4
LT
553	add	current, k1
554	mov	k1, r15		! change to kernel stack
555	!
baf4326e 5561:	mov.l	2f, k1
1da177e4
LT
557	!
558#ifdef CONFIG_SH_DSP
559	mov.l	r2, @-r15	! Save r2, we need another reg
560	stc	sr, k4
561	mov.l	1f, r2
562	tst	r2, k4		! Check if in DSP mode
563	mov.l	@r15+, r2	! Restore r2 now
564	bt/s	skip_save
565	 mov	#0, k4		! Set marker for no stack frame
566
567	mov	r2, k4		! Backup r2 (in k4) for later
568
569	! Save DSP registers on stack
570	stc.l	mod, @-r15
571	stc.l	re, @-r15
572	stc.l	rs, @-r15
573	sts.l	dsr, @-r15
574	sts.l	y1, @-r15
575	sts.l	y0, @-r15
576	sts.l	x1, @-r15
577	sts.l	x0, @-r15
578	sts.l	a0, @-r15
579
580	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
581
582	! FIXME: Make sure that this is still the case with newer toolchains,
583	! as we're not at all interested in supporting ancient toolchains at
584	! this point. -- PFM.
585
586	mov	r15, r2
587	.word	0xf653		! movs.l	a1, @-r2
588	.word	0xf6f3		! movs.l	a0g, @-r2
589	.word	0xf6d3		! movs.l	a1g, @-r2
590	.word	0xf6c3		! movs.l	m0, @-r2
591	.word	0xf6e3		! movs.l	m1, @-r2
592	mov	r2, r15
593
594	mov	k4, r2		! Restore r2
595	mov.l	1f, k4		! Force DSP stack frame
596skip_save:
597	mov.l	k4, @-r15	! Push DSP mode marker onto stack
598#endif
599	! Save the user registers on the stack.
600	mov.l	k2, @-r15	! EXPEVT
baf4326e 601
de398406 602	mov	#-1, k4
1da177e4
LT
603	mov.l	k4, @-r15	! set TRA (default: -1)
604	!
605	sts.l	macl, @-r15
606	sts.l	mach, @-r15
607	stc.l	gbr, @-r15
608	stc.l	ssr, @-r15
609	sts.l	pr, @-r15
610	stc.l	spc, @-r15
611	!
612	lds	k3, pr		! Set the return address to pr
613	!
614	mov.l	k0, @-r15	! save original stack
615	mov.l	r14, @-r15
616	mov.l	r13, @-r15
617	mov.l	r12, @-r15
618	mov.l	r11, @-r15
619	mov.l	r10, @-r15
620	mov.l	r9, @-r15
621	mov.l	r8, @-r15
622	!
623	stc	sr, r8		! Back to normal register bank, and
624	or	k1, r8		! Block all interrupts
625	mov.l	3f, k1
626	and	k1, r8		! ...
627	ldc	r8, sr		! ...changed here.
628	!
629	mov.l	r7, @-r15
630	mov.l	r6, @-r15
631	mov.l	r5, @-r15
632	mov.l	r4, @-r15
633	mov.l	r3, @-r15
634	mov.l	r2, @-r15
635	mov.l	r1, @-r15
636	mov.l	r0, @-r15
baf4326e
PM
637
638	/*
639	 * This gets a bit tricky.. in the INTEVT case we don't want to use
640	 * the VBR offset as a destination in the jump call table, since all
641	 * of the destinations are the same. In this case, (interrupt) sets
642	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
643	 * to determine the exception type. For all other exceptions, we
644	 * forcibly read EXPEVT from memory and fix up the jump address, in
645	 * the interrupt exception case we jump to do_IRQ() and defer the
646	 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
647	 * checks that do_IRQ() was doing..
648	 */
649	stc	r2_bank, r8
650	cmp/pz	r8
651	bf	interrupt_exception
1da177e4
LT
652	shlr2	r8
653	shlr	r8
9b3a53ab
SM
654
655#ifdef COUNT_EXCEPTIONS
656	mov.l	5f, r9
657	add	r8, r9
658	mov.l	@r9, r10
659	add	#1, r10
660	mov.l	r10, @r9
661#endif
662
1da177e4
LT
663	mov.l	4f, r9
664	add	r8, r9
665	mov.l	@r9, r9
666	jmp	@r9
667	 nop
baf4326e
PM
	! NOTE(review): the rts/nop pair below is unreachable — jmp above is
	! unconditional.
668	rts
669	 nop
1da177e4
LT
670
671	.align	2
6721:	.long	0x00001000	! DSP=1
6732:	.long	0x000080f0	! FD=1, IMASK=15
6743:	.long	0xcfffffff	! RB=0, BL=0
6754:	.long	exception_handling_table
9b3a53ab
SM
676#ifdef COUNT_EXCEPTIONS
6775:	.long	exception_count_table
678#endif
1da177e4 679
baf4326e
PM
! Tail of the IRQ path: jump to do_IRQ(). pr was already set to
! ret_from_irq by handle_exception (k3 loaded at the interrupt vector),
! so do_IRQ returns straight there.
680interrupt_exception:
681	mov.l	1f, r9
682	jmp	@r9
683	 nop
	! NOTE(review): unreachable — jmp above is unconditional.
684	rts
685	 nop
686
687	.align	2
6881:	.long	do_IRQ
689
1da177e4
LT
! Do-nothing handler: plain return for vectors with no work to do.
690	.align	2
691ENTRY(exception_none)
692	rts
693	 nop
! (gitweb footer) This page took 0.209206 seconds and 5 git commands to generate.