1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/entry.S
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2004, 2005 Paul Mundt
10 * Copyright (C) 2003, 2004 Richard Curnow
11 *
12 */
13 #include <linux/errno.h>
14 #include <linux/sys.h>
15 #include <asm/cpu/registers.h>
16 #include <asm/processor.h>
17 #include <asm/unistd.h>
18 #include <asm/thread_info.h>
19 #include <asm/asm-offsets.h>
20
21 /*
22 * SR fields.
23 */
24 #define SR_ASID_MASK 0x00ff0000
25 #define SR_FD_MASK 0x00008000
26 #define SR_SS 0x08000000
27 #define SR_BL 0x10000000
28 #define SR_MD 0x40000000
29
30 /*
31 * Event code.
32 */
33 #define EVENT_INTERRUPT 0
34 #define EVENT_FAULT_TLB 1
35 #define EVENT_FAULT_NOT_TLB 2
36 #define EVENT_DEBUG 3
37
38 /* EXPEVT values */
39 #define RESET_CAUSE 0x20
40 #define DEBUGSS_CAUSE 0x980
41
42 /*
43 * Frame layout. Quad index.
44 */
45 #define FRAME_T(x) FRAME_TBASE+(x*8)
46 #define FRAME_R(x) FRAME_RBASE+(x*8)
47 #define FRAME_S(x) FRAME_SBASE+(x*8)
48 #define FSPC 0
49 #define FSSR 1
50 #define FSYSCALL_ID 2
51
52 /* Arrange the save frame to be a multiple of 32 bytes long */
53 #define FRAME_SBASE 0
54 #define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
55 #define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
56 #define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
57 #define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
58
59 #define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
60 #define FP_FRAME_BASE 0
61
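/*
 * A C model of the BASIC frame layout above (a sketch for illustration
 * only; the kernel consumes these offsets via the FRAME_*() macros).
 * It checks the "multiple of 32 bytes" requirement stated above:
 *
 *	#include <assert.h>
 *	#include <stdio.h>
 *
 *	enum {
 *		frame_sbase = 0,                    // SPC, SSR, SYSCALL_ID
 *		frame_rbase = frame_sbase + 3 * 8,  // r0..r62
 *		frame_tbase = frame_rbase + 63 * 8, // tr0..tr7
 *		frame_pbase = frame_tbase + 8 * 8,  // pad0, pad1
 *		frame_size  = frame_pbase + 2 * 8,  // 608 bytes
 *	};
 *
 *	int main(void)
 *	{
 *		assert(frame_size % 32 == 0);
 *		printf("frame size = %d bytes\n", frame_size);
 *		return 0;
 *	}
 */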
62 #define SAVED_R2 0*8
63 #define SAVED_R3 1*8
64 #define SAVED_R4 2*8
65 #define SAVED_R5 3*8
66 #define SAVED_R18 4*8
67 #define SAVED_R6 5*8
68 #define SAVED_TR0 6*8
69
70 /* These are the registers saved in the TLB path that aren't saved in the first
71 level of the normal one. */
72 #define TLB_SAVED_R25 7*8
73 #define TLB_SAVED_TR1 8*8
74 #define TLB_SAVED_TR2 9*8
75 #define TLB_SAVED_TR3 10*8
76 #define TLB_SAVED_TR4 11*8
77 /* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1,
78 causing breakage otherwise. */
79 #define TLB_SAVED_R0 12*8
80 #define TLB_SAVED_R1 13*8
81
82 #define CLI() \
83 getcon SR, r6; \
84 ori r6, 0xf0, r6; \
85 putcon r6, SR;
86
87 #define STI() \
88 getcon SR, r6; \
89 andi r6, ~0xf0, r6; \
90 putcon r6, SR;
91
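/*
 * What CLI()/STI() do to SR, as a minimal C model (a sketch, not
 * kernel code): CLI() sets all four SR.IMASK bits (SR[7:4]) so every
 * interrupt level is masked; STI() clears them again.
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define SR_IMASK 0x000000f0u          // SR bits [7:4]
 *
 *	static uint32_t cli(uint32_t sr) { return sr |  SR_IMASK; }
 *	static uint32_t sti(uint32_t sr) { return sr & ~SR_IMASK; }
 *
 *	int main(void)
 *	{
 *		uint32_t sr = 0x40000000;     // SR.MD only
 *		assert(cli(sr) == 0x400000f0);
 *		assert(sti(cli(sr)) == sr);
 *		return 0;
 *	}
 */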
92 #ifdef CONFIG_PREEMPT
93 # define preempt_stop() CLI()
94 #else
95 # define preempt_stop()
96 # define resume_kernel restore_all
97 #endif
98
99 .section .data, "aw"
100
101 #define FAST_TLBMISS_STACK_CACHELINES 4
102 #define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
103
104 /* Register back-up area for all exceptions */
105 .balign 32
106 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
107 * register saves etc. */
108 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
109 /* This is 32 byte aligned by construction */
110 /* Register back-up area for all exceptions */
111 reg_save_area:
112 .quad 0
113 .quad 0
114 .quad 0
115 .quad 0
116
117 .quad 0
118 .quad 0
119 .quad 0
120 .quad 0
121
122 .quad 0
123 .quad 0
124 .quad 0
125 .quad 0
126
127 .quad 0
128 .quad 0
129
130 /* Save area for RESVEC exceptions. We cannot use reg_save_area because of
131 * reentrancy. Note this area may be accessed via physical address.
132 * Align so this fits a whole single cache line, for ease of purging.
133 */
134 .balign 32,0,32
135 resvec_save_area:
136 .quad 0
137 .quad 0
138 .quad 0
139 .quad 0
140 .quad 0
141 .balign 32,0,32
142
143 /* Jump table of 3rd level handlers */
144 trap_jtable:
145 .long do_exception_error /* 0x000 */
146 .long do_exception_error /* 0x020 */
147 .long tlb_miss_load /* 0x040 */
148 .long tlb_miss_store /* 0x060 */
149 ! ARTIFICIAL pseudo-EXPEVT setting
150 .long do_debug_interrupt /* 0x080 */
151 .long tlb_miss_load /* 0x0A0 */
152 .long tlb_miss_store /* 0x0C0 */
153 .long do_address_error_load /* 0x0E0 */
154 .long do_address_error_store /* 0x100 */
155 #ifdef CONFIG_SH_FPU
156 .long do_fpu_error /* 0x120 */
157 #else
158 .long do_exception_error /* 0x120 */
159 #endif
160 .long do_exception_error /* 0x140 */
161 .long system_call /* 0x160 */
162 .long do_reserved_inst /* 0x180 */
163 .long do_illegal_slot_inst /* 0x1A0 */
164 .long do_exception_error /* 0x1C0 - NMI */
165 .long do_exception_error /* 0x1E0 */
166 .rept 15
167 .long do_IRQ /* 0x200 - 0x3C0 */
168 .endr
169 .long do_exception_error /* 0x3E0 */
170 .rept 32
171 .long do_IRQ /* 0x400 - 0x7E0 */
172 .endr
173 .long fpu_error_or_IRQA /* 0x800 */
174 .long fpu_error_or_IRQB /* 0x820 */
175 .long do_IRQ /* 0x840 */
176 .long do_IRQ /* 0x860 */
177 .rept 6
178 .long do_exception_error /* 0x880 - 0x920 */
179 .endr
180 .long do_software_break_point /* 0x940 */
181 .long do_exception_error /* 0x960 */
182 .long do_single_step /* 0x980 */
183
184 .rept 3
185 .long do_exception_error /* 0x9A0 - 0x9E0 */
186 .endr
187 .long do_IRQ /* 0xA00 */
188 .long do_IRQ /* 0xA20 */
189 .long itlb_miss_or_IRQ /* 0xA40 */
190 .long do_IRQ /* 0xA60 */
191 .long do_IRQ /* 0xA80 */
192 .long itlb_miss_or_IRQ /* 0xAA0 */
193 .long do_exception_error /* 0xAC0 */
194 .long do_address_error_exec /* 0xAE0 */
195 .rept 8
196 .long do_exception_error /* 0xB00 - 0xBE0 */
197 .endr
198 .rept 18
199 .long do_IRQ /* 0xC00 - 0xE20 */
200 .endr
201
202 .section .text64, "ax"
203
204 /*
205 * --- Exception/Interrupt/Event Handling Section
206 */
207
208 /*
209 * VBR and RESVEC blocks.
210 *
211 * First level handler for VBR-based exceptions.
212 *
213 * To avoid waste of space, align to the maximum text block size.
214 * This is assumed to be at most 128 bytes or 32 instructions.
215 * DO NOT EXCEED 32 instructions on the first level handlers !
216 *
217 * Also note that RESVEC is contained within the VBR block
218 * where the room left (1KB - TEXT_SIZE) allows placing
219 * the RESVEC block (at most 512B + TEXT_SIZE).
220 *
221 * So first (and only) level handler for RESVEC-based exceptions.
222 *
223 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
224 * and interrupt) register space is very tight until the registers
225 * are saved onto the stack frame, which is done in handle_exception().
226 *
227 */
228
229 #define TEXT_SIZE 128
230 #define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
231
232 .balign TEXT_SIZE
233 LVBR_block:
234 .space 256, 0 /* Power-on class handler, */
235 /* not required here */
236 not_a_tlb_miss:
237 synco /* TAKum03020 (but probably a good idea anyway.) */
238 /* Save original stack pointer into KCR1 */
239 putcon SP, KCR1
240
241 /* Save other original registers into reg_save_area */
242 movi reg_save_area, SP
243 st.q SP, SAVED_R2, r2
244 st.q SP, SAVED_R3, r3
245 st.q SP, SAVED_R4, r4
246 st.q SP, SAVED_R5, r5
247 st.q SP, SAVED_R6, r6
248 st.q SP, SAVED_R18, r18
249 gettr tr0, r3
250 st.q SP, SAVED_TR0, r3
251
252 /* Set args for Non-debug, Not a TLB miss class handler */
253 getcon EXPEVT, r2
254 movi ret_from_exception, r3
255 ori r3, 1, r3
256 movi EVENT_FAULT_NOT_TLB, r4
257 or SP, ZERO, r5
258 getcon KCR1, SP
259 pta handle_exception, tr0
260 blink tr0, ZERO
261
262 .balign 256
263 ! VBR+0x200
264 nop
265 .balign 256
266 ! VBR+0x300
267 nop
268 .balign 256
269 /*
270 * Instead of the natural .balign 1024 place RESVEC here
271 * respecting the final 1KB alignment.
272 */
273 .balign TEXT_SIZE
274 /*
275 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
276 * block making sure the final alignment is correct.
277 */
278 tlb_miss:
279 synco /* TAKum03020 (but probably a good idea anyway.) */
280 putcon SP, KCR1
281 movi reg_save_area, SP
282 /* SP is guaranteed 32-byte aligned. */
283 st.q SP, TLB_SAVED_R0 , r0
284 st.q SP, TLB_SAVED_R1 , r1
285 st.q SP, SAVED_R2 , r2
286 st.q SP, SAVED_R3 , r3
287 st.q SP, SAVED_R4 , r4
288 st.q SP, SAVED_R5 , r5
289 st.q SP, SAVED_R6 , r6
290 st.q SP, SAVED_R18, r18
291
292 /* Save R25 for safety; as/ld may want to use it to achieve the call to
293 * the code in mm/tlbmiss.c */
294 st.q SP, TLB_SAVED_R25, r25
295 gettr tr0, r2
296 gettr tr1, r3
297 gettr tr2, r4
298 gettr tr3, r5
299 gettr tr4, r18
300 st.q SP, SAVED_TR0 , r2
301 st.q SP, TLB_SAVED_TR1 , r3
302 st.q SP, TLB_SAVED_TR2 , r4
303 st.q SP, TLB_SAVED_TR3 , r5
304 st.q SP, TLB_SAVED_TR4 , r18
305
306 pt do_fast_page_fault, tr0
307 getcon SSR, r2
308 getcon EXPEVT, r3
309 getcon TEA, r4
310 shlri r2, 30, r2
311 andi r2, 1, r2 /* r2 = SSR.MD */
312 blink tr0, LINK
313
314 pt fixup_to_invoke_general_handler, tr1
315
316 /* If the fast path handler fixed the fault, drop straight through to
317 the restore code to return to the excepting context.
318 */
319 beqi/u r2, 0, tr1
320
321 fast_tlb_miss_restore:
322 ld.q SP, SAVED_TR0, r2
323 ld.q SP, TLB_SAVED_TR1, r3
324 ld.q SP, TLB_SAVED_TR2, r4
325
326 ld.q SP, TLB_SAVED_TR3, r5
327 ld.q SP, TLB_SAVED_TR4, r18
328
329 ptabs r2, tr0
330 ptabs r3, tr1
331 ptabs r4, tr2
332 ptabs r5, tr3
333 ptabs r18, tr4
334
335 ld.q SP, TLB_SAVED_R0, r0
336 ld.q SP, TLB_SAVED_R1, r1
337 ld.q SP, SAVED_R2, r2
338 ld.q SP, SAVED_R3, r3
339 ld.q SP, SAVED_R4, r4
340 ld.q SP, SAVED_R5, r5
341 ld.q SP, SAVED_R6, r6
342 ld.q SP, SAVED_R18, r18
343 ld.q SP, TLB_SAVED_R25, r25
344
345 getcon KCR1, SP
346 rte
347 nop /* for safety, in case the code is run on sh5-101 cut1.x */
348
349 fixup_to_invoke_general_handler:
350
351 /* OK, new method. Restore stuff that's not expected to get saved into
352 the 'first-level' reg save area, then just fall through to setting
353 up the registers and calling the second-level handler. */
354
355 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
356 r25,tr1-4 and save r6 to get into the right state. */
357
358 ld.q SP, TLB_SAVED_TR1, r3
359 ld.q SP, TLB_SAVED_TR2, r4
360 ld.q SP, TLB_SAVED_TR3, r5
361 ld.q SP, TLB_SAVED_TR4, r18
362 ld.q SP, TLB_SAVED_R25, r25
363
364 ld.q SP, TLB_SAVED_R0, r0
365 ld.q SP, TLB_SAVED_R1, r1
366
367 ptabs/u r3, tr1
368 ptabs/u r4, tr2
369 ptabs/u r5, tr3
370 ptabs/u r18, tr4
371
372 /* Set args for Non-debug, TLB miss class handler */
373 getcon EXPEVT, r2
374 movi ret_from_exception, r3
375 ori r3, 1, r3
376 movi EVENT_FAULT_TLB, r4
377 or SP, ZERO, r5
378 getcon KCR1, SP
379 pta handle_exception, tr0
380 blink tr0, ZERO
381
382 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
383 DOES END UP AT VBR+0x600 */
384 nop
385 nop
386 nop
387 nop
388 nop
389 nop
390
391 .balign 256
392 /* VBR + 0x600 */
393
394 interrupt:
395 synco /* TAKum03020 (but probably a good idea anyway.) */
396 /* Save original stack pointer into KCR1 */
397 putcon SP, KCR1
398
399 /* Save other original registers into reg_save_area */
400 movi reg_save_area, SP
401 st.q SP, SAVED_R2, r2
402 st.q SP, SAVED_R3, r3
403 st.q SP, SAVED_R4, r4
404 st.q SP, SAVED_R5, r5
405 st.q SP, SAVED_R6, r6
406 st.q SP, SAVED_R18, r18
407 gettr tr0, r3
408 st.q SP, SAVED_TR0, r3
409
410 /* Set args for interrupt class handler */
411 getcon INTEVT, r2
412 movi ret_from_irq, r3
413 ori r3, 1, r3
414 movi EVENT_INTERRUPT, r4
415 or SP, ZERO, r5
416 getcon KCR1, SP
417 pta handle_exception, tr0
418 blink tr0, ZERO
419 .balign TEXT_SIZE /* let's waste the bare minimum */
420
421 LVBR_block_end: /* Marker. Used for total checking */
422
423 .balign 256
424 LRESVEC_block:
425 /* Panic handler. Called with MMU off. Possible causes/actions:
426 * - Reset: Jump to program start.
427 * - Single Step: Turn off Single Step & return.
428 * - Others: Call panic handler, passing PC as arg.
429 * (this may need to be extended...)
430 */
431 reset_or_panic:
432 synco /* TAKum03020 (but probably a good idea anyway.) */
433 putcon SP, DCR
434 /* First save r0-1 and tr0, as we need to use these */
435 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
436 st.q SP, 0, r0
437 st.q SP, 8, r1
438 gettr tr0, r0
439 st.q SP, 32, r0
440
441 /* Check cause */
442 getcon EXPEVT, r0
443 movi RESET_CAUSE, r1
444 sub r1, r0, r1 /* r1=0 if reset */
445 movi _stext-CONFIG_PAGE_OFFSET, r0
446 ori r0, 1, r0
447 ptabs r0, tr0
448 beqi r1, 0, tr0 /* Jump to start address if reset */
449
450 getcon EXPEVT, r0
451 movi DEBUGSS_CAUSE, r1
452 sub r1, r0, r1 /* r1=0 if single step */
453 pta single_step_panic, tr0
454 beqi r1, 0, tr0 /* jump if single step */
455
456 /* Now jump to where we save the registers. */
457 movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
458 ptabs r1, tr0
459 blink tr0, r63
460
461 single_step_panic:
462 /* We are in a handler with Single Step set. We need to resume the
463 * handler by turning on the MMU and turning off Single Step. */
464 getcon SSR, r0
465 movi SR_MMU, r1
466 or r0, r1, r0
467 movi ~SR_SS, r1
468 and r0, r1, r0
469 putcon r0, SSR
470 /* Restore EXPEVT, as the rte won't do this */
471 getcon PEXPEVT, r0
472 putcon r0, EXPEVT
473 /* Restore regs */
474 ld.q SP, 32, r0
475 ptabs r0, tr0
476 ld.q SP, 0, r0
477 ld.q SP, 8, r1
478 getcon DCR, SP
479 synco
480 rte
481
482
483 .balign 256
484 debug_exception:
485 synco /* TAKum03020 (but probably a good idea anyway.) */
486 /*
487 * Single step/software_break_point first level handler.
488 * Called with MMU off, so the first thing we do is enable it
489 * by doing an rte with appropriate SSR.
490 */
491 putcon SP, DCR
492 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
493 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
494
495 /* With the MMU off, we are bypassing the cache, so purge any
496 * data that will be made stale by the following stores.
497 */
498 ocbp SP, 0
499 synco
500
501 st.q SP, 0, r0
502 st.q SP, 8, r1
503 getcon SPC, r0
504 st.q SP, 16, r0
505 getcon SSR, r0
506 st.q SP, 24, r0
507
508 /* Enable MMU, block exceptions, set priv mode, disable single step */
509 movi SR_MMU | SR_BL | SR_MD, r1
510 or r0, r1, r0
511 movi ~SR_SS, r1
512 and r0, r1, r0
513 putcon r0, SSR
514 /* Force control to debug_exception_2 when rte is executed */
515 movi debug_exception_2, r0
516 ori r0, 1, r0 /* force SHmedia, just in case */
517 putcon r0, SPC
518 getcon DCR, SP
519 synco
520 rte
521 debug_exception_2:
522 /* Restore saved regs */
523 putcon SP, KCR1
524 movi resvec_save_area, SP
525 ld.q SP, 24, r0
526 putcon r0, SSR
527 ld.q SP, 16, r0
528 putcon r0, SPC
529 ld.q SP, 0, r0
530 ld.q SP, 8, r1
531
532 /* Save other original registers into reg_save_area */
533 movi reg_save_area, SP
534 st.q SP, SAVED_R2, r2
535 st.q SP, SAVED_R3, r3
536 st.q SP, SAVED_R4, r4
537 st.q SP, SAVED_R5, r5
538 st.q SP, SAVED_R6, r6
539 st.q SP, SAVED_R18, r18
540 gettr tr0, r3
541 st.q SP, SAVED_TR0, r3
542
543 /* Set args for debug class handler */
544 getcon EXPEVT, r2
545 movi ret_from_exception, r3
546 ori r3, 1, r3
547 movi EVENT_DEBUG, r4
548 or SP, ZERO, r5
549 getcon KCR1, SP
550 pta handle_exception, tr0
551 blink tr0, ZERO
552
553 .balign 256
554 debug_interrupt:
555 /* !!! WE COME HERE IN REAL MODE !!! */
556 /* Hook up the debug interrupt so that various debugging options can
557 * be attached to its handler. */
558 /* Save original stack pointer into KCR1 */
559 synco
560 putcon SP, KCR1
561 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
562 ocbp SP, 0
563 ocbp SP, 32
564 synco
565
566 /* Save other original registers into reg_save_area thru real addresses */
567 st.q SP, SAVED_R2, r2
568 st.q SP, SAVED_R3, r3
569 st.q SP, SAVED_R4, r4
570 st.q SP, SAVED_R5, r5
571 st.q SP, SAVED_R6, r6
572 st.q SP, SAVED_R18, r18
573 gettr tr0, r3
574 st.q SP, SAVED_TR0, r3
575
576 /* move (spc,ssr)->(pspc,pssr). The rte will shift
577 them back again, so that they look like the originals
578 as far as the real handler code is concerned. */
579 getcon spc, r6
580 putcon r6, pspc
581 getcon ssr, r6
582 putcon r6, pssr
583
584 ! construct useful SR for handle_exception
585 movi 3, r6
586 shlli r6, 30, r6
587 getcon sr, r18
588 or r18, r6, r6
589 putcon r6, ssr
590
591 ! SSR is now the current SR with the MD and MMU bits set
592 ! i.e. the rte will switch back to priv mode and put
593 ! the mmu back on
594
595 ! construct spc
596 movi handle_exception, r18
597 ori r18, 1, r18 ! for safety (do we need this?)
598 putcon r18, spc
599
600 /* Set args for Non-debug, Not a TLB miss class handler */
601
602 ! EXPEVT==0x80 is unused, so 'steal' this value to put the
603 ! debug interrupt handler in the vectoring table
604 movi 0x80, r2
605 movi ret_from_exception, r3
606 ori r3, 1, r3
607 movi EVENT_FAULT_NOT_TLB, r4
608
609 or SP, ZERO, r5
610 movi CONFIG_PAGE_OFFSET, r6
611 add r6, r5, r5
612 getcon KCR1, SP
613
614 synco ! for safety
615 rte ! -> handle_exception, switch back to priv mode again
616
617 LRESVEC_block_end: /* Marker. Unused. */
618
619 .balign TEXT_SIZE
620
621 /*
622 * Second level handler for VBR-based exceptions. Pre-handler.
623 * In common to all stack-frame sensitive handlers.
624 *
625 * Inputs:
626 * (KCR0) Current [current task union]
627 * (KCR1) Original SP
628 * (r2) INTEVT/EXPEVT
629 * (r3) appropriate return address
630 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3 = debug)
631 * (r5) Pointer to reg_save_area
632 * (SP) Original SP
633 *
634 * Available registers:
635 * (r6)
636 * (r18)
637 * (tr0)
638 *
639 */
640 handle_exception:
641 /* Common 2nd level handler. */
642
643 /* First thing we need an appropriate stack pointer */
644 getcon SSR, r6
645 shlri r6, 30, r6
646 andi r6, 1, r6
647 pta stack_ok, tr0
648 bne r6, ZERO, tr0 /* Original stack pointer is fine */
649
650 /* Set stack pointer for user fault */
651 getcon KCR0, SP
652 movi THREAD_SIZE, r6 /* Point to the end */
653 add SP, r6, SP
654
655 stack_ok:
656
657 /* DEBUG : check for underflow/overflow of the kernel stack */
658 pta no_underflow, tr0
659 getcon KCR0, r6
660 movi 1024, r18
661 add r6, r18, r6
662 bge SP, r6, tr0 ! safe if SP >= base + 1k; else danger zone, fall into bad_sp
663
664 /* Just panic to cause a crash. */
665 bad_sp:
666 ld.b r63, 0, r6
667 nop
668
669 no_underflow:
670 pta bad_sp, tr0
671 getcon kcr0, r6
672 movi THREAD_SIZE, r18
673 add r18, r6, r6
674 bgt SP, r6, tr0 ! sp above the stack
675
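/*
 * The two checks above enforce the following range, shown as a C
 * sketch (THREAD_SIZE is arch-configured; 1024 bytes is the
 * danger-zone guard used above):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool kernel_sp_ok(uintptr_t thread_base, uintptr_t sp,
 *				 uintptr_t thread_size)
 *	{
 *		// bad_sp is taken if SP dips into the bottom 1k
 *		// (underflow) or rises above the thread area (overflow)
 *		return sp >= thread_base + 1024 &&
 *		       sp <= thread_base + thread_size;
 *	}
 *
 *	int main(void)
 *	{
 *		return kernel_sp_ok(0x10000, 0x12000, 16384) ? 0 : 1;
 *	}
 */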
676 /* Make some room for the BASIC frame. */
677 movi -(FRAME_SIZE), r6
678 add SP, r6, SP
679
680 /* Could do this with no stalling if we had another spare register, but the
681 code below will be OK. */
682 ld.q r5, SAVED_R2, r6
683 ld.q r5, SAVED_R3, r18
684 st.q SP, FRAME_R(2), r6
685 ld.q r5, SAVED_R4, r6
686 st.q SP, FRAME_R(3), r18
687 ld.q r5, SAVED_R5, r18
688 st.q SP, FRAME_R(4), r6
689 ld.q r5, SAVED_R6, r6
690 st.q SP, FRAME_R(5), r18
691 ld.q r5, SAVED_R18, r18
692 st.q SP, FRAME_R(6), r6
693 ld.q r5, SAVED_TR0, r6
694 st.q SP, FRAME_R(18), r18
695 st.q SP, FRAME_T(0), r6
696
697 /* Keep old SP around */
698 getcon KCR1, r6
699
700 /* Save the rest of the general purpose registers */
701 st.q SP, FRAME_R(0), r0
702 st.q SP, FRAME_R(1), r1
703 st.q SP, FRAME_R(7), r7
704 st.q SP, FRAME_R(8), r8
705 st.q SP, FRAME_R(9), r9
706 st.q SP, FRAME_R(10), r10
707 st.q SP, FRAME_R(11), r11
708 st.q SP, FRAME_R(12), r12
709 st.q SP, FRAME_R(13), r13
710 st.q SP, FRAME_R(14), r14
711
712 /* SP is somewhere else */
713 st.q SP, FRAME_R(15), r6
714
715 st.q SP, FRAME_R(16), r16
716 st.q SP, FRAME_R(17), r17
717 /* r18 is saved earlier. */
718 st.q SP, FRAME_R(19), r19
719 st.q SP, FRAME_R(20), r20
720 st.q SP, FRAME_R(21), r21
721 st.q SP, FRAME_R(22), r22
722 st.q SP, FRAME_R(23), r23
723 st.q SP, FRAME_R(24), r24
724 st.q SP, FRAME_R(25), r25
725 st.q SP, FRAME_R(26), r26
726 st.q SP, FRAME_R(27), r27
727 st.q SP, FRAME_R(28), r28
728 st.q SP, FRAME_R(29), r29
729 st.q SP, FRAME_R(30), r30
730 st.q SP, FRAME_R(31), r31
731 st.q SP, FRAME_R(32), r32
732 st.q SP, FRAME_R(33), r33
733 st.q SP, FRAME_R(34), r34
734 st.q SP, FRAME_R(35), r35
735 st.q SP, FRAME_R(36), r36
736 st.q SP, FRAME_R(37), r37
737 st.q SP, FRAME_R(38), r38
738 st.q SP, FRAME_R(39), r39
739 st.q SP, FRAME_R(40), r40
740 st.q SP, FRAME_R(41), r41
741 st.q SP, FRAME_R(42), r42
742 st.q SP, FRAME_R(43), r43
743 st.q SP, FRAME_R(44), r44
744 st.q SP, FRAME_R(45), r45
745 st.q SP, FRAME_R(46), r46
746 st.q SP, FRAME_R(47), r47
747 st.q SP, FRAME_R(48), r48
748 st.q SP, FRAME_R(49), r49
749 st.q SP, FRAME_R(50), r50
750 st.q SP, FRAME_R(51), r51
751 st.q SP, FRAME_R(52), r52
752 st.q SP, FRAME_R(53), r53
753 st.q SP, FRAME_R(54), r54
754 st.q SP, FRAME_R(55), r55
755 st.q SP, FRAME_R(56), r56
756 st.q SP, FRAME_R(57), r57
757 st.q SP, FRAME_R(58), r58
758 st.q SP, FRAME_R(59), r59
759 st.q SP, FRAME_R(60), r60
760 st.q SP, FRAME_R(61), r61
761 st.q SP, FRAME_R(62), r62
762
763 /*
764 * Save the S* registers.
765 */
766 getcon SSR, r61
767 st.q SP, FRAME_S(FSSR), r61
768 getcon SPC, r62
769 st.q SP, FRAME_S(FSPC), r62
770 movi -1, r62 /* Reset syscall_nr */
771 st.q SP, FRAME_S(FSYSCALL_ID), r62
772
773 /* Save the rest of the target registers */
774 gettr tr1, r6
775 st.q SP, FRAME_T(1), r6
776 gettr tr2, r6
777 st.q SP, FRAME_T(2), r6
778 gettr tr3, r6
779 st.q SP, FRAME_T(3), r6
780 gettr tr4, r6
781 st.q SP, FRAME_T(4), r6
782 gettr tr5, r6
783 st.q SP, FRAME_T(5), r6
784 gettr tr6, r6
785 st.q SP, FRAME_T(6), r6
786 gettr tr7, r6
787 st.q SP, FRAME_T(7), r6
788
789 ! setup FP so that unwinder can wind back through nested kernel mode
790 ! exceptions
791 add SP, ZERO, r14
792
793 #ifdef CONFIG_POOR_MANS_STRACE
794 /* We've pushed all the registers now, so only r2-r4 hold anything
795 * useful. Move them into callee save registers */
796 or r2, ZERO, r28
797 or r3, ZERO, r29
798 or r4, ZERO, r30
799
800 /* Preserve r2 as the event code */
801 movi evt_debug, r3
802 ori r3, 1, r3
803 ptabs r3, tr0
804
805 or SP, ZERO, r6
806 getcon TRA, r5
807 blink tr0, LINK
808
809 or r28, ZERO, r2
810 or r29, ZERO, r3
811 or r30, ZERO, r4
812 #endif
813
814 /* For syscall and debug race condition, get TRA now */
815 getcon TRA, r5
816
817 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
818 * Also set FD, to catch FPU usage in the kernel.
819 *
820 * benedict.gaster@superh.com 29/07/2002
821 *
822 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
823 * same time change BL from 1->0, as any pending interrupt of a level
824 * higher than the previous value of IMASK will leak through and be
825 * taken unexpectedly.
826 *
827 * To avoid this we raise the IMASK and then issue another PUTCON to
828 * enable interrupts.
829 */
830 getcon SR, r6
831 movi SR_IMASK | SR_FD, r7
832 or r6, r7, r6
833 putcon r6, SR
834 movi SR_UNBLOCK_EXC, r7
835 and r6, r7, r6
836 putcon r6, SR
837
838
839 /* Now call the appropriate 3rd level handler */
840 or r3, ZERO, LINK
841 movi trap_jtable, r3
842 shlri r2, 3, r2
843 ldx.l r2, r3, r3
844 shlri r2, 2, r2
845 ptabs r3, tr0
846 or SP, ZERO, r3
847 blink tr0, ZERO
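/*
 * The indexing above relies on event codes stepping by 0x20 while
 * trap_jtable entries are 4 bytes, so 'shlri r2, 3' (code/8) is the
 * byte offset ((code/0x20)*4), and the further 'shlri r2, 2' leaves
 * the entry number (code/0x20) in r2 for the handler. A C sketch of
 * the same dispatch (stub handler names are illustrative):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	typedef void (*handler_t)(void);
 *
 *	static void irq_stub(void) { puts("IRQ"); }
 *	static void err_stub(void) { puts("exception error"); }
 *
 *	static handler_t jtable[0xE40 / 0x20];
 *
 *	static handler_t lookup(uint32_t code)
 *	{
 *		return jtable[code >> 5];   // byte offset (code >> 3) / 4
 *	}
 *
 *	int main(void)
 *	{
 *		for (unsigned i = 0; i < 0xE40 / 0x20; i++)
 *			jtable[i] = err_stub;
 *		jtable[0x200 >> 5] = irq_stub;   // first IRQ slot
 *		lookup(0x200)();                 // prints "IRQ"
 *		return 0;
 *	}
 */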
848
849 /*
850 * Second level handler for VBR-based exceptions. Post-handlers.
851 *
852 * Post-handlers for interrupts (ret_from_irq), exceptions
853 * (ret_from_exception) and common reentrance doors (restore_all
854 * to get back to the original context, ret_from_syscall loop to
855 * check kernel exiting).
856 *
857 * ret_with_reschedule and work_notifysig are inner labels of
858 * the ret_from_syscall loop.
859 *
860 * In common to all stack-frame sensitive handlers.
861 *
862 * Inputs:
863 * (SP) struct pt_regs *, original register's frame pointer (basic)
864 *
865 */
866 .global ret_from_irq
867 ret_from_irq:
868 #ifdef CONFIG_POOR_MANS_STRACE
869 pta evt_debug_ret_from_irq, tr0
870 ori SP, 0, r2
871 blink tr0, LINK
872 #endif
873 ld.q SP, FRAME_S(FSSR), r6
874 shlri r6, 30, r6
875 andi r6, 1, r6
876 pta resume_kernel, tr0
877 bne r6, ZERO, tr0 /* no further checks */
878 STI()
879 pta ret_with_reschedule, tr0
880 blink tr0, ZERO /* Do not check softirqs */
881
882 .global ret_from_exception
883 ret_from_exception:
884 preempt_stop()
885
886 #ifdef CONFIG_POOR_MANS_STRACE
887 pta evt_debug_ret_from_exc, tr0
888 ori SP, 0, r2
889 blink tr0, LINK
890 #endif
891
892 ld.q SP, FRAME_S(FSSR), r6
893 shlri r6, 30, r6
894 andi r6, 1, r6
895 pta resume_kernel, tr0
896 bne r6, ZERO, tr0 /* no further checks */
897
898 /* Check softirqs */
899
900 #ifdef CONFIG_PREEMPT
901 pta ret_from_syscall, tr0
902 blink tr0, ZERO
903
904 resume_kernel:
905 pta restore_all, tr0
906
907 getcon KCR0, r6
908 ld.l r6, TI_PRE_COUNT, r7
909 beq/u r7, ZERO, tr0
910
911 need_resched:
912 ld.l r6, TI_FLAGS, r7
913 movi (1 << TIF_NEED_RESCHED), r8
914 and r8, r7, r8
915 bne r8, ZERO, tr0
916
917 getcon SR, r7
918 andi r7, 0xf0, r7
919 bne r7, ZERO, tr0
920
921 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
922 shori (PREEMPT_ACTIVE & 65535), r8
923 st.l r6, TI_PRE_COUNT, r8
924
925 STI()
926 movi schedule, r7
927 ori r7, 1, r7
928 ptabs r7, tr1
929 blink tr1, LINK
930
931 st.l r6, TI_PRE_COUNT, ZERO
932 CLI()
933
934 pta need_resched, tr1
935 blink tr1, ZERO
936 #endif
937
938 .global ret_from_syscall
939 ret_from_syscall:
940
941 ret_with_reschedule:
942 getcon KCR0, r6 ! r6 contains current_thread_info
943 ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
944
945 movi _TIF_NEED_RESCHED, r8
946 and r8, r7, r8
947 pta work_resched, tr0
948 bne r8, ZERO, tr0
949
950 pta restore_all, tr1
951
952 movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
953 and r8, r7, r8
954 pta work_notifysig, tr0
955 bne r8, ZERO, tr0
956
957 blink tr1, ZERO
958
959 work_resched:
960 pta ret_from_syscall, tr0
961 gettr tr0, LINK
962 movi schedule, r6
963 ptabs r6, tr0
964 blink tr0, ZERO /* Call schedule(), return on top */
965
966 work_notifysig:
967 gettr tr1, LINK
968
969 movi do_signal, r6
970 ptabs r6, tr0
971 or SP, ZERO, r2
972 or ZERO, ZERO, r3
973 blink tr0, LINK /* Call do_signal(regs, 0), return here */
974
975 restore_all:
976 /* Do prefetches */
977
978 ld.q SP, FRAME_T(0), r6
979 ld.q SP, FRAME_T(1), r7
980 ld.q SP, FRAME_T(2), r8
981 ld.q SP, FRAME_T(3), r9
982 ptabs r6, tr0
983 ptabs r7, tr1
984 ptabs r8, tr2
985 ptabs r9, tr3
986 ld.q SP, FRAME_T(4), r6
987 ld.q SP, FRAME_T(5), r7
988 ld.q SP, FRAME_T(6), r8
989 ld.q SP, FRAME_T(7), r9
990 ptabs r6, tr4
991 ptabs r7, tr5
992 ptabs r8, tr6
993 ptabs r9, tr7
994
995 ld.q SP, FRAME_R(0), r0
996 ld.q SP, FRAME_R(1), r1
997 ld.q SP, FRAME_R(2), r2
998 ld.q SP, FRAME_R(3), r3
999 ld.q SP, FRAME_R(4), r4
1000 ld.q SP, FRAME_R(5), r5
1001 ld.q SP, FRAME_R(6), r6
1002 ld.q SP, FRAME_R(7), r7
1003 ld.q SP, FRAME_R(8), r8
1004 ld.q SP, FRAME_R(9), r9
1005 ld.q SP, FRAME_R(10), r10
1006 ld.q SP, FRAME_R(11), r11
1007 ld.q SP, FRAME_R(12), r12
1008 ld.q SP, FRAME_R(13), r13
1009 ld.q SP, FRAME_R(14), r14
1010
1011 ld.q SP, FRAME_R(16), r16
1012 ld.q SP, FRAME_R(17), r17
1013 ld.q SP, FRAME_R(18), r18
1014 ld.q SP, FRAME_R(19), r19
1015 ld.q SP, FRAME_R(20), r20
1016 ld.q SP, FRAME_R(21), r21
1017 ld.q SP, FRAME_R(22), r22
1018 ld.q SP, FRAME_R(23), r23
1019 ld.q SP, FRAME_R(24), r24
1020 ld.q SP, FRAME_R(25), r25
1021 ld.q SP, FRAME_R(26), r26
1022 ld.q SP, FRAME_R(27), r27
1023 ld.q SP, FRAME_R(28), r28
1024 ld.q SP, FRAME_R(29), r29
1025 ld.q SP, FRAME_R(30), r30
1026 ld.q SP, FRAME_R(31), r31
1027 ld.q SP, FRAME_R(32), r32
1028 ld.q SP, FRAME_R(33), r33
1029 ld.q SP, FRAME_R(34), r34
1030 ld.q SP, FRAME_R(35), r35
1031 ld.q SP, FRAME_R(36), r36
1032 ld.q SP, FRAME_R(37), r37
1033 ld.q SP, FRAME_R(38), r38
1034 ld.q SP, FRAME_R(39), r39
1035 ld.q SP, FRAME_R(40), r40
1036 ld.q SP, FRAME_R(41), r41
1037 ld.q SP, FRAME_R(42), r42
1038 ld.q SP, FRAME_R(43), r43
1039 ld.q SP, FRAME_R(44), r44
1040 ld.q SP, FRAME_R(45), r45
1041 ld.q SP, FRAME_R(46), r46
1042 ld.q SP, FRAME_R(47), r47
1043 ld.q SP, FRAME_R(48), r48
1044 ld.q SP, FRAME_R(49), r49
1045 ld.q SP, FRAME_R(50), r50
1046 ld.q SP, FRAME_R(51), r51
1047 ld.q SP, FRAME_R(52), r52
1048 ld.q SP, FRAME_R(53), r53
1049 ld.q SP, FRAME_R(54), r54
1050 ld.q SP, FRAME_R(55), r55
1051 ld.q SP, FRAME_R(56), r56
1052 ld.q SP, FRAME_R(57), r57
1053 ld.q SP, FRAME_R(58), r58
1054
1055 getcon SR, r59
1056 movi SR_BLOCK_EXC, r60
1057 or r59, r60, r59
1058 putcon r59, SR /* SR.BL = 1, keep nesting out */
1059 ld.q SP, FRAME_S(FSSR), r61
1060 ld.q SP, FRAME_S(FSPC), r62
1061 movi SR_ASID_MASK, r60
1062 and r59, r60, r59
1063 andc r61, r60, r61 /* Clear out older ASID */
1064 or r59, r61, r61 /* Retain current ASID */
1065 putcon r61, SSR
1066 putcon r62, SPC
1067
1068 /* Ignore FSYSCALL_ID */
1069
1070 ld.q SP, FRAME_R(59), r59
1071 ld.q SP, FRAME_R(60), r60
1072 ld.q SP, FRAME_R(61), r61
1073 ld.q SP, FRAME_R(62), r62
1074
1075 /* Last touch */
1076 ld.q SP, FRAME_R(15), SP
1077 rte
1078 nop
1079
1080 /*
1081 * Third level handlers for VBR-based exceptions. Adapting args to
1082 * and/or deflecting to fourth level handlers.
1083 *
1084 * Fourth level handlers interface.
1085 * Most are C-coded handlers directly pointed by the trap_jtable.
1086 * (Third = Fourth level)
1087 * Inputs:
1088 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1089 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1090 * (r3) struct pt_regs *, original register's frame pointer
1091 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1092 * (r5) TRA control register (for syscall/debug benefit only)
1093 * (LINK) return address
1094 * (SP) = r3
1095 *
1096 * Kernel TLB fault handlers will get a slightly different interface.
1097 * (r2) struct pt_regs *, original register's frame pointer
1098 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1099 * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
1100 * (r5) Effective Address of fault
1101 * (LINK) return address
1102 * (SP) = r2
1103 *
1104 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1105 *
1106 */
1107 tlb_miss_load:
1108 or SP, ZERO, r2
1109 or ZERO, ZERO, r3 /* Read */
1110 or ZERO, ZERO, r4 /* Data */
1111 getcon TEA, r5
1112 pta call_do_page_fault, tr0
1113 beq ZERO, ZERO, tr0
1114
1115 tlb_miss_store:
1116 or SP, ZERO, r2
1117 movi 1, r3 /* Write */
1118 or ZERO, ZERO, r4 /* Data */
1119 getcon TEA, r5
1120 pta call_do_page_fault, tr0
1121 beq ZERO, ZERO, tr0
1122
1123 itlb_miss_or_IRQ:
1124 pta its_IRQ, tr0
1125 beqi/u r4, EVENT_INTERRUPT, tr0
1126 or SP, ZERO, r2
1127 or ZERO, ZERO, r3 /* Read */
1128 movi 1, r4 /* Text */
1129 getcon TEA, r5
1130 /* Fall through */
1131
1132 call_do_page_fault:
1133 movi do_page_fault, r6
1134 ptabs r6, tr0
1135 blink tr0, ZERO
1136
1137 fpu_error_or_IRQA:
1138 pta its_IRQ, tr0
1139 beqi/l r4, EVENT_INTERRUPT, tr0
1140 #ifdef CONFIG_SH_FPU
1141 movi do_fpu_state_restore, r6
1142 #else
1143 movi do_exception_error, r6
1144 #endif
1145 ptabs r6, tr0
1146 blink tr0, ZERO
1147
1148 fpu_error_or_IRQB:
1149 pta its_IRQ, tr0
1150 beqi/l r4, EVENT_INTERRUPT, tr0
1151 #ifdef CONFIG_SH_FPU
1152 movi do_fpu_state_restore, r6
1153 #else
1154 movi do_exception_error, r6
1155 #endif
1156 ptabs r6, tr0
1157 blink tr0, ZERO
1158
1159 its_IRQ:
1160 movi do_IRQ, r6
1161 ptabs r6, tr0
1162 blink tr0, ZERO
1163
1164 /*
1165 * system_call/unknown_trap third level handler:
1166 *
1167 * Inputs:
1168 * (r2) fault/interrupt code, entry number (TRAP = 11)
1169 * (r3) struct pt_regs *, original register's frame pointer
1170 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1171 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1172 * (SP) = r3
1173 * (LINK) return address: ret_from_exception
1174 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1175 *
1176 * Outputs:
1177 * (*r3) Syscall reply (Saved r2)
1178 * (LINK) In case of syscall only it can be scrapped.
1179 * Common second level post handler will be ret_from_syscall.
1180 * The common (non-trace) exit point is syscall_ret (which saves the
1181 * result to r2). The common failure exit is syscall_bad (which stores
1182 * -ENOSYS to r2).
1183 *
1184 */
1185
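/*
 * The TRA layout above, decoded as a C sketch (the masks mirror the
 * 'shlri r4, 20' check and 'andi r5, 0x1ff' in system_call below;
 * the sample value is made up):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static int tra_is_syscall(uint32_t tra) { return (tra >> 20) == 1; }
 *	static unsigned tra_nargs(uint32_t tra) { return (tra >> 16) & 0xf; }
 *	static unsigned tra_nr(uint32_t tra)    { return tra & 0x1ff; }
 *
 *	int main(void)
 *	{
 *		uint32_t tra = 0x00130039;   // syscall, 3 args, nr 0x39
 *		printf("syscall=%d nargs=%u nr=%u\n",
 *		       tra_is_syscall(tra), tra_nargs(tra), tra_nr(tra));
 *		return 0;
 *	}
 */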
1186 unknown_trap:
1187 /* Unknown Trap or User Trace */
1188 movi do_unknown_trapa, r6
1189 ptabs r6, tr0
1190 ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
1191 andi r2, 0x1ff, r2 /* r2 = syscall # */
1192 blink tr0, LINK
1193
1194 pta syscall_ret, tr0
1195 blink tr0, ZERO
1196
1197 /* New syscall implementation */
1198 system_call:
1199 pta unknown_trap, tr0
1200 or r5, ZERO, r4 /* TRA (=r5) -> r4 */
1201 shlri r4, 20, r4
1202 bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
1203
1204 /* It's a system call */
1205 st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
1206 andi r5, 0x1ff, r5 /* syscall # -> r5 */
1207
1208 STI()
1209
1210 pta syscall_allowed, tr0
1211 movi NR_syscalls - 1, r4 /* Last valid */
1212 bgeu/l r4, r5, tr0
1213
1214 syscall_bad:
1215 /* Return ENOSYS ! */
1216 movi -(ENOSYS), r2 /* Fall-through */
1217
1218 .global syscall_ret
1219 syscall_ret:
1220 st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
1221
1222 #ifdef CONFIG_POOR_MANS_STRACE
1223 /* nothing useful in registers at this point */
1224
1225 movi evt_debug2, r5
1226 ori r5, 1, r5
1227 ptabs r5, tr0
1228 ld.q SP, FRAME_R(9), r2
1229 or SP, ZERO, r3
1230 blink tr0, LINK
1231 #endif
1232
1233 ld.q SP, FRAME_S(FSPC), r2
1234 addi r2, 4, r2 /* Move PC, being pre-execution event */
1235 st.q SP, FRAME_S(FSPC), r2
1236 pta ret_from_syscall, tr0
1237 blink tr0, ZERO
1238
1239
1240 /* A different return path for ret_from_fork, because we now need
1241 * to call schedule_tail with later kernels. Since prev is already
1242 * loaded into r2 by switch_to(), we can just call it straight away.
1243 */
1244
1245 .global ret_from_fork
1246 ret_from_fork:
1247
1248 movi schedule_tail,r5
1249 ori r5, 1, r5
1250 ptabs r5, tr0
1251 blink tr0, LINK
1252
1253 #ifdef CONFIG_POOR_MANS_STRACE
1254 /* nothing useful in registers at this point */
1255
1256 movi evt_debug2, r5
1257 ori r5, 1, r5
1258 ptabs r5, tr0
1259 ld.q SP, FRAME_R(9), r2
1260 or SP, ZERO, r3
1261 blink tr0, LINK
1262 #endif
1263
1264 ld.q SP, FRAME_S(FSPC), r2
1265 addi r2, 4, r2 /* Move PC, being pre-execution event */
1266 st.q SP, FRAME_S(FSPC), r2
1267 pta ret_from_syscall, tr0
1268 blink tr0, ZERO
1269
1270
1271
1272 syscall_allowed:
1273 /* Use LINK to deflect the exit point, default is syscall_ret */
1274 pta syscall_ret, tr0
1275 gettr tr0, LINK
1276 pta syscall_notrace, tr0
1277
1278 getcon KCR0, r2
1279 ld.l r2, TI_FLAGS, r4
1280 movi (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
1281 and r6, r4, r6
1282 beq/l r6, ZERO, tr0
1283
1284 /* Trace it by calling syscall_trace before and after */
1285 movi syscall_trace, r4
1286 or SP, ZERO, r2
1287 or ZERO, ZERO, r3
1288 ptabs r4, tr0
1289 blink tr0, LINK
1290
1291 /* Reload syscall number as r5 is trashed by syscall_trace */
1292 ld.q SP, FRAME_S(FSYSCALL_ID), r5
1293 andi r5, 0x1ff, r5
1294
1295 pta syscall_ret_trace, tr0
1296 gettr tr0, LINK
1297
1298 syscall_notrace:
1299 /* Now point to the appropriate 4th level syscall handler */
1300 movi sys_call_table, r4
1301 shlli r5, 2, r5
1302 ldx.l r4, r5, r5
1303 ptabs r5, tr0
1304
1305 /* Prepare original args */
1306 ld.q SP, FRAME_R(2), r2
1307 ld.q SP, FRAME_R(3), r3
1308 ld.q SP, FRAME_R(4), r4
1309 ld.q SP, FRAME_R(5), r5
1310 ld.q SP, FRAME_R(6), r6
1311 ld.q SP, FRAME_R(7), r7
1312
1313 /* And now the trick for those syscalls requiring a struct pt_regs * ! */
1314 or SP, ZERO, r8
1315
1316 /* Call it */
1317 blink tr0, ZERO /* LINK is already properly set */
1318
1319 syscall_ret_trace:
1320 /* We get back here only if under trace */
1321 st.q SP, FRAME_R(9), r2 /* Save return value */
1322
1323 movi syscall_trace, LINK
1324 or SP, ZERO, r2
1325 movi 1, r3
1326 ptabs LINK, tr0
1327 blink tr0, LINK
1328
1329 /* This needs to be done after any syscall tracing */
1330 ld.q SP, FRAME_S(FSPC), r2
1331 addi r2, 4, r2 /* Move PC, being pre-execution event */
1332 st.q SP, FRAME_S(FSPC), r2
1333
1334 pta ret_from_syscall, tr0
1335 blink tr0, ZERO /* Resume normal return sequence */
1336
1337 /*
1338 * --- Switch to running under a particular ASID and return the previous ASID value
1339 * --- The caller is assumed to have done a cli before calling this.
1340 *
1341 * Input r2 : new ASID
1342 * Output r2 : old ASID
1343 */
1344
1345 .global switch_and_save_asid
1346 switch_and_save_asid:
1347 getcon sr, r0
1348 movi 255, r4
1349 shlli r4, 16, r4 /* r4 = mask to select ASID */
1350 and r0, r4, r3 /* r3 = shifted old ASID */
1351 andi r2, 255, r2 /* mask down new ASID */
1352 shlli r2, 16, r2 /* align new ASID against SR.ASID */
1353 andc r0, r4, r0 /* efface old ASID from SR */
1354 or r0, r2, r0 /* insert the new ASID */
1355 putcon r0, ssr
1356 movi 1f, r0
1357 putcon r0, spc
1358 rte
1359 nop
1360 1:
1361 ptabs LINK, tr0
1362 shlri r3, 16, r2 /* r2 = old ASID */
1363 blink tr0, r63
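/*
 * In C terms, switch_and_save_asid splices a new 8-bit ASID into
 * SR[23:16] and returns the old one. A model only: the real routine
 * must install the updated SR via SSR/SPC and an rte, as done above.
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define SR_ASID_SHIFT 16   // SR_ASID_MASK (0x00ff0000) is at the top of this file
 *
 *	static uint32_t switch_asid(uint32_t *sr, uint32_t new_asid)
 *	{
 *		uint32_t old = (*sr >> SR_ASID_SHIFT) & 0xff;
 *		*sr = (*sr & ~0x00ff0000u) |
 *		      ((new_asid & 0xff) << SR_ASID_SHIFT);
 *		return old;
 *	}
 *
 *	int main(void)
 *	{
 *		uint32_t sr = 0x40000000 | (0x12u << SR_ASID_SHIFT);
 *		assert(switch_asid(&sr, 0x34) == 0x12);
 *		assert(((sr >> SR_ASID_SHIFT) & 0xff) == 0x34);
 *		return 0;
 *	}
 */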
1364
1365 .global route_to_panic_handler
1366 route_to_panic_handler:
1367 /* Switch to real mode, goto panic_handler, don't return. Useful for
1368 last-chance debugging, e.g. if no output wants to go to the console.
1369 */
1370
1371 movi panic_handler - CONFIG_PAGE_OFFSET, r1
1372 ptabs r1, tr0
1373 pta 1f, tr1
1374 gettr tr1, r0
1375 putcon r0, spc
1376 getcon sr, r0
1377 movi 1, r1
1378 shlli r1, 31, r1
1379 andc r0, r1, r0
1380 putcon r0, ssr
1381 rte
1382 nop
1383 1: /* Now in real mode */
1384 blink tr0, r63
1385 nop
1386
1387 .global peek_real_address_q
1388 peek_real_address_q:
1389 /* Two args:
1390 r2 : real mode address to peek
1391 r2(out) : result quadword
1392
1393 This is provided as a cheapskate way of manipulating device
1394 registers for debugging (to avoid the need to onchip_remap the debug
1395 module, and to avoid the need to onchip_remap the watchpoint
1396 controller in a way that identity maps sufficient bits to avoid the
1397 SH5-101 cut2 silicon defect).
1398
1399 This code is not performance critical
1400 */
1401
1402 add.l r2, r63, r2 /* sign extend address */
1403 getcon sr, r0 /* r0 = saved original SR */
1404 movi 1, r1
1405 shlli r1, 28, r1
1406 or r0, r1, r1 /* r1 = r0 with block bit set */
1407 putcon r1, sr /* now in critical section */
1408 movi 1, r36
1409 shlli r36, 31, r36
1410 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1411
1412 putcon r1, ssr
1413 movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1414 movi 1f, r37 /* virtual mode return addr */
1415 putcon r36, spc
1416
1417 synco
1418 rte
1419 nop
1420
1421 .peek0: /* come here in real mode, don't touch caches!!
1422 still in critical section (sr.bl==1) */
1423 putcon r0, ssr
1424 putcon r37, spc
1425 /* Here's the actual peek. If the address is bad, all bets are off
1426 * as to what will happen (handlers invoked in real mode = bad news) */
1427 ld.q r2, 0, r2
1428 synco
1429 rte /* Back to virtual mode */
1430 nop
1431
1432 1:
1433 ptabs LINK, tr0
1434 blink tr0, r63
1435
1436 .global poke_real_address_q
1437 poke_real_address_q:
1438 /* Two args:
1439 r2 : real mode address to poke
1440 r3 : quadword value to write.
1441
1442 This is provided as a cheapskate way of manipulating device
1443 registers for debugging (to avoid the need to onchip_remap the debug
1444 module, and to avoid the need to onchip_remap the watchpoint
1445 controller in a way that identity maps sufficient bits to avoid the
1446 SH5-101 cut2 silicon defect).
1447
1448 This code is not performance critical
1449 */
1450
1451 add.l r2, r63, r2 /* sign extend address */
1452 getcon sr, r0 /* r0 = saved original SR */
1453 movi 1, r1
1454 shlli r1, 28, r1
1455 or r0, r1, r1 /* r1 = r0 with block bit set */
1456 putcon r1, sr /* now in critical section */
1457 movi 1, r36
1458 shlli r36, 31, r36
1459 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1460
1461 putcon r1, ssr
1462 movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1463 movi 1f, r37 /* virtual mode return addr */
1464 putcon r36, spc
1465
1466 synco
1467 rte
1468 nop
1469
1470 .poke0: /* come here in real mode, don't touch caches!!
1471 still in critical section (sr.bl==1) */
1472 putcon r0, ssr
1473 putcon r37, spc
1474 /* Here's the actual poke. If the address is bad, all bets are off
1475 * as to what will happen (handlers invoked in real mode = bad news) */
1476 st.q r2, 0, r3
1477 synco
1478 rte /* Back to virtual mode */
1479 nop
1480
1481 1:
1482 ptabs LINK, tr0
1483 blink tr0, r63
1484
1485 /*
1486 * --- User Access Handling Section
1487 */
1488
1489 /*
1490 * User Access support. It has all moved to non-inlined assembler
1491 * functions here.
1492 *
1493 * __kernel_size_t __copy_user(void *__to, const void *__from,
1494 * __kernel_size_t __n)
1495 *
1496 * Inputs:
1497 * (r2) target address
1498 * (r3) source address
1499 * (r4) size in bytes
1500 *
1501 * Outputs:
1502 * (*r2) target data
1503 * (r2) non-copied bytes
1504 *
1505 * If a fault occurs on the user pointer, bail out early and return the
1506 * number of bytes not copied in r2.
1507 * Strategy : for large blocks, call a real memcpy function which can
1508 * move >1 byte at a time using unaligned ld/st instructions, and can
1509 * manipulate the cache using prefetch + alloco to improve the speed
1510 * further. If a fault occurs in that function, just revert to the
1511 * byte-by-byte approach used for small blocks; this is rare so the
1512 * performance hit for that case does not matter.
1513 *
1514 * For small blocks it's not worth the overhead of setting up and calling
1515 * the memcpy routine; do the copy a byte at a time.
1516 *
1517 */
1518 .global __copy_user
1519 __copy_user:
1520 pta __copy_user_byte_by_byte, tr1
1521 movi 16, r0 ! this value is a best guess, should tune it by benchmarking
1522 bge/u r0, r4, tr1
1523 pta copy_user_memcpy, tr0
1524 addi SP, -32, SP
1525 /* Save arguments in case we have to fix-up unhandled page fault */
1526 st.q SP, 0, r2
1527 st.q SP, 8, r3
1528 st.q SP, 16, r4
1529 st.q SP, 24, r35 ! r35 is callee-save
1530 /* Save LINK in a register to reduce RTS time later (otherwise
1531 ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1532 ori LINK, 0, r35
1533 blink tr0, LINK
1534
1535 /* Copy completed normally if we get back here */
1536 ptabs r35, tr0
1537 ld.q SP, 24, r35
1538 /* don't restore r2-r4, pointless */
1539 /* set result=r2 to zero as the copy must have succeeded. */
1540 or r63, r63, r2
1541 addi SP, 32, SP
1542 blink tr0, r63 ! RTS
1543
1544 .global __copy_user_fixup
1545 __copy_user_fixup:
1546 /* Restore stack frame */
1547 ori r35, 0, LINK
1548 ld.q SP, 24, r35
1549 ld.q SP, 16, r4
1550 ld.q SP, 8, r3
1551 ld.q SP, 0, r2
1552 addi SP, 32, SP
1553 /* Fall through to original code, in the 'same' state we entered with */
1554
1555 /* The slow byte-by-byte method is used if the fast copy traps due to a bad
1556 user address. In that rare case, the speed drop can be tolerated. */
1557 __copy_user_byte_by_byte:
1558 pta ___copy_user_exit, tr1
1559 pta ___copy_user1, tr0
1560 beq/u r4, r63, tr1 /* early exit for zero length copy */
1561 sub r2, r3, r0
1562 addi r0, -1, r0
1563
1564 ___copy_user1:
1565 ld.b r3, 0, r5 /* Fault address 1 */
1566
1567 /* Could rewrite this to use just 1 add, but the second comes 'free'
1568 due to load latency */
1569 addi r3, 1, r3
1570 addi r4, -1, r4 /* No real fixup required */
1571 ___copy_user2:
1572 stx.b r3, r0, r5 /* Fault address 2 */
1573 bne r4, ZERO, tr0
1574
1575 ___copy_user_exit:
1576 or r4, ZERO, r2
1577 ptabs LINK, tr0
1578 blink tr0, ZERO
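/*
 * The contract implemented above, modeled in C (a sketch; the
 * simulated fault address stands in for the MMU fault that would
 * vector through the __ex_table fixup to ___copy_user_exit):
 *
 *	#include <stddef.h>
 *	#include <stdio.h>
 *
 *	static size_t copy_bytewise(char *to, const char *from,
 *				    size_t n, const char *fault_at)
 *	{
 *		while (n) {
 *			if (from == fault_at)   // simulated page fault
 *				break;
 *			*to++ = *from++;
 *			n--;
 *		}
 *		return n;   // bytes NOT copied; 0 means full success
 *	}
 *
 *	int main(void)
 *	{
 *		char src[8] = "abcdefg", dst[8] = { 0 };
 *		size_t left = copy_bytewise(dst, src, 8, src + 5);
 *		printf("left=%zu\n", left);   // prints left=3
 *		return 0;
 *	}
 */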
1579
1580 /*
1581 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1582 *
1583 * Inputs:
1584 * (r2) target address
1585 * (r3) size in bytes
1586 *
1587 * Outputs:
1588 * (*r2) zero-ed target data
1589 * (r2) non-zero-ed bytes
1590 */
1591 .global __clear_user
1592 __clear_user:
1593 pta ___clear_user_exit, tr1
1594 pta ___clear_user1, tr0
1595 beq/u r3, r63, tr1
1596
1597 ___clear_user1:
1598 st.b r2, 0, ZERO /* Fault address */
1599 addi r2, 1, r2
1600 addi r3, -1, r3 /* No real fixup required */
1601 bne r3, ZERO, tr0
1602
1603 ___clear_user_exit:
1604 or r3, ZERO, r2
1605 ptabs LINK, tr0
1606 blink tr0, ZERO
1607
1608
1609 /*
1610 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1611 * int __count)
1612 *
1613 * Inputs:
1614 * (r2) target address
1615 * (r3) source address
1616 * (r4) maximum size in bytes
1617 *
1618 * Outputs:
1619 * (*r2) copied data
1620 * (r2) -EFAULT (in case of faulting)
1621 * copied data (otherwise)
1622 */
1623 .global __strncpy_from_user
1624 __strncpy_from_user:
1625 pta ___strncpy_from_user1, tr0
1626 pta ___strncpy_from_user_done, tr1
1627 or r4, ZERO, r5 /* r5 = original count */
1628 beq/u r4, r63, tr1 /* early exit if r4==0 */
1629 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1630 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1631
1632 ___strncpy_from_user1:
1633 ld.b r3, 0, r7 /* Fault address: only in reading */
1634 st.b r2, 0, r7
1635 addi r2, 1, r2
1636 addi r3, 1, r3
1637 beq/u ZERO, r7, tr1
1638 addi r4, -1, r4 /* return real number of copied bytes */
1639 bne/l ZERO, r4, tr0
1640
1641 ___strncpy_from_user_done:
1642 sub r5, r4, r6 /* If done, return copied */
1643
1644 ___strncpy_from_user_exit:
1645 or r6, ZERO, r2
1646 ptabs LINK, tr0
1647 blink tr0, ZERO
1648
1649 /*
1650 * extern long __strnlen_user(const char *__s, long __n)
1651 *
1652 * Inputs:
1653 * (r2) source address
1654 * (r3) source size in bytes
1655 *
1656 * Outputs:
1657 * (r2) -EFAULT (in case of faulting)
1658 * string length (otherwise)
1659 */
1660 .global __strnlen_user
1661 __strnlen_user:
1662 pta ___strnlen_user_set_reply, tr0
1663 pta ___strnlen_user1, tr1
1664 or ZERO, ZERO, r5 /* r5 = counter */
1665 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1666 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1667 beq r3, ZERO, tr0
1668
1669 ___strnlen_user1:
1670 ldx.b r2, r5, r7 /* Fault address: only in reading */
1671 addi r3, -1, r3 /* No real fixup */
1672 addi r5, 1, r5
1673 beq r3, ZERO, tr0
1674 bne r7, ZERO, tr1
1675 ! The line below used to be active. This led to a junk byte lying between each pair
1676 ! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1677 ! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1678 ! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1679 ! addi r5, 1, r5 /* Include '\0' */
1680
1681 ___strnlen_user_set_reply:
1682 or r5, ZERO, r6 /* If done, return counter */
1683
1684 ___strnlen_user_exit:
1685 or r6, ZERO, r2
1686 ptabs LINK, tr0
1687 blink tr0, ZERO
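/*
 * In C terms the loop above returns the string length INCLUDING the
 * terminating NUL (the usual strnlen_user convention); the disabled
 * addi would have counted the NUL a second time, producing the junk
 * byte described. A model (sketch only):
 *
 *	#include <assert.h>
 *
 *	static long strnlen_user_model(const char *s, long n)
 *	{
 *		long count = 0;
 *		while (n-- > 0)
 *			if (s[count++] == '\0')
 *				break;   // NUL is included in the count
 *		return count;
 *	}
 *
 *	int main(void)
 *	{
 *		assert(strnlen_user_model("abc", 16) == 4);   // 3 chars + NUL
 *		assert(strnlen_user_model("abcdef", 3) == 3); // bounded by n
 *		return 0;
 *	}
 */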
1688
1689 /*
1690 * extern long __get_user_asm_?(void *val, long addr)
1691 *
1692 * Inputs:
1693 * (r2) dest address
1694 * (r3) source address (in User Space)
1695 *
1696 * Outputs:
1697 * (r2) -EFAULT (faulting)
1698 * 0 (not faulting)
1699 */
1700 .global __get_user_asm_b
1701 __get_user_asm_b:
1702 or r2, ZERO, r4
1703 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1704
1705 ___get_user_asm_b1:
1706 ld.b r3, 0, r5 /* r5 = data */
1707 st.b r4, 0, r5
1708 or ZERO, ZERO, r2
1709
1710 ___get_user_asm_b_exit:
1711 ptabs LINK, tr0
1712 blink tr0, ZERO
1713
1714
1715 .global __get_user_asm_w
1716 __get_user_asm_w:
1717 or r2, ZERO, r4
1718 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1719
1720 ___get_user_asm_w1:
1721 ld.w r3, 0, r5 /* r5 = data */
1722 st.w r4, 0, r5
1723 or ZERO, ZERO, r2
1724
1725 ___get_user_asm_w_exit:
1726 ptabs LINK, tr0
1727 blink tr0, ZERO
1728
1729
1730 .global __get_user_asm_l
1731 __get_user_asm_l:
1732 or r2, ZERO, r4
1733 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1734
1735 ___get_user_asm_l1:
1736 ld.l r3, 0, r5 /* r5 = data */
1737 st.l r4, 0, r5
1738 or ZERO, ZERO, r2
1739
1740 ___get_user_asm_l_exit:
1741 ptabs LINK, tr0
1742 blink tr0, ZERO
1743
1744
1745 .global __get_user_asm_q
1746 __get_user_asm_q:
1747 or r2, ZERO, r4
1748 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1749
1750 ___get_user_asm_q1:
1751 ld.q r3, 0, r5 /* r5 = data */
1752 st.q r4, 0, r5
1753 or ZERO, ZERO, r2
1754
1755 ___get_user_asm_q_exit:
1756 ptabs LINK, tr0
1757 blink tr0, ZERO
1758
1759 /*
1760 * extern long __put_user_asm_?(void *pval, long addr)
1761 *
1762 * Inputs:
1763 * (r2) kernel pointer to value
1764 * (r3) dest address (in User Space)
1765 *
1766 * Outputs:
1767 * (r2) -EFAULT (faulting)
1768 * 0 (not faulting)
1769 */
1770 .global __put_user_asm_b
1771 __put_user_asm_b:
1772 ld.b r2, 0, r4 /* r4 = data */
1773 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1774
1775 ___put_user_asm_b1:
1776 st.b r3, 0, r4
1777 or ZERO, ZERO, r2
1778
1779 ___put_user_asm_b_exit:
1780 ptabs LINK, tr0
1781 blink tr0, ZERO
1782
1783
1784 .global __put_user_asm_w
1785 __put_user_asm_w:
1786 ld.w r2, 0, r4 /* r4 = data */
1787 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1788
1789 ___put_user_asm_w1:
1790 st.w r3, 0, r4
1791 or ZERO, ZERO, r2
1792
1793 ___put_user_asm_w_exit:
1794 ptabs LINK, tr0
1795 blink tr0, ZERO
1796
1797
1798 .global __put_user_asm_l
1799 __put_user_asm_l:
1800 ld.l r2, 0, r4 /* r4 = data */
1801 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1802
1803 ___put_user_asm_l1:
1804 st.l r3, 0, r4
1805 or ZERO, ZERO, r2
1806
1807 ___put_user_asm_l_exit:
1808 ptabs LINK, tr0
1809 blink tr0, ZERO
1810
1811
1812 .global __put_user_asm_q
1813 __put_user_asm_q:
1814 ld.q r2, 0, r4 /* r4 = data */
1815 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1816
1817 ___put_user_asm_q1:
1818 st.q r3, 0, r4
1819 or ZERO, ZERO, r2
1820
1821 ___put_user_asm_q_exit:
1822 ptabs LINK, tr0
1823 blink tr0, ZERO
1824
1825 panic_stash_regs:
1826 /* The idea is: when we get an unhandled panic, we dump the registers
1827 to a known memory location, then just sit in a tight loop.
1828 This allows the human to look at the memory region through the GDB
1829 session (assuming the debug module's SHwy initiator isn't locked up
1830 or anything), to hopefully analyze the cause of the panic. */
1831
1832 /* On entry, former r15 (SP) is in DCR
1833 former r0 is at resvec_save_area + 0
1834 former r1 is at resvec_save_area + 8
1835 former tr0 is at resvec_save_area + 32
1836 DCR is the only register whose value is lost altogether.
1837 */
1838
1839 movi 0xffffffff80000000, r0 ! phy of dump area
1840 ld.q SP, 0x000, r1 ! former r0
1841 st.q r0, 0x000, r1
1842 ld.q SP, 0x008, r1 ! former r1
1843 st.q r0, 0x008, r1
1844 st.q r0, 0x010, r2
1845 st.q r0, 0x018, r3
1846 st.q r0, 0x020, r4
1847 st.q r0, 0x028, r5
1848 st.q r0, 0x030, r6
1849 st.q r0, 0x038, r7
1850 st.q r0, 0x040, r8
1851 st.q r0, 0x048, r9
1852 st.q r0, 0x050, r10
1853 st.q r0, 0x058, r11
1854 st.q r0, 0x060, r12
1855 st.q r0, 0x068, r13
1856 st.q r0, 0x070, r14
1857 getcon dcr, r14
1858 st.q r0, 0x078, r14
1859 st.q r0, 0x080, r16
1860 st.q r0, 0x088, r17
1861 st.q r0, 0x090, r18
1862 st.q r0, 0x098, r19
1863 st.q r0, 0x0a0, r20
1864 st.q r0, 0x0a8, r21
1865 st.q r0, 0x0b0, r22
1866 st.q r0, 0x0b8, r23
1867 st.q r0, 0x0c0, r24
1868 st.q r0, 0x0c8, r25
1869 st.q r0, 0x0d0, r26
1870 st.q r0, 0x0d8, r27
1871 st.q r0, 0x0e0, r28
1872 st.q r0, 0x0e8, r29
1873 st.q r0, 0x0f0, r30
1874 st.q r0, 0x0f8, r31
1875 st.q r0, 0x100, r32
1876 st.q r0, 0x108, r33
1877 st.q r0, 0x110, r34
1878 st.q r0, 0x118, r35
1879 st.q r0, 0x120, r36
1880 st.q r0, 0x128, r37
1881 st.q r0, 0x130, r38
1882 st.q r0, 0x138, r39
1883 st.q r0, 0x140, r40
1884 st.q r0, 0x148, r41
1885 st.q r0, 0x150, r42
1886 st.q r0, 0x158, r43
1887 st.q r0, 0x160, r44
1888 st.q r0, 0x168, r45
1889 st.q r0, 0x170, r46
1890 st.q r0, 0x178, r47
1891 st.q r0, 0x180, r48
1892 st.q r0, 0x188, r49
1893 st.q r0, 0x190, r50
1894 st.q r0, 0x198, r51
1895 st.q r0, 0x1a0, r52
1896 st.q r0, 0x1a8, r53
1897 st.q r0, 0x1b0, r54
1898 st.q r0, 0x1b8, r55
1899 st.q r0, 0x1c0, r56
1900 st.q r0, 0x1c8, r57
1901 st.q r0, 0x1d0, r58
1902 st.q r0, 0x1d8, r59
1903 st.q r0, 0x1e0, r60
1904 st.q r0, 0x1e8, r61
1905 st.q r0, 0x1f0, r62
1906 st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
1907
1908 ld.q SP, 0x020, r1 ! former tr0
1909 st.q r0, 0x200, r1
1910 gettr tr1, r1
1911 st.q r0, 0x208, r1
1912 gettr tr2, r1
1913 st.q r0, 0x210, r1
1914 gettr tr3, r1
1915 st.q r0, 0x218, r1
1916 gettr tr4, r1
1917 st.q r0, 0x220, r1
1918 gettr tr5, r1
1919 st.q r0, 0x228, r1
1920 gettr tr6, r1
1921 st.q r0, 0x230, r1
1922 gettr tr7, r1
1923 st.q r0, 0x238, r1
1924
1925 getcon sr, r1
1926 getcon ssr, r2
1927 getcon pssr, r3
1928 getcon spc, r4
1929 getcon pspc, r5
1930 getcon intevt, r6
1931 getcon expevt, r7
1932 getcon pexpevt, r8
1933 getcon tra, r9
1934 getcon tea, r10
1935 getcon kcr0, r11
1936 getcon kcr1, r12
1937 getcon vbr, r13
1938 getcon resvec, r14
1939
1940 st.q r0, 0x240, r1
1941 st.q r0, 0x248, r2
1942 st.q r0, 0x250, r3
1943 st.q r0, 0x258, r4
1944 st.q r0, 0x260, r5
1945 st.q r0, 0x268, r6
1946 st.q r0, 0x270, r7
1947 st.q r0, 0x278, r8
1948 st.q r0, 0x280, r9
1949 st.q r0, 0x288, r10
1950 st.q r0, 0x290, r11
1951 st.q r0, 0x298, r12
1952 st.q r0, 0x2a0, r13
1953 st.q r0, 0x2a8, r14
1954
1955 getcon SPC,r2
1956 getcon SSR,r3
1957 getcon EXPEVT,r4
1958 /* Prepare to jump to C - physical address */
1959 movi panic_handler-CONFIG_PAGE_OFFSET, r1
1960 ori r1, 1, r1
1961 ptabs r1, tr0
1962 getcon DCR, SP
1963 blink tr0, ZERO
1964 nop
1965 nop
1966 nop
1967 nop
1968
1969
1970
1971
1972 /*
1973 * --- Signal Handling Section
1974 */
1975
1976 /*
1977 * extern long long _sa_default_rt_restorer
1978 * extern long long _sa_default_restorer
1979 *
1980 * or, better,
1981 *
1982 * extern void _sa_default_rt_restorer(void)
1983 * extern void _sa_default_restorer(void)
1984 *
1985 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
1986 * from user space. Copied into user space by signal management.
1987 * Both must be quad aligned and 2 quad long (4 instructions).
1988 *
1989 */
1990 .balign 8
1991 .global sa_default_rt_restorer
1992 sa_default_rt_restorer:
1993 movi 0x10, r9
1994 shori __NR_rt_sigreturn, r9
1995 trapa r9
1996 nop
1997
1998 .balign 8
1999 .global sa_default_restorer
2000 sa_default_restorer:
2001 movi 0x10, r9
2002 shori __NR_sigreturn, r9
2003 trapa r9
2004 nop
2005
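/*
 * The movi/shori pair in these stubs builds r9 = (0x10 << 16) | __NR_*,
 * i.e. exactly the 0x1yzzzz pattern the system_call entry tests for.
 * As a C sketch (the syscall number 0x77 is just an example value):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static uint32_t build_trapa_arg(uint16_t nr)
 *	{
 *		uint32_t r9 = 0x10;       // movi  0x10, r9
 *		r9 = (r9 << 16) | nr;     // shori nr, r9
 *		return r9;
 *	}
 *
 *	int main(void)
 *	{
 *		assert(build_trapa_arg(0x77) == 0x00100077);
 *		assert((build_trapa_arg(0x77) >> 20) == 1);  // syscall pattern
 *		return 0;
 *	}
 */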
2006 /*
2007 * --- __ex_table Section
2008 */
2009
2010 /*
2011 * User Access Exception Table.
2012 */
2013 .section __ex_table, "a"
2014
2015 .global asm_uaccess_start /* Just a marker */
2016 asm_uaccess_start:
2017
2018 .long ___copy_user1, ___copy_user_exit
2019 .long ___copy_user2, ___copy_user_exit
2020 .long ___clear_user1, ___clear_user_exit
2021 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2022 .long ___strnlen_user1, ___strnlen_user_exit
2023 .long ___get_user_asm_b1, ___get_user_asm_b_exit
2024 .long ___get_user_asm_w1, ___get_user_asm_w_exit
2025 .long ___get_user_asm_l1, ___get_user_asm_l_exit
2026 .long ___get_user_asm_q1, ___get_user_asm_q_exit
2027 .long ___put_user_asm_b1, ___put_user_asm_b_exit
2028 .long ___put_user_asm_w1, ___put_user_asm_w_exit
2029 .long ___put_user_asm_l1, ___put_user_asm_l_exit
2030 .long ___put_user_asm_q1, ___put_user_asm_q_exit
2031
2032 .global asm_uaccess_end /* Just a marker */
2033 asm_uaccess_end:
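/*
 * How these pairs are consumed, sketched in C (struct and function
 * names are illustrative, not the exact sh64 definitions): on a fault
 * inside one of the marked instructions, the fault handler looks the
 * faulting PC up here and resumes at the paired exit label instead of
 * treating it as a fatal kernel fault.
 *
 *	#include <stddef.h>
 *
 *	struct extable_entry {
 *		unsigned long insn;    // e.g. ___copy_user1
 *		unsigned long fixup;   // e.g. ___copy_user_exit
 *	};
 *
 *	static const struct extable_entry *
 *	search_extable(const struct extable_entry *tbl, size_t n,
 *		       unsigned long fault_pc)
 *	{
 *		for (size_t i = 0; i < n; i++)
 *			if (tbl[i].insn == fault_pc)
 *				return &tbl[i];
 *		return NULL;   // no fixup: genuine kernel fault
 *	}
 *
 *	int main(void) { return search_extable(NULL, 0, 0) != NULL; }
 */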
2034
2035
2036
2037
2038 /*
2039 * --- .text.init Section
2040 */
2041
2042 .section .text.init, "ax"
2043
2044 /*
2045 * void trap_init (void)
2046 *
2047 */
2048 .global trap_init
2049 trap_init:
2050 addi SP, -24, SP /* Room to save r28/r29/r30 */
2051 st.q SP, 0, r28
2052 st.q SP, 8, r29
2053 st.q SP, 16, r30
2054
2055 /* Set VBR and RESVEC */
2056 movi LVBR_block, r19
2057 andi r19, -4, r19 /* reset MMUOFF + reserved */
2058 /* For RESVEC exceptions we force the MMU off, which means we need the
2059 physical address. */
2060 movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
2061 andi r20, -4, r20 /* reset reserved */
2062 ori r20, 1, r20 /* set MMUOFF */
2063 putcon r19, VBR
2064 putcon r20, RESVEC
2065
2066 /* Sanity check */
2067 movi LVBR_block_end, r21
2068 andi r21, -4, r21
2069 movi BLOCK_SIZE, r29 /* r29 = expected size */
2070 or r19, ZERO, r30
2071 add r19, r29, r19
2072
2073 /*
2074 * Ugly, but better to loop forever now than to crash afterwards.
2075 * We should print a message, but if we touch LVBR or
2076 * LRESVEC blocks we should not be surprised if we get stuck
2077 * in trap_init().
2078 */
2079 pta trap_init_loop, tr1
2080 gettr tr1, r28 /* r28 = trap_init_loop */
2081 sub r21, r30, r30 /* r30 = actual size */
2082
2083 /*
2084 * VBR/RESVEC handlers overlap by being bigger than
2085 * allowed. Very bad. Just loop forever.
2086 * (r28) panic/loop address
2087 * (r29) expected size
2088 * (r30) actual size
2089 */
2090 trap_init_loop:
2091 bne r19, r21, tr1
2092
2093 /* Now that exception vectors are set up, reset SR.BL */
2094 getcon SR, r22
2095 movi SR_UNBLOCK_EXC, r23
2096 and r22, r23, r22
2097 putcon r22, SR
2098
2099 addi SP, 24, SP
2100 ptabs LINK, tr0
2101 blink tr0, ZERO
2102