xtensa: add irq flags trace support
/*
 * arch/xtensa/kernel/entry.S
 *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/* Unimplemented features. */

#undef KERNEL_STACK_OVERFLOW_CHECK
#undef PREEMPTIBLE_KERNEL
#undef ALLOCA_EXCEPTION_IN_IRAM

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find first bit set in WINDOWBASE from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau	\bit, \mask		# 32-WSBITS ... 31 (32 iff 0)
	addi	\bit, \bit, WSBITS - 32 + 1	# topmost bit set -> return 1
#else
	movi	\bit, WSBITS
#if WSBITS > 16
	_bltui	\mask, 0x10000, 99f
	addi	\bit, \bit, -16
	extui	\mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui	\mask, 0x100, 99f
	addi	\bit, \bit, -8
	srli	\mask, \mask, 8
#endif
99:	_bltui	\mask, 0x10, 99f
	addi	\bit, \bit, -4
	srli	\mask, \mask, 4
99:	_bltui	\mask, 0x4, 99f
	addi	\bit, \bit, -2
	srli	\mask, \mask, 2
99:	_bltui	\mask, 0x2, 99f
	addi	\bit, \bit, -1
99:

#endif
	.endm

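/* Our annotation, not part of the original source: a C sketch of what
 * ffs_ws computes, i.e. the 1-based position of the highest set bit of
 * a WSBITS-wide mask, counted from the left.
 *
 *	static int ffs_ws(unsigned long mask)
 *	{
 *		int bit;
 *
 *		for (bit = 1; bit <= WSBITS; bit++)	// MSB downwards
 *			if (mask & (1UL << (WSBITS - bit)))
 *				return bit;	// 100..0 -> 1, 000..1 -> WSBITS
 *		return WSBITS;			// mask == 0, degenerate case
 *	}
 */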
/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	    <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(user_exception)

	/* Save a2, a3, and depc, restore excsave_1 and set SP. */

	xsr	a3, excsave1
	rsr	a0, depc
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
	rur	a2, threadptr
	s32i	a2, a1, PT_THREADPTR
#endif

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers

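	/* Our annotation, the rotation above in C: WINDOWSTART rotated
	 * right by WINDOWBASE, so the current frame's bit lands in bit 0:
	 *
	 *	wmask = ((ws >> wb) | (ws << (WSBITS - wb)))
	 *		& ((1UL << WSBITS) - 1);
	 *	// bit 0 of wmask is always 1 (the current frame)
	 */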
	/* Save only live registers. */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and the original WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */

2:	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)

/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	    <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

	/* Save a0, a2, a3, DEPC and set SP. */

	xsr	a3, excsave1		# restore a3, excsave_1
	rsr	a0, depc		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase		# don't need to save these, we only
	rsr	a3, windowstart		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit

	/* Save only the live window-frame */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

1:

#ifdef KERNEL_STACK_OVERFLOW_CHECK

	/*  Stack overflow check, for debugging  */
	extui	a2, a1, TASK_SIZE_BITS,XX
	movi	a3, SIZE??
	_bge	a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, debugcause
	rsr	a3, epc1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, -1
	rsr	a3, excvaddr
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
	xsr	a2, lcount
	s32i	a2, a1, PT_LCOUNT

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a0, exccause
	movi	a3, 0
	rsr	a2, excsave1
	s32i	a0, a1, PT_EXCCAUSE
	s32i	a3, a2, EXC_TABLE_FIXUP

	/* All unrecoverable states are saved on stack, now, and a1 is valid,
	 * so we can allow exceptions and interrupts (*) again.
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 *
	 * (*) We only allow interrupts of higher priority than current IRQ
	 */

	rsr	a3, ps
	addi	a0, a0, -4
	movi	a2, 1
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
					# a3 = PS.INTLEVEL
	movnez	a2, a3, a3		# a2 = 1: level-1, > 1: high priority
	moveqz	a3, a2, a0		# a3 = IRQ level iff interrupt
	movi	a2, 1 << PS_WOE_BIT
	or	a3, a3, a2
	rsr	a0, exccause
	xsr	a3, ps

	s32i	a3, a1, PT_PS		# save ps

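	/* Our annotation, the INTLEVEL logic above in C (the addi of -4
	 * tests for EXCCAUSE_LEVEL1_INTERRUPT == 4):
	 *
	 *	ps = 1 << PS_WOE_BIT;
	 *	if (exccause == EXCCAUSE_LEVEL1_INTERRUPT)
	 *		ps |= intlevel ? intlevel : 1;	// mask current IRQ
	 *	else
	 *		ps |= intlevel;			// keep entry INTLEVEL
	 *	// EXCM/UM/RING/OWB end up zero, so only higher-priority
	 *	// interrupts can preempt us from here on
	 */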
	/* Save lbeg, lend */

	rsr	a2, lbeg
	rsr	a3, lend
	s32i	a2, a1, PT_LBEG
	s32i	a3, a1, PT_LEND

	/* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	rsr	a2, scompare1
	s32i	a2, a1, PT_SCOMPARE1
#endif

	/* Save optional registers. */

	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

#ifdef CONFIG_TRACE_IRQFLAGS
	l32i	a4, a1, PT_DEPC
	/* Double exception means we came here with an exception
	 * while PS.EXCM was set, i.e. interrupts disabled.
	 */
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
	l32i	a4, a1, PT_EXCCAUSE
	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
	/* If we came here because of an interrupt, interrupts were
	 * enabled before and the entry code has just disabled them.
	 */
	movi	a4, trace_hardirqs_off
	callx4	a4
1:
#endif

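	/* Our annotation, the guard above in C (pt_regs field names):
	 *
	 *	if (regs->depc < VALID_DOUBLE_EXCEPTION_ADDRESS &&
	 *	    regs->exccause == EXCCAUSE_LEVEL1_INTERRUPT)
	 *		trace_hardirqs_off();
	 */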
	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	movi	a4, exc_table
	mov	a6, a1			# pass stack frame
	mov	a7, a0			# pass EXCCAUSE
	addx4	a4, a0, a4
	l32i	a4, a4, EXC_TABLE_DEFAULT	# load handler

	/* Call the second-level handler */

	callx4	a4

	/* Jump here for exception exit */
	.global common_exception_return
common_exception_return:

#ifdef CONFIG_TRACE_IRQFLAGS
	l32i	a4, a1, PT_DEPC
	/* Double exception means we came here with an exception
	 * while PS.EXCM was set, i.e. interrupts disabled.
	 */
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
	l32i	a4, a1, PT_EXCCAUSE
	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
	/* If we came here because of an interrupt, interrupts were
	 * enabled before and we'll reenable them on return.
	 */
	movi	a4, trace_hardirqs_on
	callx4	a4
1:
#endif

	/* Jump if we are returning from kernel exceptions. */

1:	l32i	a3, a1, PT_PS
	_bbci.l	a3, PS_UM_BIT, 4f

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	GET_THREAD_INFO(a2,a1)
	l32i	a4, a2, TI_FLAGS

	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
	_bbci.l	a4, TIF_SIGPENDING, 4f

2:	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f

	/* Call do_signal() */

	movi	a4, do_notify_resume	# int do_notify_resume(struct pt_regs*)
	mov	a6, a1
	callx4	a4
	j	1b

3:	/* Reschedule */

	movi	a4, schedule		# void schedule (void)
	callx4	a4
	j	1b

4:	/* Restore optional registers. */

	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

	/* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	l32i	a2, a1, PT_SCOMPARE1
	wsr	a2, scompare1
#endif
	wsr	a3, ps		/* disable interrupts */

	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

	/* Restore the state of the task and return from the exception. */

	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, depc		# use DEPC as temp storage
	wsr	a3, windowstart		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, windowbase		# switch to user's saved WB
	rsync
	rsr	a1, depc		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, 1f		# only have to restore current window?

	/* The working registers are a0 and a3. We are restoring to
	 * a4..a7. Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

2:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b

	/* Clear unrestored registers (don't leak anything to user-land). */

1:	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */

#if XCHAL_HAVE_THREADPTR
	l32i	a3, a1, PT_THREADPTR
	wur	a3, threadptr
#endif

2:	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

#ifdef PREEMPTIBLE_KERNEL

#ifdef CONFIG_PREEMPT

	/*
	 * Note: We've just returned from a call4, so we have
	 * at least 4 addt'l regs.
	 */

	/* Check current_thread_info->preempt_count */

	GET_THREAD_INFO(a2)
	l32i	a3, a2, TI_PREEMPT
	bnez	a3, 1f

	l32i	a2, a2, TI_FLAGS

1:

#endif

#endif

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */

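	/* Equivalent C for the two tests below (our annotation):
	 *
	 *	need_movsp = wmask != 1 && (ws & (ws - 1)) == 0;
	 *
	 * i.e. the previous frame was live on entry (wmask != 1), but its
	 * WINDOWSTART bit is gone now: it was spilled to the temporary
	 * exception stack and must be copied below the task's new SP.
	 */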
	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, windowstart
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_SIZE+0
	s32i	a4, a1, PT_SIZE+4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_SIZE+8
	s32i	a4, a1, PT_SIZE+12

	/* Common exception exit.
	 * We restore the special register and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	/* Restore address registers. */

	_bbsi.l	a2, 1, 1f
	l32i	a4, a1, PT_AREG4
	l32i	a5, a1, PT_AREG5
	l32i	a6, a1, PT_AREG6
	l32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8, a1, PT_AREG8
	l32i	a9, a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */

	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount

	/* Check if it was a double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	_bltui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	wsr	a0, depc
	l32i	a2, a1, PT_AREG2
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

1:
	/* Restore a0...a3 and return */

	rsr	a0, ps
	extui	a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	movi	a0, 2f
	slli	a2, a2, 4
	add	a0, a2, a0
	l32i	a2, a1, PT_AREG2
	jx	a0

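	/* Our annotation: the jump above indexes an array of 16-byte
	 * return stubs by the saved PS.INTLEVEL, roughly
	 *
	 *	goto *(&&stub0 + 16 * intlevel);   // stub0 is label 2 below
	 *
	 * Level 0 returns via rfe; medium-priority levels use rfi N from
	 * irq_exit_level, which is why every stub is .align 16.
	 */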
	.macro	irq_exit_level level
	.align	16
	.if	XCHAL_EXCM_LEVEL >= \level
	l32i	a0, a1, PT_PC
	wsr	a0, epc\level
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfi	\level
	.endif
	.endm

	.align	16
2:
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

	.align	16
	/* no rfi for level-1 irq, handled by rfe above */
	nop

	irq_exit_level 2
	irq_exit_level 3
	irq_exit_level 4
	irq_exit_level 5
	irq_exit_level 6

ENDPROC(kernel_exception)

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	movi	a0, debug_exception	# restore a3, debug jump vector
	wsr	a2, ps
	xsr	a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_kernel_exception

2:	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_user_exception

	/* Debug exception while in exception mode. */
1:	j	1b	// FIXME!!

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a1, (1 << PS_WOE_BIT) | LOCKLEVEL
	wsr	a1, ps
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	a4, panic
	movi	a6, unrecoverable_text

	callx4	a4

1:	j	1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *  In this case, the caller frame's a0..a3 are on the stack just
 *  below sp (a1), and this handler moves them.
 *
 *  For "MOVSP <ar>,<as>" without destination register a1, this routine
 *  simply moves the value from <as> to <ar> without moving the save area.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	    <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

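/* Our annotation, the 'movsp a1, ax' case below in C ("old"/"new" are the
 * old and new stack pointers):
 *
 *	memcpy(new - 16, old - 16, 16);	// caller's a0..a3 save area
 *	sp = new;			// save area always sits at sp - 16
 *
 * except that the copy must run with the user's PS.RING, which is why the
 * handler uses l32e/s32e instead of plain loads and stores.
 */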
#if XCHAL_HAVE_BE
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
#else
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
#endif

ENTRY(fast_alloca)

	/* We shouldn't be in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double

	rsr	a0, depc		# get a2
	s32i	a4, a2, PT_AREG4	# save a4 and
	s32i	a0, a2, PT_AREG2	# a2 to stack

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore a3, excsave_1 */

	xsr	a3, excsave1		# make sure excsave_1 is valid for dbl.
	rsr	a4, epc1		# get exception address
	s32i	a3, a2, PT_AREG3	# save a3 to stack

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	/* Note: l8ui not allowed in IRAM/IROM!! */
	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
#endif
	movi	a3, .Lmovsp_src
	_EXTUI_MOVSP_SRC(a0)		# extract source register number
	addx8	a3, a0, a3
	jx	a3

.Lunhandled_double:
	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0

	.align 8
.Lmovsp_src:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a3, a5;			_j 1f;	.align 8
	mov	a3, a6;			_j 1f;	.align 8
	mov	a3, a7;			_j 1f;	.align 8
	mov	a3, a8;			_j 1f;	.align 8
	mov	a3, a9;			_j 1f;	.align 8
	mov	a3, a10;		_j 1f;	.align 8
	mov	a3, a11;		_j 1f;	.align 8
	mov	a3, a12;		_j 1f;	.align 8
	mov	a3, a13;		_j 1f;	.align 8
	mov	a3, a14;		_j 1f;	.align 8
	mov	a3, a15;		_j 1f;	.align 8

1:

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
#endif
	addi	a4, a4, 3		# step over movsp
	_EXTUI_MOVSP_DST(a0)		# extract destination register
	wsr	a4, epc1		# save new epc_1

	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump

	/* Move the save area. This implies the use of the L32E
	 * and S32E instructions, because this move must be done with
	 * the user's PS.RING privilege levels, not with ring 0
	 * (kernel's) privileges currently active with PS.EXCM
	 * set. Note that we have still registered a fixup routine with the
	 * double exception vector in case a double exception occurs.
	 */

	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */

	l32e	a0, a1, -16
	l32e	a4, a1, -12
	s32e	a0, a3, -16
	s32e	a4, a3, -12
	l32e	a0, a1, -8
	l32e	a4, a1, -4
	s32e	a0, a3, -8
	s32e	a4, a3, -4

	/* Restore stack-pointer and all the other saved registers. */

	mov	a1, a3

	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/* MOVSP <at>,<as> was invoked with <at> != a1.
	 * Because the stack pointer is not being modified,
	 * we should be able to just modify the pointer
	 * without moving any save area.
	 * The processor only traps these occurrences if the
	 * caller window isn't live, so unfortunately we can't
	 * use this as an alternate trap mechanism.
	 * So we just do the move.  This requires that we
	 * resolve the destination register, not just the source,
	 * so there's some extra work.
	 * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
	 */

	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */

1:	movi	a4, .Lmovsp_dst
	addx8	a4, a0, a4
	jx	a4

	.align 8
.Lmovsp_dst:
	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a1, a3;			_j 1f;	.align 8
	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a5, a3;			_j 1f;	.align 8
	mov	a6, a3;			_j 1f;	.align 8
	mov	a7, a3;			_j 1f;	.align 8
	mov	a8, a3;			_j 1f;	.align 8
	mov	a9, a3;			_j 1f;	.align 8
	mov	a10, a3;		_j 1f;	.align 8
	mov	a11, a3;		_j 1f;	.align 8
	mov	a12, a3;		_j 1f;	.align 8
	mov	a13, a3;		_j 1f;	.align 8
	mov	a14, a3;		_j 1f;	.align 8
	mov	a15, a3;		_j 1f;	.align 8

1:	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

ENDPROC(fast_alloca)

/*
 * fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 */

ENTRY(fast_syscall_kernel)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc		# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	kernel_exception

ENDPROC(fast_syscall_kernel)

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc		# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	user_exception

ENDPROC(fast_syscall_user)

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, depc		# restore a2, depc
	rsr	a3, excsave1

	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	dispatch table, original in excsave_1
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	    <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY	 adds an entry to the __ex_table fixup table for the immediately
 *	 following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *	 statements and continues from there
 *
 * Usage:
 *	TRY	l32i	a0, a1, 0
 *		<other code>
 *	done:	rfe
 *	CATCH	<set return code>
 *		j done
 */

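/* Our annotation, a hedged userspace sketch of the CMP_SWP operation
 * handled below (constants from asm/unistd.h; args map to a6,a3,a4,a5):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	// returns 1 if *ptr matched oldval and was replaced, else 0
 *	int r = syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP,
 *			ptr, oldval, newval);
 */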
#define TRY								\
	.section __ex_table, "a";					\
	.word	66f, 67f;						\
	.text;								\
66:

#define CATCH								\
67:

ENTRY(fast_syscall_xtensa)

	xsr	a3, excsave1		# restore a3, excsave1

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

TRY	l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
TRY	s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (note that we cannot set
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

TRY	l32i	a7, a3, 0		# orig
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
TRY	s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

CATCH
.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)


/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM

	/* Save a3 and SAR on stack. */

	rsr	a0, sar
	xsr	a3, excsave1		# restore a3 and excsave_1
	s32i	a3, a2, PT_AREG3
	s32i	a4, a2, PT_AREG4
	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5

	/* The spill routine might clobber a7, a11, and a15. */

	s32i	a7, a2, PT_AREG7
	s32i	a11, a2, PT_AREG11
	s32i	a15, a2, PT_AREG15

	call0	_spill_registers	# destroys a3, a4, and SAR

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	l32i	a0, a2, PT_AREG0
	wsr	a3, sar
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a7, a2, PT_AREG7
	l32i	a11, a2, PT_AREG11
	l32i	a15, a2, PT_AREG15

	movi	a2, 0
	rfe

ENDPROC(fast_syscall_spill_registers)

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

fast_syscall_spill_registers_fixup:

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 */

	rsr	a3, excsave1	# get spill-mask
	slli	a2, a3, 1	# shift left by one

	slli	a3, a2, 32-WSBITS
	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	movi	a3, exc_table
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 *	a0, a1, a2	same
	 *	a3:		trashed (saved in excsave_1)
	 *	depc:		depc (we have to return to that address)
	 *	excsave_1:	a3
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available, saved in EXCSAVE_1
	 *  depc: exception address
	 *  excsave: a3
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	movi	a3, exc_table
	rsr	a0, exccause
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0

fast_syscall_spill_registers_fixup_return:

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Restore fixup handler. */

	xsr	a3, excsave1
	movi	a2, fast_syscall_spill_registers_fixup
	s32i	a2, a3, EXC_TABLE_FIXUP
	rsr	a2, windowbase
	s32i	a2, a3, EXC_TABLE_PARAM
	l32i	a2, a3, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3
	wsr	a3, windowbase
	rsync

	/* Restore a3 and return. */

	movi	a3, exc_table
	xsr	a3, excsave1

	rfde


/*
 * spill all registers.
 *
 * This is not a real function. The following conditions must be met:
 *
 *  - must be called with call0.
 *  - uses a3, a4 and SAR.
 *  - the last 'valid' register of each frame is clobbered.
 *  - the caller must have registered a fixup handler
 *    (or be inside a critical section)
 *  - PS_EXCM must be set (PS_WOE cleared?)
 */

ENTRY(_spill_registers)

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a4 = yyxxxwww1.
	 */

	rsr	a4, windowbase
	rsr	a3, windowstart		# a3 = xxxwww1yy
	ssr	a4			# holds WB
	slli	a4, a3, WSBITS
	or	a3, a3, a4		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1

	/* We are done if there are no more than the current register frame. */

	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
	movi	a4, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a4		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, windowstart		# save shifted windowstart
	neg	a4, a3
	and	a3, a4, a3		# first bit set from right: 000010000

	ffs_ws	a4, a3			# a4: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a4, a3, a4		# WSBITS-a4:number of 0-bits from right
	ssr	a4			# save in SAR for later.

	rsr	a3, windowbase
	add	a3, a3, a4
	wsr	a3, windowbase
	rsync

	rsr	a3, windowstart
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending what call was used (call4, call8, call12),
	 * we have to save 4, 8, or 12 registers.
	 */

	_bbsi.l	a3, 1, .Lc4
	_bbsi.l	a3, 2, .Lc8

	/* Special case: we have a call12-frame starting at a4. */

	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to Lc12 first)

	s32e	a4, a1, -16	# a1 is valid with an empty spill area
	l32e	a4, a5, -12
	s32e	a8, a4, -48
	mov	a8, a4
	l32e	a4, a1, -16
	j	.Lc12c

.Lnospill:
	ret

.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20

	srli	a11, a3, 2		# shift windowbase by 2
	rotw	2
	_bnei	a3, 1, .Lloop

.Lexit: /* Done. Do the final rotation, set WS, and return. */

	rotw	1
	rsr	a3, windowbase
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, windowstart
	ret

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a2, a5, -12
	s32e	a8, a2, -48
	mov	a8, a2

.Lc12c: s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stackpointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a5, a13
	rotw	-1

	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers. If we are in user space, we kill the application,
	 * however, this condition is unrecoverable in kernel space.
	 */

	rsr	a0, ps
	_bbci.l	a0, PS_UM_BIT, 1f

	/* User space: Setup a dummy frame and kill application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a0, 0

	movi	a3, exc_table
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, excsave1

	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
	wsr	a4, ps
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4

1:	/* Kernel space: PANIC! */

	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

ENDPROC(_spill_registers)

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

ENDPROC(fast_second_level_miss_double_kernel)

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	    <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f


	/* We deliberately destroy a3 that holds the exception table. */

8:	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 *	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 *	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
	 */

	movi	a1, (-PAGE_OFFSET) & 0xffffffff
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, _PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	/*
	 * We utilize all three wired-ways (7-9) to hold pmd translations.
	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
	 * This allows us to map the three most common regions to three
	 * different DTLBs:
	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
	 *  2   -> way 8	shared libraries (2000.0000)
	 *  3   -> way 9	stack (3000.0000)
	 */

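	/* Our annotation, the way selection below in C:
	 *
	 *	region = (excvaddr >> 28) & 3;	// address bits 28..29
	 *	way = DTLB_WAY_PGD + ((3 * region) >> 2);	// 0,0,1,2
	 *	write_dtlb_entry(pteval, (ptevaddr & PAGE_MASK) + way);
	 */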
	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
	rsr	a1, ptevaddr
	addx2	a3, a3, a3		# ->			0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# ->			0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number

3:	wdtlb	a0, a1
	dsync

	/* Exit critical section. */

4:	movi	a3, exc_table		# restore a3
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a2, a2, PT_DEPC
	xsr	a3, excsave1

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, depc
	rfe

	/* Return from double exception. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

2:	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page
	 * or the aliased cache flush functions got preemptively interrupted
	 * by another task. Re-establish temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
	bltu	a3, a0, 2f
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, 2f

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
	bltu	a0, a3, 2f

	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
	bgeu	a1, a3, 2f

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, epc1
	sub	a3, a3, a1

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3
	j	3b

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	4b


#endif	// DCACHE_WAY_SIZE > PAGE_SIZE


2:	/* Invalid PGD, default exception handling */

	movi	a3, exc_table
	rsr	a1, depc
	xsr	a3, excsave1
	s32i	a1, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_second_level_miss)

/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	    <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a4. */

	s32i	a1, a2, PT_AREG1
	s32i	a4, a2, PT_AREG4

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a4)
	l32i	a0, a0, 0
	beqz	a0, 2f

	/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/

	_PTE_OFFSET(a0, a1, a4)
	l32i	a4, a0, 0		# read pteval
	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a4, a4, a1
	rsr	a1, excvaddr
	s32i	a4, a0, 0

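	/* Our annotation, the pte fixup above in C: mark the page accessed,
	 * dirty, and hardware-writable so the faulting store can succeed:
	 *
	 *	pte_val(*pte) |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
	 *	// the dhwb/wdtlb sequence below makes the hardware see it
	 */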
	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	wdtlb	a4, a0

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a4, a2, PT_AREG4
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	/* Restore excsave1 and a3. */

	xsr	a3, excsave1
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, depc
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a4, depc	# still holds a2
	xsr	a3, excsave1
	s32i	a4, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	l32i	a4, a2, PT_AREG4
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_store_prohibited)

#endif /* CONFIG_MMU */

/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                            a2                 a3
 */

ENTRY(system_call)

	entry	a1, 32

	/* regs->syscall = regs->areg[2] */

	l32i	a3, a2, PT_AREG2
	mov	a6, a2
	movi	a4, do_syscall_trace_enter
	s32i	a3, a2, PT_SYSCALL
	callx4	a4

	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table
	movi	a5, __NR_syscall_count
	movi	a6, -ENOSYS
	bgeu	a3, a5, 1f

	addx4	a4, a3, a4
	l32i	a4, a4, 0
	movi	a5, sys_ni_syscall
	beq	a4, a5, 1f

	/* Load args: arg0 - arg5 are passed via regs. */

	l32i	a6, a2, PT_AREG6
	l32i	a7, a2, PT_AREG3
	l32i	a8, a2, PT_AREG4
	l32i	a9, a2, PT_AREG5
	l32i	a10, a2, PT_AREG8
	l32i	a11, a2, PT_AREG9

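	/* Our annotation, the shuffle above in C: the user's a2 holds the
	 * syscall number and a6,a3,a4,a5,a8,a9 hold arg0..arg5; the windowed
	 * callx4 below expects them in a6..a11:
	 *
	 *	retval = sys_call_table[nr](arg0, arg1, arg2,
	 *				    arg3, arg4, arg5);
	 */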
	/* Pass one additional argument to the syscall: pt_regs (on stack) */
	s32i	a2, a1, 0

	callx4	a4

1:	/* regs->areg[2] = return_value */

	s32i	a6, a2, PT_AREG2
	movi	a4, do_syscall_trace_leave
	mov	a6, a2
	callx4	a4
	retw

ENDPROC(system_call)


/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                          a2                    a3
 */

ENTRY(_switch_to)

	entry	a1, 16

	mov	a12, a2			# preserve 'prev' (a2)
	mov	a13, a3			# and 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
	l32i	a5, a3, TASK_THREAD_INFO

	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER

	s32i	a0, a12, THREAD_RA	# save return address
	s32i	a1, a12, THREAD_SP	# save stack pointer

	/* Disable ints while we manipulate the stack pointer. */

	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
	xsr	a14, ps
	rsr	a3, excsave1
	rsync
	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */

	/* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
	xsr	a3, cpenable
	s32i	a3, a4, THREAD_CPENABLE
#endif

	/* Flush register file. */

	call0	_spill_registers	# destroys a3, a4, and SAR

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	rsr	a3, excsave1		# exc_table
	movi	a6, 0
	addi	a7, a5, PT_REGS_OFFSET
	s32i	a6, a3, EXC_TABLE_FIXUP
	s32i	a7, a3, EXC_TABLE_KSTK

	/* restore context of the task 'next' */

	l32i	a0, a13, THREAD_RA	# restore return address
	l32i	a1, a13, THREAD_SP	# restore stack pointer

	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER

	wsr	a14, ps
	mov	a2, a12			# return 'prev'
	rsync

	retw

ENDPROC(_switch_to)

ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in a6 (return value from fake call4 frame)
	 */
	movi	a4, schedule_tail
	callx4	a4

	movi	a4, do_syscall_trace_leave
	mov	a6, a1
	callx4	a4

	j	common_exception_return

ENDPROC(ret_from_fork)

/*
 * Kernel thread creation helper
 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
 * left from _switch_to: a6 = prev
 */
ENTRY(ret_from_kernel_thread)

	call4	schedule_tail
	mov	a6, a3
	callx4	a2
	j	common_exception_return

ENDPROC(ret_from_kernel_thread)