ARC: K/U SP saved from one location in stack switching macro
[deliverable/linux.git] / arch / arc / include / asm / entry.h
CommitLineData
9d42c84f
VG
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
4788a594
VG
8 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
9 * Stack switching code can no longer reliably rely on the fact that
10 * if we are NOT in user mode, stack is switched to kernel mode.
11 * e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
12 * its prologue including stack switching from user mode
13 *
9d42c84f
VG
14 * Vineetg: Aug 28th 2008: Bug #94984
15 * -Zero Overhead Loop Context should be cleared when entering IRQ/EXcp/Trap
16 * Normally CPU does this automatically, however when doing FAKE rtie,
17 * we also need to explicitly do this. The problem in macros
18 * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
19 * was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
20 *
21 * Vineetg: May 5th 2008
080c3747
VG
22 * -Modified CALLEE_REG save/restore macros to handle the fact that
23 * r25 contains the kernel current task ptr
9d42c84f
VG
24 * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
25 * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
26 * address Write back load ld.ab instead of separate ld/add instn
27 *
28 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
29 */
30
31#ifndef __ASM_ARC_ENTRY_H
32#define __ASM_ARC_ENTRY_H
33
34#ifdef __ASSEMBLY__
35#include <asm/unistd.h> /* For NR_syscalls definition */
36#include <asm/asm-offsets.h>
37#include <asm/arcregs.h>
38#include <asm/ptrace.h>
080c3747 39#include <asm/processor.h> /* For VMALLOC_START */
9d42c84f
VG
40#include <asm/thread_info.h> /* For THREAD_SIZE */
41
42/* Note on the LD/ST addr modes with addr reg wback
43 *
44 * LD.a same as LD.aw
45 *
46 * LD.a reg1, [reg2, x] => Pre Incr
47 * Eff Addr for load = [reg2 + x]
48 *
49 * LD.ab reg1, [reg2, x] => Post Incr
50 * Eff Addr for load = [reg2]
51 */
52
3ebedbb2
VG
53.macro PUSH reg	; push \reg: SP -= 4 first (st.a = pre-update), store at new SP
54	st.a \reg, [sp, -4]
55.endm
56
57.macro PUSHAX aux	; push contents of an aux-space register; clobbers r9
58	lr r9, [\aux]	; lr = read aux reg into core reg
59	PUSH r9
60.endm
61
62.macro POP reg	; pop \reg: load from [SP], then SP += 4 (ld.ab = post-update)
63	ld.ab \reg, [sp, 4]
64.endm
65
66.macro POPAX aux	; pop top-of-stack into an aux-space register; clobbers r9
67	POP r9
68	sr r9, [\aux]	; sr = write core reg into aux reg
69.endm
70
9d42c84f 71/*--------------------------------------------------------------
3ebedbb2
VG
72 * Helpers to save/restore Scratch Regs:
73 * used by Interrupt/Exception Prologue/Epilogue
9d42c84f 74 *-------------------------------------------------------------*/
3ebedbb2
VG
75.macro SAVE_R0_TO_R12	; push scratch regs r0..r12; push order defines their pt_regs layout
76	PUSH r0
77	PUSH r1
78	PUSH r2
79	PUSH r3
80	PUSH r4
81	PUSH r5
82	PUSH r6
83	PUSH r7
84	PUSH r8
85	PUSH r9
86	PUSH r10
87	PUSH r11
88	PUSH r12
89.endm
90
91.macro RESTORE_R12_TO_R0	; pop scratch regs in exact reverse order of SAVE_R0_TO_R12
92	POP r12
93	POP r11
94	POP r10
95	POP r9
96	POP r8
97	POP r7
98	POP r6
99	POP r5
100	POP r4
101	POP r3
102	POP r2
103	POP r1
104	POP r0
9d42c84f
VG
105.endm
106
107/*--------------------------------------------------------------
3ebedbb2
VG
108 * Helpers to save/restore callee-saved regs:
109 * used by several macros below
9d42c84f 110 *-------------------------------------------------------------*/
3ebedbb2
VG
111.macro SAVE_R13_TO_R24	; push callee-saved regs r13..r24 (r25 handled separately by callers)
112	PUSH r13
113	PUSH r14
114	PUSH r15
115	PUSH r16
116	PUSH r17
117	PUSH r18
118	PUSH r19
119	PUSH r20
120	PUSH r21
121	PUSH r22
122	PUSH r23
123	PUSH r24
124.endm
125
126.macro RESTORE_R24_TO_R13	; pop callee-saved regs in exact reverse order of SAVE_R13_TO_R24
127	POP r24
128	POP r23
129	POP r22
130	POP r21
131	POP r20
132	POP r19
133	POP r18
134	POP r17
135	POP r16
136	POP r15
137	POP r14
138	POP r13
9d42c84f
VG
139.endm
140
141
142/*--------------------------------------------------------------
3ebedbb2
VG
143 * Collect User Mode callee regs as struct callee_regs - needed by
144 * fork/do_signal/unaligned-access-emulation.
145 * (By default only scratch regs are saved on entry to kernel)
146 *
147 * Special handling for r25 if used for caching Task Pointer.
148 * It would have been saved in task->thread.user_r25 already, but to keep
149 * the interface same it is copied into regular r25 placeholder in
150 * struct callee_regs.
9d42c84f
VG
151 *-------------------------------------------------------------*/
152.macro SAVE_CALLEE_SAVED_USER	; dump user-mode callee regs on stack as struct callee_regs
3ebedbb2
VG
153
154	SAVE_R13_TO_R24
080c3747
VG
155
156#ifdef CONFIG_ARC_CURR_IN_REG
157	; Retrieve orig r25 and save it on stack
158	ld r12, [r25, TASK_THREAD + THREAD_USER_R25]	; user's r25 was stashed here at kernel entry
159	st.a r12, [sp, -4]	; store it in the regular r25 slot of callee_regs
160#else
3ebedbb2 161	PUSH r25
080c3747 162#endif
9d42c84f 163
9d42c84f
VG
164.endm
165
166/*--------------------------------------------------------------
3ebedbb2
VG
167 * Save kernel Mode callee regs at the time of Context Switch.
168 *
169 * Special handling for r25 if used for caching Task Pointer.
170 * Kernel simply skips saving it since it will be loaded with
171 * incoming task pointer anyways
9d42c84f
VG
172 *-------------------------------------------------------------*/
173.macro SAVE_CALLEE_SAVED_KERNEL	; save kernel-mode callee regs at context switch
3ebedbb2
VG
174
175	SAVE_R13_TO_R24
176
080c3747 177#ifdef CONFIG_ARC_CURR_IN_REG
16f9afe6 178	sub sp, sp, 4	; keep the r25 slot but skip saving: r25 = "current", reloaded at switch-in
080c3747 179#else
3ebedbb2 180	PUSH r25
080c3747 181#endif
9d42c84f
VG
182.endm
183
184/*--------------------------------------------------------------
3ebedbb2 185 * Opposite of SAVE_CALLEE_SAVED_KERNEL
9d42c84f
VG
186 *-------------------------------------------------------------*/
187.macro RESTORE_CALLEE_SAVED_KERNEL	; opposite of SAVE_CALLEE_SAVED_KERNEL
188
080c3747 189#ifdef CONFIG_ARC_CURR_IN_REG
16f9afe6 190	add sp, sp, 4 /* skip usual r25 placeholder */
080c3747 191#else
3ebedbb2 192	POP r25
080c3747 193#endif
3ebedbb2 194	RESTORE_R24_TO_R13
9d42c84f
VG
195.endm
196
c3581039 197/*--------------------------------------------------------------
3ebedbb2
VG
198 * Opposite of SAVE_CALLEE_SAVED_USER
199 *
200 * ptrace tracer or unaligned-access fixup might have changed a user mode
201 * callee reg which is saved back to usual r25 storage location
c3581039
VG
202 *-------------------------------------------------------------*/
203.macro RESTORE_CALLEE_SAVED_USER	; opposite of SAVE_CALLEE_SAVED_USER
204
c3581039
VG
205#ifdef CONFIG_ARC_CURR_IN_REG
206	ld.ab r12, [sp, 4]	; pop (possibly tracer-modified) r25 value from callee_regs slot
207	st r12, [r25, TASK_THREAD + THREAD_USER_R25]	; write it back to the usual stash location
208#else
3ebedbb2 209	POP r25
c3581039 210#endif
3ebedbb2 211	RESTORE_R24_TO_R13
c3581039
VG
212.endm
213
9d42c84f
VG
214/*--------------------------------------------------------------
215 * Super FAST Restore callee saved regs by simply re-adjusting SP
216 *-------------------------------------------------------------*/
217.macro DISCARD_CALLEE_SAVED_USER	; drop the whole callee_regs save area: bump SP, restore nothing
16f9afe6 218	add sp, sp, SZ_CALLEE_REGS
9d42c84f
VG
219.endm
220
221/*--------------------------------------------------------------
222 * Restore User mode r25 saved in task_struct->thread.user_r25
223 *-------------------------------------------------------------*/
224.macro RESTORE_USER_R25	; requires r25 = current task ptr on entry; clobbers it with user value
225	ld r25, [r25, TASK_THREAD + THREAD_USER_R25]
226.endm
227
228/*-------------------------------------------------------------
229 * given a tsk struct, get to the base of it's kernel mode stack
230 * tsk->thread_info is really a PAGE, whose bottom hoists stack
231 * which grows upwards towards thread_info
232 *------------------------------------------------------------*/
233
234.macro GET_TSK_STACK_BASE tsk, out	; \out = base (highest addr) of \tsk's kernel stack
235
236	/* Get task->thread_info (this is essentially start of a PAGE) */
237	ld \out, [\tsk, TASK_THREAD_INFO]
238
239	/* Go to end of page where stack begins (grows upwards) */
283237a0 240	add2 \out, \out, (THREAD_SIZE)/4	; add2 scales operand by 4, so net \out += THREAD_SIZE
9d42c84f
VG
241
242.endm
243
244/*--------------------------------------------------------------
245 * Switch to Kernel Mode stack if SP points to User Mode stack
246 *
247 * Entry : r9 contains pre-IRQ/exception/trap status32
248 * Exit : SP is set to kernel mode stack pointer
080c3747 249 * If CURR_IN_REG, r25 set to "current" task pointer
9d42c84f
VG
250 * Clobbers: r9
251 *-------------------------------------------------------------*/
252
253.macro SWITCH_TO_KERNEL_STK	; In: r9 = pre-event STATUS32; Out: SP = kernel stack; Clobbers: r9
254
255	/* User Mode when this happened ? Yes: Proceed to switch stack */
256	bbit1 r9, STATUS_U_BIT, 88f
257
258	/* OK we were already in kernel mode when this event happened, thus can
259	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
260	 */
261
4788a594
VG
262#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
263	/* However....
264	 * If Level 2 Interrupts enabled, we may end up with a corner case:
265	 * 1. User Task executing
266	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
267	 * 3. But before it could switch SP from USER to KERNEL stack
268	 *      a L2 IRQ "Interrupts" L1
269	 * That way although L2 IRQ happened in Kernel mode, stack is still
270	 * not switched.
271	 * To handle this, we may need to switch stack even if in kernel mode
272	 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
273	 */
274	brlo sp, VMALLOC_START, 88f	; SP below kernel VA space => still the user stack
275
276	/* TODO: vineetg:
277	 * We need to be a bit more cautious here. What if a kernel bug in
278	 * L1 ISR, caused SP to go whaco (some small value which looks like
279	 * USER stk) and then we take L2 ISR.
280	 * Above brlo alone would treat it as a valid L1-L2 scenario
281	 * instead of shouting aloud
282	 * The only feasible way is to make sure this L2 happened in
283	 * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
284	 * L1 ISR before it switches stack
285	 */
286
287#endif
288
9d42c84f
VG
289	/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
290	 * safe-keeping not really needed, but it keeps the epilogue code
291	 * (SP restore) simpler/uniform.
292	 */
ba3558c7
VG
293	b.d 66f	; branch with delay slot: join common tail below
294	mov r9, sp	; (delay slot) r9 = pre-event SP, saved by the tail at 66:
9d42c84f
VG
295
29688: /*------Intr/Excp happened in user mode, "switch" stack ------ */
297
298	GET_CURR_TASK_ON_CPU r9
299
080c3747
VG
300#ifdef CONFIG_ARC_CURR_IN_REG
301
302	/* If current task pointer cached in r25, time to
303	 * -safekeep USER r25 in task->thread_struct->user_r25
304	 * -load r25 with current task ptr
305	 */
306	st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]	; st.as scales offset by 4
307	mov r25, r9
308#endif
309
9d42c84f
VG
310	/* With current tsk in r9, get its kernel mode stack base */
311	GET_TSK_STACK_BASE r9, r9
312
ba3558c7 31366:	; common tail: r9 = target SP (kernel SP or stack base), for either entry mode
9d42c84f
VG
314	/* Save Pre Intr/Exception User SP on kernel stack */
315	st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8
316
317	/* CAUTION:
318	 * SP should be set at the very end when we are done with everything
319	 * In case of 2 levels of interrupt we depend on value of SP to assume
320	 * that everything else is done (loading r25 etc)
321	 */
322
323	/* set SP to point to kernel mode stack */
324	mov sp, r9
325
ba3558c7 326	/* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
9d42c84f
VG
327
328.endm
329
330/*------------------------------------------------------------
331 * "FAKE" a rtie to return from CPU Exception context
332 * This is to re-enable Exceptions within exception
333 * Look at EV_ProtV to see how this is actually used
334 *-------------------------------------------------------------*/
335
336.macro FAKE_RET_FROM_EXCPN reg	; rtie to the very next insn, re-enabling exceptions; clobbers \reg
337
338	ld \reg, [sp, PT_status32]	; pre-exception STATUS32 saved in pt_regs
339	bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)	; stay in kernel mode, clear delay-slot bit
340	bset \reg, \reg, STATUS_L_BIT	; SETTING L clears ZOL loop context (see header note re Bug #94984)
341	sr \reg, [erstatus]
342	mov \reg, 55f	; "return" address = label 55 just below
343	sr \reg, [eret]
344
345	rtie
34655:
347.endm
348
349/*
350 * @reg [OUT] &thread_info of "current"
351 */
352.macro GET_CURR_THR_INFO_FROM_SP reg	; \reg = &thread_info of "current"
3ebedbb2 353	bic \reg, sp, (THREAD_SIZE - 1)	; round kernel SP down to THREAD_SIZE boundary
9d42c84f
VG
354.endm
355
356/*
357 * @reg [OUT] thread_info->flags of "current"
358 */
359.macro GET_CURR_THR_INFO_FLAGS reg
360	GET_CURR_THR_INFO_FROM_SP \reg
361	ld \reg, [\reg, THREAD_INFO_FLAGS]
362.endm
363
364/*--------------------------------------------------------------
365 * For early Exception Prologue, a core reg is temporarily needed to
366 * code the rest of prolog (stack switching). This is done by stashing
367 * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
368 *
369 * Before saving the full regfile - this reg is restored back, only
370 * to be saved again on kernel mode stack, as part of ptregs.
371 *-------------------------------------------------------------*/
372.macro EXCPN_PROLOG_FREEUP_REG reg	; stash \reg so early prologue has a scratch core reg
41195d23
VG
373#ifdef CONFIG_SMP
374	sr \reg, [ARC_REG_SCRATCH_DATA0]	; per-CPU aux scratch: a global would race on SMP
375#else
9d42c84f 376	st \reg, [@ex_saved_reg1]
41195d23 377#endif
9d42c84f
VG
378.endm
379
380.macro EXCPN_PROLOG_RESTORE_REG reg	; opposite of EXCPN_PROLOG_FREEUP_REG
41195d23
VG
381#ifdef CONFIG_SMP
382	lr \reg, [ARC_REG_SCRATCH_DATA0]
383#else
9d42c84f 384	ld \reg, [@ex_saved_reg1]
41195d23 385#endif
9d42c84f
VG
386.endm
387
388/*--------------------------------------------------------------
389 * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
390 * Requires SP to be already switched to kernel mode Stack
391 * sp points to the next free element on the stack at exit of this macro.
392 * Registers are pushed / popped in the order defined in struct ptregs
393 * in asm/ptrace.h
394 * Note that syscalls are implemented via TRAP which is also a exception
395 * from CPU's point of view
396 *-------------------------------------------------------------*/
397.macro SAVE_ALL_EXCEPTION marker	; save regfile + aux regs as struct pt_regs; \marker tags event type
398
367f3fcd 399	st \marker, [sp, 8] /* orig_r8 */
5c39c0ab
VG
400	st r0, [sp, 4] /* orig_r0, needed only for sys calls */
401
9d42c84f
VG
402	/* Restore r9 used to code the early prologue */
403	EXCPN_PROLOG_RESTORE_REG r9
404
3ebedbb2
VG
405	SAVE_R0_TO_R12
406	PUSH gp
407	PUSH fp
408	PUSH blink
409	PUSHAX eret
410	PUSHAX erstatus
411	PUSH lp_count
412	PUSHAX lp_end
413	PUSHAX lp_start
414	PUSHAX erbta
9d42c84f
VG
415.endm
416
417/*--------------------------------------------------------------
418 * Save scratch regs for exceptions
419 *-------------------------------------------------------------*/
420.macro SAVE_ALL_SYS	; exception flavour: orig_r8 tagged as plain exception
5c39c0ab 421	SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
9d42c84f
VG
422.endm
423
424/*--------------------------------------------------------------
425 * Save scratch regs for sys calls
426 *-------------------------------------------------------------*/
427.macro SAVE_ALL_TRAP
5c39c0ab
VG
428	/*
429	 * Setup pt_regs->orig_r8.
430	 * Encode syscall number (r8) in upper short word of event type (r9)
431	 * N.B. #1: This is already endian safe (see ptrace.h)
432	 *     #2: Only r9 can be used as scratch as it is already clobbered
433	 *         and its contents are no longer needed by the latter part
434	 *         of exception prologue
435	 */
436	lsl r9, r8, 16	; r9[31:16] = syscall number
437	or r9, r9, orig_r8_IS_SCALL	; r9[15:0] = event-type tag
438
439	SAVE_ALL_EXCEPTION r9
9d42c84f
VG
440.endm
441
442/*--------------------------------------------------------------
443 * Restore all registers used by system call or Exceptions
444 * SP should always be pointing to the next free stack element
445 * when entering this macro.
446 *
447 * NOTE:
448 *
449 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
450 * for memory load operations. If used in that way interrupts are deferred
451 * by hardware and that is not good.
452 *-------------------------------------------------------------*/
453.macro RESTORE_ALL_SYS	; unwind pt_regs in exact reverse of SAVE_ALL_EXCEPTION
3ebedbb2
VG
454	POPAX erbta
455	POPAX lp_start
456	POPAX lp_end
457
458	POP r9
459	mov lp_count, r9 ;LD to lp_count is not allowed
460
461	POPAX erstatus
462	POPAX eret
463	POP blink
464	POP fp
465	POP gp
466	RESTORE_R12_TO_R0
9d42c84f
VG
467
468	ld sp, [sp] /* restore original sp */
469	/* orig_r0 and orig_r8 skipped automatically */
470.endm
471
472
473/*--------------------------------------------------------------
474 * Save all registers used by interrupt handlers.
475 *-------------------------------------------------------------*/
476.macro SAVE_ALL_INT1	; save full context for a Level-1 interrupt (ilink1/status32_l1/bta_l1)
477
3ebedbb2 478	/* restore original r9 to be saved as part of reg-file */
41195d23
VG
479#ifdef CONFIG_SMP
480	lr r9, [ARC_REG_SCRATCH_DATA0]
481#else
9d42c84f 482	ld r9, [@int1_saved_reg]
41195d23 483#endif
9d42c84f
VG
484
485	/* now we are ready to save the remaining context :) */
5c39c0ab 486	st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
9d42c84f 487	st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
3ebedbb2
VG
488
489	SAVE_R0_TO_R12
490	PUSH gp
491	PUSH fp
492	PUSH blink
493	PUSH ilink1
494	PUSHAX status32_l1
495	PUSH lp_count
496	PUSHAX lp_end
497	PUSHAX lp_start
498	PUSHAX bta_l1
9d42c84f
VG
499.endm
500
4788a594
VG
501.macro SAVE_ALL_INT2	; save full context for a Level-2 interrupt (ilink2/status32_l2/bta_l2)
502
503	/* TODO-vineetg: SMP we can't use global nor can we use
504	 * SCRATCH0 as we do for int1 because while int1 is using
505	 * it, int2 can come
506	 */
507	/* restore original r9 , saved in sys_saved_r9 */
508	ld r9, [@int2_saved_reg]
509
510	/* now we are ready to save the remaining context :) */
511	st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
512	st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
3ebedbb2
VG
513
514	SAVE_R0_TO_R12
515	PUSH gp
516	PUSH fp
517	PUSH blink
518	PUSH ilink2
519	PUSHAX status32_l2
520	PUSH lp_count
521	PUSHAX lp_end
522	PUSHAX lp_start
523	PUSHAX bta_l2
4788a594
VG
524.endm
525
9d42c84f
VG
526/*--------------------------------------------------------------
527 * Restore all registers used by interrupt handlers.
528 *
529 * NOTE:
530 *
531 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
532 * for memory load operations. If used in that way interrupts are deferred
533 * by hardware and that is not good.
534 *-------------------------------------------------------------*/
535
536.macro RESTORE_ALL_INT1	; unwind in exact reverse of SAVE_ALL_INT1
3ebedbb2
VG
537	POPAX bta_l1
538	POPAX lp_start
539	POPAX lp_end
540
541	POP r9
542	mov lp_count, r9 ;LD to lp_count is not allowed
543
544	POPAX status32_l1
545	POP ilink1
546	POP blink
547	POP fp
548	POP gp
549	RESTORE_R12_TO_R0
9d42c84f
VG
550
551	ld sp, [sp] /* restore original sp */
552	/* orig_r0 and orig_r8 skipped automatically */
553.endm
554
4788a594 555.macro RESTORE_ALL_INT2	; unwind in exact reverse of SAVE_ALL_INT2
3ebedbb2
VG
556	POPAX bta_l2
557	POPAX lp_start
558	POPAX lp_end
559
560	POP r9
561	mov lp_count, r9 ;LD to lp_count is not allowed
562
563	POPAX status32_l2
564	POP ilink2
565	POP blink
566	POP fp
567	POP gp
568	RESTORE_R12_TO_R0
4788a594
VG
569
570	ld sp, [sp] /* restore original sp */
571	/* orig_r0 and orig_r8 skipped automatically */
4788a594
VG
572.endm
573
574
9d42c84f
VG
575/* Get CPU-ID of this core */
576.macro GET_CPU_ID reg
577	lr \reg, [identity]	; read IDENTITY aux reg
578	lsr \reg, \reg, 8	; shift CPU-id field down
579	bmsk \reg, \reg, 7	; keep bits 7:0 => \reg = cpu id
580.endm
581
41195d23
VG
582#ifdef CONFIG_SMP
583
584/*-------------------------------------------------
585 * Retrieve the current running task on this CPU
586 * 1. Determine curr CPU id.
587 * 2. Use it to index into _current_task[ ]
588 */
589.macro GET_CURR_TASK_ON_CPU reg	; \reg = _current_task[cpu_id]; clobbers \reg only
590	GET_CPU_ID \reg
591	ld.as \reg, [@_current_task, \reg]	; ld.as scales index by 4
592.endm
593
594/*-------------------------------------------------
595 * Save a new task as the "current" task on this CPU
596 * 1. Determine curr CPU id.
597 * 2. Use it to index into _current_task[ ]
598 *
599 * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
600 * because ST r0, [r1, offset] can ONLY have s9 @offset
601 * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
602 */
603
604.macro SET_CURR_TASK_ON_CPU tsk, tmp	; _current_task[cpu_id] = \tsk; clobbers \tmp
605	GET_CPU_ID \tmp
606	add2 \tmp, @_current_task, \tmp	; add2 scales index by 4: \tmp = &_current_task[cpu]
607	st \tsk, [\tmp]
608#ifdef CONFIG_ARC_CURR_IN_REG
609	mov r25, \tsk	; keep the r25 cache of "current" coherent
610#endif
611
612.endm
613
614
615#else /* Uniprocessor implementation of macros */
616
9d42c84f
VG
617.macro GET_CURR_TASK_ON_CPU reg	; UP variant: single global "current"
618	ld \reg, [@_current_task]
619.endm
620
621.macro SET_CURR_TASK_ON_CPU tsk, tmp	; UP variant; \tmp unused, kept for SMP-compatible signature
622	st \tsk, [@_current_task]
080c3747
VG
623#ifdef CONFIG_ARC_CURR_IN_REG
624	mov r25, \tsk	; keep the r25 cache of "current" coherent
625#endif
9d42c84f
VG
626.endm
627
41195d23
VG
628#endif /* SMP / UNI */
629
9d42c84f
VG
630/* ------------------------------------------------------------------
631 * Get the ptr to some field of Current Task at @off in task struct
080c3747 632 * -Uses r25 for Current task ptr if that is enabled
9d42c84f
VG
633 */
634
080c3747
VG
635#ifdef CONFIG_ARC_CURR_IN_REG
636
637.macro GET_CURR_TASK_FIELD_PTR off, reg	; \reg = current + \off, using cached task ptr in r25
638	add \reg, r25, \off
639.endm
640
641#else
642
9d42c84f
VG
643.macro GET_CURR_TASK_FIELD_PTR off, reg	; \reg = current + \off, loading "current" from memory
644	GET_CURR_TASK_ON_CPU \reg
645	add \reg, \reg, \off
646.endm
647
080c3747
VG
648#endif /* CONFIG_ARC_CURR_IN_REG */
649
9d42c84f
VG
650#endif /* __ASSEMBLY__ */
651
652#endif /* __ASM_ARC_ENTRY_H */
This page took 0.066908 seconds and 5 git commands to generate.