/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>

/*
 * System calls.
 */
	.section ".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc ID_72656773_68657265[TC],0x7265677368657265
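	/* The value 0x7265677368657265 is the ASCII string "regshere";
	 * it is stored below the pt_regs area by system_call_common. */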

	.section ".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
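	/*
	 * Entry state (a summary inferred from the stores below, as set
	 * up by the 0xc00 system-call exception prologue): r0 = syscall
	 * number, r3-r8 = arguments, r9 = the user's saved r13,
	 * r11 = SRR0 (return NIP), r12 = SRR1 (return MSR), r13 = PACA.
	 */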
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	crclr	so
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
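	/* The marker sits just below STACK_FRAME_OVERHEAD so stack
	 * walkers (e.g. xmon) can recognise this as an exception frame. */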
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
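/*
 * Each syscall owns a 16-byte slot in .sys_call_table: the 64-bit
 * handler first, then the 32-bit handler at +8 -- hence the
 * scale-by-16 (slwi 4) below and the +8 bias taken for 32-bit tasks.
 */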
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
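	/* Clear MSR_EE without needing a scratch mask: rotate the MSR
	 * left so EE lands in the most-significant bit, let rldicl's
	 * mask drop it, then rotate the remaining 16 bits back. */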
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
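	/* Unsigned compare: error returns are -_LAST_ERRNO..-1, which as
	 * unsigned values sit at the very top of the 64-bit range, so
	 * bge- below catches exactly the errno cases. */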
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
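	/* The dummy stdcx. kills any larx reservation left over from the
	 * interrupted context, so a later stcx. there cannot falsely
	 * succeed after we return. */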
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1		/* clear MSR.RI */
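	/* With RI clear, an interrupt taken here would be unrecoverable;
	 * nothing below may fault until the rfid consumes SRR0/SRR1. */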
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */

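	/* Roughly, in C: atomically do  ti->flags &= ~_TIF_PERSYSCALL_MASK;
	 * the ldarx/stdcx. pair below retries until the update lands
	 * without a competing store to the flags word. */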
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
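	/* r0 is now the ESID word for the slbmte below: the stack's ESID
	 * with the valid bit set and the bolted slot number as the entry
	 * index; r7 carries the matching VSID word from KSP_VSID. */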

	/* Update the last bolted SLB */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */
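	/* The ESID slot is cleared before the VSID is rewritten so the
	 * shadow buffer never presents a valid-looking entry with a
	 * half-updated VSID. */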

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
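	/* (The 288 bytes are the ppc64 ELF ABI's protected zone below r1,
	   which interrupt frames must skip because the interrupted code
	   may still be using it; a voluntary call like _switch need not.) */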
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
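	/* (Signals are delivered only on return to user mode, so the
	 * SIGPENDING bit is tested only when MSR_PR was set.) */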
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
	ld	r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10		/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */
4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	stb	r5,PACASOFTIRQEN(r13)

	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* extract EE bit and use it to restore paca->hard_enabled */
	rldicl	r4,r3,49,63	/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	andi.	r0,r3,MSR_PR

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
	REST_GPR(13, r1)
1:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	SPRN_XER,r3

	REST_8GPRS(5, r1)

	stdcx.	r0,0,r1		/* to clear the reservation */

	mfmsr	r0
	li	r2, MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1

	ld	r0,_MSR(r1)
	mtspr	SPRN_SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
.previous
.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz	"enter_rtas"
.previous
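	/* tdnei traps if we are still soft-enabled; the bug-table entry
	 * maps the trap address to file/line, and the 0x1000000 added to
	 * the line appears to mark it as a warning (the asm WARN_ON noted
	 * above) so the trap handler reports it and continues. */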

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
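	/* Clearing the top two address bits turns the 0xC... kernel
	 * linear-mapping address into the real address that RTAS,
	 * running with relocation off, can branch back to. */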
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
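	/* r0 = current MSR with EE/SE/BE/RI off: the state we switch to
	 * just below so SRR0/SRR1 survive until the rfid.  r6 = the MSR
	 * RTAS runs with: 32-bit (SF off), relocation off (IR/DR off),
	 * FP off, RI on. */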
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch MSR to 32 bits mode
	 */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr