/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * System calls.
 */
        .section ".toc","aw"
.SYS_CALL_TABLE:
        .tc     .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
        .tc     ID_72656773_68657265[TC],0x7265677368657265

        .section ".text"
        .align  7

#undef SHOW_SYSCALLS

        .globl  system_call_common
system_call_common:
        andi.   r10,r12,MSR_PR
        mr      r10,r1
        addi    r1,r1,-INT_FRAME_SIZE
        beq-    1f
        ld      r1,PACAKSAVE(r13)
1:      std     r10,0(r1)
        std     r11,_NIP(r1)
        std     r12,_MSR(r1)
        std     r0,GPR0(r1)
        std     r10,GPR1(r1)
        std     r2,GPR2(r1)
        std     r3,GPR3(r1)
        std     r4,GPR4(r1)
        std     r5,GPR5(r1)
        std     r6,GPR6(r1)
        std     r7,GPR7(r1)
        std     r8,GPR8(r1)
        li      r11,0
        std     r11,GPR9(r1)
        std     r11,GPR10(r1)
        std     r11,GPR11(r1)
        std     r11,GPR12(r1)
        std     r9,GPR13(r1)
        crclr   so
        mfcr    r9
        mflr    r10
        li      r11,0xc01
        std     r9,_CCR(r1)
        std     r10,_LINK(r1)
        std     r11,_TRAP(r1)
        mfxer   r9
        mfctr   r10
        std     r9,_XER(r1)
        std     r10,_CTR(r1)
        std     r3,ORIG_GPR3(r1)
        ld      r2,PACATOC(r13)
        addi    r9,r1,STACK_FRAME_OVERHEAD
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r9)             /* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
        /* Hack for handling interrupts when soft-enabling on iSeries */
        cmpdi   cr1,r0,0x5555           /* syscall 0x5555 */
        andi.   r10,r12,MSR_PR          /* from kernel */
        crand   4*cr0+eq,4*cr1+eq,4*cr0+eq
        beq     hardware_interrupt_entry
        lbz     r10,PACAPROCENABLED(r13)
        std     r10,SOFTE(r1)
#endif
        mfmsr   r11
        ori     r11,r11,MSR_EE
        mtmsrd  r11,1

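/*
 * Illustrative sketch (hedged): the stores above lay out an exception
 * frame that C code sees as a struct pt_regs.  The field order below
 * is abridged from memory of this era's asm/ptrace.h; consult the
 * header rather than this comment:
 *
 *      struct pt_regs {
 *              unsigned long gpr[32];
 *              unsigned long nip, msr, orig_gpr3, ctr, link, xer, ccr;
 *              unsigned long softe;    // soft-enable state (iSeries)
 *              unsigned long trap;     // 0xc01 here marks a syscall
 *              unsigned long dar, dsisr, result;
 *      };
 *
 * STACK_FRAME_OVERHEAD bytes of ABI frame sit below the regs, and the
 * "regshere" marker doubleword is stored just beneath them.
 */
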
#ifdef SHOW_SYSCALLS
        bl      .do_show_syscall
        REST_GPR(0,r1)
        REST_4GPRS(3,r1)
        REST_2GPRS(7,r1)
        addi    r9,r1,STACK_FRAME_OVERHEAD
#endif
        clrrdi  r11,r1,THREAD_SHIFT
        ld      r10,TI_FLAGS(r11)
        andi.   r11,r10,_TIF_SYSCALL_T_OR_A
        bne-    syscall_dotrace
syscall_dotrace_cont:
        cmpldi  0,r0,NR_syscalls
        bge-    syscall_enosys

system_call:                    /* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
        ld      r11,.SYS_CALL_TABLE@toc(2)
        andi.   r10,r10,_TIF_32BIT
        beq     15f
        addi    r11,r11,8       /* use 32-bit syscall entries */
        clrldi  r3,r3,32
        clrldi  r4,r4,32
        clrldi  r5,r5,32
        clrldi  r6,r6,32
        clrldi  r7,r7,32
        clrldi  r8,r8,32
15:
        slwi    r0,r0,4
        ldx     r10,r11,r0      /* Fetch system call handler [ptr] */
        mtctr   r10
        bctrl                   /* Call handler */

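/*
 * Illustrative sketch (hedged; the names below are made up, not the
 * kernel's): each syscall occupies a 16-byte row in the table, a
 * 64-bit entry followed by its 32-bit sibling, which is why the
 * syscall number is shifted left by 4 and the compat path adds 8:
 *
 *      typedef long (*syscall_fn)(long, long, long, long, long, long);
 *      struct syscall_row { syscall_fn native; syscall_fn compat; };
 *
 *      long dispatch(struct syscall_row *tbl, unsigned int nr,
 *                    int is_32bit, long a[6])
 *      {
 *              syscall_fn fn = is_32bit ? tbl[nr].compat : tbl[nr].native;
 *              // 32-bit callers also had their arguments truncated
 *              // to 32 bits first (the clrldi instructions above).
 *              return fn(a[0], a[1], a[2], a[3], a[4], a[5]);
 *      }
 */
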
syscall_exit:
        std     r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
        bl      .do_show_syscall_exit
        ld      r3,RESULT(r1)
#endif
        clrrdi  r12,r1,THREAD_SHIFT

        /* disable interrupts so current_thread_info()->flags can't change,
           and so that we don't get interrupted after loading SRR0/1. */
        ld      r8,_MSR(r1)
        andi.   r10,r8,MSR_RI
        beq-    unrecov_restore
        mfmsr   r10
        rldicl  r10,r10,48,1
        rotldi  r10,r10,16
        mtmsrd  r10,1
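
/*
 * Illustrative sketch (hedged): the rldicl/rotldi pair clears MSR_EE
 * without needing a scratch mask register.  Rotating left by 48 parks
 * the EE bit (bit 15) in the most-significant position, rldicl's mask
 * strips it, and rotating left by a further 16 brings everything back:
 *
 *      unsigned long rotl64(unsigned long x, int n)
 *      {
 *              return (x << n) | (x >> (64 - n));
 *      }
 *      unsigned long clear_ee(unsigned long msr)
 *      {
 *              // net effect: msr & ~MSR_EE  (MSR_EE == 1UL << 15)
 *              return rotl64(rotl64(msr, 48) & ~(1UL << 63), 16);
 *      }
 */
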
        ld      r9,TI_FLAGS(r12)
        li      r11,-_LAST_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR|_TIF_RESTORE_SIGMASK)
        bne-    syscall_exit_work
        cmpld   r3,r11
        ld      r5,_CCR(r1)
        bge-    syscall_error
syscall_error_cont:
        ld      r7,_NIP(r1)
        stdcx.  r0,0,r1                 /* to clear the reservation */
        andi.   r6,r8,MSR_PR
        ld      r4,_LINK(r1)
        beq-    1f                      /* only restore r13 if */
        ld      r13,GPR13(r1)           /* returning to usermode */
1:      ld      r2,GPR2(r1)
        li      r12,MSR_RI
        andc    r11,r10,r12
        mtmsrd  r11,1                   /* clear MSR.RI */
        ld      r1,GPR1(r1)
        mtlr    r4
        mtcr    r5
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
        rfid
        b       .       /* prevent speculative execution */

syscall_error:
        oris    r5,r5,0x1000    /* Set SO bit in CR */
        neg     r3,r3
        std     r5,_CCR(r1)
        b       syscall_error_cont

/* Traced system call support */
syscall_dotrace:
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_syscall_trace_enter
        ld      r0,GPR0(r1)     /* Restore original registers */
        ld      r3,GPR3(r1)
        ld      r4,GPR4(r1)
        ld      r5,GPR5(r1)
        ld      r6,GPR6(r1)
        ld      r7,GPR7(r1)
        ld      r8,GPR8(r1)
        addi    r9,r1,STACK_FRAME_OVERHEAD
        clrrdi  r10,r1,THREAD_SHIFT
        ld      r10,TI_FLAGS(r10)
        b       syscall_dotrace_cont

syscall_enosys:
        li      r3,-ENOSYS
        b       syscall_exit

syscall_exit_work:
        /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
           If TIF_NOERROR is set, just save r3 as it is. */

        andi.   r0,r9,_TIF_RESTOREALL
        bne-    2f
        cmpld   r3,r11          /* r11 holds -_LAST_ERRNO */
        blt+    1f
        andi.   r0,r9,_TIF_NOERROR
        bne-    1f
        ld      r5,_CCR(r1)
        neg     r3,r3
        oris    r5,r5,0x1000    /* Set SO bit in CR */
        std     r5,_CCR(r1)
1:      std     r3,GPR3(r1)
2:      andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
        beq     4f

        /* Clear per-syscall TIF flags if any are set, but _leave_
           _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
           yet. */

        li      r11,_TIF_PERSYSCALL_MASK
        addi    r12,r12,TI_FLAGS
3:      ldarx   r10,0,r12
        andc    r10,r10,r11
        stdcx.  r10,0,r12
        bne-    3b
        subi    r12,r12,TI_FLAGS
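
/*
 * Illustrative sketch (hedged): the ldarx/stdcx. loop is an atomic
 * read-modify-write on the thread_info flags word, in the spirit of
 * the C below (GCC builtin shown for illustration, not the kernel's
 * own helper):
 *
 *      void atomic_clear_bits(unsigned long *flags, unsigned long mask)
 *      {
 *              unsigned long old;
 *              do {
 *                      old = *flags;           // ldarx: load + reserve
 *              } while (!__sync_bool_compare_and_swap(flags, old,
 *                                                     old & ~mask));
 *              // stdcx. fails, and we retry, if anything else wrote
 *              // the word between the load and the store.
 *      }
 */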

4:      bl      .save_nvgprs
        /* Anything else left to do? */
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
        beq     .ret_from_except_lite

        /* Re-enable interrupts */
        mfmsr   r10
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1

        andi.   r0,r9,_TIF_SAVE_NVGPRS
        bne     save_user_nvgprs

        /* If tracing, do the trace-exit callback (interrupts are on) */
save_user_nvgprs_cont:
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
        beq     5f

        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_syscall_trace_leave
        REST_NVGPRS(r1)
        clrrdi  r12,r1,THREAD_SHIFT

        /* Disable interrupts again and handle other work if any */
5:      mfmsr   r10
        rldicl  r10,r10,48,1
        rotldi  r10,r10,16
        mtmsrd  r10,1

        b       .ret_from_except_lite

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
        ld      r11,_TRAP(r1)
        andi.   r0,r11,1        /* low bit of trap set: nvgprs not saved yet */
        beqlr-                  /* already saved, nothing to do */
        SAVE_NVGPRS(r1)
        clrrdi  r0,r11,1        /* clear the bit to record the save */
        std     r0,_TRAP(r1)
        blr


save_user_nvgprs:
        ld      r10,TI_SIGFRAME(r12)
        andi.   r0,r9,_TIF_32BIT
        beq-    save_user_nvgprs_64

        /* 32-bit save to userspace */

.macro savewords start, end
1:      stw     \start,4*(\start)(r10)
        .section __ex_table,"a"
        .align  3
        .llong  1b,save_user_nvgprs_fault
        .previous
        .if \end - \start
        savewords "(\start+1)",\end
        .endif
.endm
        savewords 14,31
        b       save_user_nvgprs_cont

save_user_nvgprs_64:
        /* 64-bit save to userspace */

.macro savelongs start, end
1:      std     \start,8*(\start)(r10)
        .section __ex_table,"a"
        .align  3
        .llong  1b,save_user_nvgprs_fault
        .previous
        .if \end - \start
        savelongs "(\start+1)",\end
        .endif
.endm
        savelongs 14,31
        b       save_user_nvgprs_cont
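
/*
 * Note (hedged): savewords/savelongs expand recursively, one store per
 * register from \start to \end, and each store gets an __ex_table
 * entry pairing its address with the fixup label, so a faulting
 * user-space store resumes at save_user_nvgprs_fault instead of
 * oopsing.  The expansion of "savewords 14,31" is roughly:
 *
 *      stw     r14,56(r10)     // + ex_table entry -> fault handler
 *      stw     r15,60(r10)     // + ex_table entry
 *      ...
 *      stw     r31,124(r10)    // + ex_table entry
 */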

save_user_nvgprs_fault:
        li      r3,11           /* SIGSEGV */
        ld      r4,TI_TASK(r12)
        bl      .force_sigsegv

        clrrdi  r12,r1,THREAD_SHIFT
        ld      r9,TI_FLAGS(r12)
        b       save_user_nvgprs_cont

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
        bl      .save_nvgprs
        bl      .sys_fork
        b       syscall_exit

_GLOBAL(ppc_vfork)
        bl      .save_nvgprs
        bl      .sys_vfork
        b       syscall_exit

_GLOBAL(ppc_clone)
        bl      .save_nvgprs
        bl      .sys_clone
        b       syscall_exit

_GLOBAL(ret_from_fork)
        bl      .schedule_tail
        REST_NVGPRS(r1)
        li      r3,0
        b       syscall_exit


/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
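
/*
 * Illustrative sketch (hedged; the names below are invented, the real
 * interface is in this era's headers and process.c):
 *
 *      struct task_struct *_switch(struct thread_struct *prev,
 *                                  struct thread_struct *next)
 *      {
 *              push_nonvolatile_state();        // NVGPRs, LR, CR, old stack
 *              prev->ksp = current_stack_pointer();
 *              set_paca_current(task_of(next)); // PACACURRENT
 *              map_new_kernel_stack(next);      // bolt SLB entry if needed
 *              switch_stack(next->ksp);
 *              pop_nonvolatile_state();         // from the new stack
 *              return task_of(prev);            // old task, for the caller
 *      }
 */
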
        .align  7
_GLOBAL(_switch)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-SWITCH_FRAME_SIZE(r1)
        /* r3-r13 are caller saved -- Cort */
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
        mflr    r20             /* Return to switch caller */
        mfmsr   r22
        li      r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_VEC@h /* Disable altivec */
        mfspr   r24,SPRN_VRSAVE /* save vrsave register value */
        std     r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
        and.    r0,r0,r22
        beq+    1f
        andc    r22,r22,r0
        mtmsrd  r22
        isync
1:      std     r20,_NIP(r1)
        mfcr    r23
        std     r23,_CCR(r1)
        std     r1,KSP(r3)      /* Set old stack pointer */

#ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
         * stores it has performed on this one.
         */
        sync
#endif /* CONFIG_SMP */

        addi    r6,r4,-THREAD   /* Convert THREAD to 'current' */
        std     r6,PACACURRENT(r13)     /* Set new 'current' */

        ld      r8,KSP(r4)      /* new stack pointer */
BEGIN_FTR_SECTION
        clrrdi  r6,r8,28        /* get its ESID */
        clrrdi  r9,r1,28        /* get current sp ESID */
        clrldi. r0,r6,2         /* is new ESID c00000000? */
        cmpd    cr1,r6,r9       /* or is new ESID the same as current ESID? */
        cror    eq,4*cr1+eq,eq
        beq     2f              /* if yes, don't slbie it */

        /* Bolt in the new stack SLB entry */
        ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
        oris    r0,r6,(SLB_ESID_V)@h
        ori     r0,r0,(SLB_NUM_BOLTED-1)@l
        slbie   r6
        slbie   r6              /* Workaround POWER5 < DD2.1 issue */
        slbmte  r7,r0
        isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
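
/*
 * Note (hedged, from memory of this era's SLB format): slbmte takes
 * two operands, which the code above assembles roughly as
 *
 *      rs (r7) = VSID plus segment flags, precomputed in KSP_VSID
 *      rb (r0) = ESID | SLB_ESID_V | (SLB_NUM_BOLTED - 1)
 *
 * i.e. the new stack's segment goes into the last bolted SLB slot as
 * a valid entry; the doubled slbie flushes any stale translation
 * (twice, for the POWER5 < DD2.1 erratum noted above).
 */
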
        clrrdi  r7,r8,THREAD_SHIFT      /* base of new stack */
        /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
           because we don't need to leave the 288-byte ABI gap at the
           top of the kernel stack. */
        addi    r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

        mr      r1,r8           /* start using new stack pointer */
        std     r7,PACAKSAVE(r13)

        ld      r6,_CCR(r1)
        mtcrf   0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        ld      r0,THREAD_VRSAVE(r4)
        mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

        /* r3-r13 are destroyed -- Cort */
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)

        /* convert old thread to its task_struct for return value */
        addi    r3,r3,-THREAD
        ld      r7,_NIP(r1)     /* Return to _switch caller in new task */
        mtlr    r7
        addi    r1,r1,SWITCH_FRAME_SIZE
        blr

        .align  7
_GLOBAL(ret_from_except)
        ld      r11,_TRAP(r1)
        andi.   r0,r11,1
        bne     .ret_from_except_lite
        REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
        /*
         * Disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt.
         */
        mfmsr   r10             /* Get current interrupt state */
        rldicl  r9,r10,48,1     /* clear MSR_EE */
        rotldi  r9,r9,16
        mtmsrd  r9,1            /* Update machine state */

#ifdef CONFIG_PREEMPT
        clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
        li      r0,_TIF_NEED_RESCHED    /* bits to check */
        ld      r3,_MSR(r1)
        ld      r4,TI_FLAGS(r9)
        /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
        rlwimi  r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
        and.    r0,r4,r0        /* check NEED_RESCHED and maybe SIGPENDING */
        bne     do_work
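
/*
 * Illustrative sketch (hedged): the rlwimi above builds the work mask
 * without a branch.  In C terms:
 *
 *      unsigned long mask = _TIF_NEED_RESCHED;
 *      if (regs->msr & MSR_PR)          // returning to user mode?
 *              mask |= _TIF_SIGPENDING; // then signals matter too
 *      if (ti->flags & mask)
 *              goto do_work;
 *
 * rlwimi rotates the MSR value so MSR_PR lands exactly on the
 * _TIF_SIGPENDING bit and inserts it into r0 under that mask.
 */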

#else /* !CONFIG_PREEMPT */
        ld      r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r3,r3,MSR_PR
        beq     restore         /* if not, just restore regs and return */

        /* Check current_thread_info()->flags */
        clrrdi  r9,r1,THREAD_SHIFT
        ld      r4,TI_FLAGS(r9)
        andi.   r0,r4,_TIF_USER_WORK_MASK
        bne     do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
        ld      r5,SOFTE(r1)
        cmpdi   0,r5,0
        beq     4f
        /* Check for pending interrupts (iSeries) */
        ld      r3,PACALPPACAPTR(r13)
        ld      r3,LPPACAANYINT(r3)
        cmpdi   r3,0
        beq+    4f              /* skip do_IRQ if no interrupts */

        li      r3,0
        stb     r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
        ori     r10,r10,MSR_EE
        mtmsrd  r10             /* hard-enable again */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_IRQ
        b       .ret_from_except_lite   /* loop back and handle more */

4:      stb     r5,PACAPROCENABLED(r13)
#endif

        ld      r3,_MSR(r1)
        andi.   r0,r3,MSR_RI
        beq-    unrecov_restore

        andi.   r0,r3,MSR_PR

        /*
         * r13 is our per cpu area, only restore it if we are returning to
         * userspace
         */
        beq     1f
        REST_GPR(13, r1)
1:
        ld      r3,_CTR(r1)
        ld      r0,_LINK(r1)
        mtctr   r3
        mtlr    r0
        ld      r3,_XER(r1)
        mtspr   SPRN_XER,r3

        REST_8GPRS(5, r1)

        stdcx.  r0,0,r1         /* to clear the reservation */

        mfmsr   r0
        li      r2, MSR_RI
        andc    r0,r0,r2
        mtmsrd  r0,1

        ld      r0,_MSR(r1)
        mtspr   SPRN_SRR1,r0

        ld      r2,_CCR(r1)
        mtcrf   0xFF,r2
        ld      r2,_NIP(r1)
        mtspr   SPRN_SRR0,r2

        ld      r0,GPR0(r1)
        ld      r2,GPR2(r1)
        ld      r3,GPR3(r1)
        ld      r4,GPR4(r1)
        ld      r1,GPR1(r1)

        rfid
        b       .       /* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
        andi.   r0,r3,MSR_PR    /* Returning to user mode? */
        bne     user_work
        /* Check that preempt_count() == 0 and interrupts are enabled */
        lwz     r8,TI_PREEMPT(r9)
        cmpwi   cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
        ld      r0,SOFTE(r1)
        cmpdi   r0,0
#else
        andi.   r0,r3,MSR_EE
#endif
        crandc  eq,cr1*4+eq,eq
        bne     restore
        /* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
        li      r0,1
        stb     r0,PACAPROCENABLED(r13)
#endif
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1           /* reenable interrupts */
        bl      .preempt_schedule
        mfmsr   r10
        clrrdi  r9,r1,THREAD_SHIFT
        rldicl  r10,r10,48,1    /* disable interrupts again */
        rotldi  r10,r10,16
        mtmsrd  r10,1
        ld      r4,TI_FLAGS(r9)
        andi.   r0,r4,_TIF_NEED_RESCHED
        bne     1b
        b       restore

user_work:
#endif
        /* Enable interrupts */
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1

        andi.   r0,r4,_TIF_NEED_RESCHED
        beq     1f
        bl      .schedule
        b       .ret_from_except_lite

1:      bl      .save_nvgprs
        li      r3,0
        addi    r4,r1,STACK_FRAME_OVERHEAD
        bl      .do_signal
        b       .ret_from_except

unrecov_restore:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
        b       unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */

        /* Because RTAS is running in 32b mode, it clobbers the high order half
         * of all registers that it saves. We therefore save those registers
         * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
         */
        SAVE_GPR(2, r1)                 /* Save the TOC */
        SAVE_GPR(13, r1)                /* Save paca */
        SAVE_8GPRS(14, r1)              /* Save the non-volatiles */
        SAVE_10GPRS(22, r1)             /* ditto */

        mfcr    r4
        std     r4,_CCR(r1)
        mfctr   r5
        std     r5,_CTR(r1)
        mfspr   r6,SPRN_XER
        std     r6,_XER(r1)
        mfdar   r7
        std     r7,_DAR(r1)
        mfdsisr r8
        std     r8,_DSISR(r1)
        mfsrr0  r9
        std     r9,_SRR0(r1)
        mfsrr1  r10
        std     r10,_SRR1(r1)

        /* There is no way it is acceptable to get here with interrupts enabled,
         * check it with the asm equivalent of WARN_ON
         */
        mfmsr   r6
        andi.   r0,r6,MSR_EE
1:      tdnei   r0,0
        .section __bug_table,"a"
        .llong  1b,__LINE__ + 0x1000000, 1f, 2f
        .previous
        .section .rodata,"a"
1:      .asciz  __FILE__
2:      .asciz  "enter_rtas"
        .previous
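
/*
 * Note (hedged): the tdnei plus __bug_table entry above is a
 * hand-rolled equivalent of, in illustrative C:
 *
 *      WARN_ON(mfmsr() & MSR_EE);      // must arrive hard-disabled
 *
 * tdnei traps iff r0 is non-zero (EE was set), and the 0x1000000
 * added to __LINE__ appears to mark the entry as a warning rather
 * than a BUG in this era's bug-table format.
 */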

        /* Unfortunately, the stack pointer and the MSR are also clobbered,
         * so they are saved in the PACA which allows us to restore
         * our original state after RTAS returns.
         */
        std     r1,PACAR1(r13)
        std     r6,PACASAVEDMSR(r13)

        /* Setup our real return addr */
        LOAD_REG_ADDR(r4,.rtas_return_loc)
        clrldi  r4,r4,2         /* convert to realmode address */
        mtlr    r4

        li      r0,0
        ori     r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
        andc    r0,r6,r0

        li      r9,1
        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
        ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
        andc    r6,r0,r9
        ori     r6,r6,MSR_RI
        sync                    /* disable interrupts so SRR0/1 */
        mtmsrd  r0              /* don't get trashed */
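
/*
 * Illustrative sketch (hedged) of the MSR arithmetic above:
 *
 *      r0 = msr & ~(MSR_EE | MSR_SE | MSR_BE | MSR_RI);
 *      r9 = MSR_SF | MSR_IR | MSR_DR | MSR_FE0 | MSR_FE1 | MSR_FP;
 *      r6 = (r0 & ~r9) | MSR_RI;
 *
 *      mtmsr(r0);      // interrupts and RI off: SRR0/1 now safe
 *      // the rfid below loads SRR1 = r6: 32-bit, MMU off, RI on,
 *      // which is the environment RTAS expects.
 */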

        LOAD_REG_ADDR(r4, rtas)
        ld      r5,RTASENTRY(r4)        /* get the rtas->entry value */
        ld      r4,RTASBASE(r4)         /* get the rtas->base value */

        mtspr   SPRN_SRR0,r5
        mtspr   SPRN_SRR1,r6
        rfid
        b       .       /* prevent speculative execution */

_STATIC(rtas_return_loc)
        /* relocation is off at this point */
        mfspr   r4,SPRN_SPRG3   /* Get PACA */
        clrldi  r4,r4,2         /* convert to realmode address */

        mfmsr   r6
        li      r0,MSR_RI
        andc    r6,r6,r0
        sync
        mtmsrd  r6

        ld      r1,PACAR1(r4)   /* Restore our SP */
        LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
        ld      r4,PACASAVEDMSR(r4)     /* Restore our MSR */

        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
        rfid
        b       .       /* prevent speculative execution */

_STATIC(rtas_restore_regs)
        /* relocation is on at this point */
        REST_GPR(2, r1)                 /* Restore the TOC */
        REST_GPR(13, r1)                /* Restore paca */
        REST_8GPRS(14, r1)              /* Restore the non-volatiles */
        REST_10GPRS(22, r1)             /* ditto */

        mfspr   r13,SPRN_SPRG3

        ld      r4,_CCR(r1)
        mtcr    r4
        ld      r5,_CTR(r1)
        mtctr   r5
        ld      r6,_XER(r1)
        mtspr   SPRN_XER,r6
        ld      r7,_DAR(r1)
        mtdar   r7
        ld      r8,_DSISR(r1)
        mtdsisr r8
        ld      r9,_SRR0(r1)
        mtsrr0  r9
        ld      r10,_SRR1(r1)
        mtsrr1  r10

        addi    r1,r1,RTAS_FRAME_SIZE   /* Unstack our frame */
        ld      r0,16(r1)               /* get return address */

        mtlr    r0
        blr                             /* return to caller */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */

        /* Because PROM is running in 32b mode, it clobbers the high order half
         * of all registers that it saves. We therefore save those registers
         * PROM might touch to the stack. (r0, r3-r13 are caller saved)
         */
        SAVE_8GPRS(2, r1)
        SAVE_GPR(13, r1)
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
        mfcr    r4
        std     r4,_CCR(r1)
        mfctr   r5
        std     r5,_CTR(r1)
        mfspr   r6,SPRN_XER
        std     r6,_XER(r1)
        mfdar   r7
        std     r7,_DAR(r1)
        mfdsisr r8
        std     r8,_DSISR(r1)
        mfsrr0  r9
        std     r9,_SRR0(r1)
        mfsrr1  r10
        std     r10,_SRR1(r1)
        mfmsr   r11
        std     r11,_MSR(r1)

        /* Get the PROM entrypoint */
        ld      r0,GPR4(r1)
        mtlr    r0

        /* Switch MSR to 32-bit mode */
        mfmsr   r11
        li      r12,1
        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
        andc    r11,r11,r12
        li      r12,1
        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
        andc    r11,r11,r12
        mtmsrd  r11
        isync

        /* Restore arguments & enter PROM here... */
        ld      r3,GPR3(r1)
        blrl

        /* Just make sure that r1's top 32 bits didn't get
         * corrupted by OF
         */
        rldicl  r1,r1,0,32

        /* Restore the MSR (back to 64 bits) */
        ld      r0,_MSR(r1)
        mtmsrd  r0
        isync

        /* Restore other registers */
        REST_GPR(2, r1)
        REST_GPR(13, r1)
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)
        ld      r4,_CCR(r1)
        mtcr    r4
        ld      r5,_CTR(r1)
        mtctr   r5
        ld      r6,_XER(r1)
        mtspr   SPRN_XER,r6
        ld      r7,_DAR(r1)
        mtdar   r7
        ld      r8,_DSISR(r1)
        mtdsisr r8
        ld      r9,_SRR0(r1)
        mtsrr0  r9
        ld      r10,_SRR1(r1)
        mtsrr1  r10

        addi    r1,r1,PROM_FRAME_SIZE
        ld      r0,16(r1)
        mtlr    r0
        blr

#endif /* CONFIG_PPC_MULTIPLATFORM */