/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>

/*
 * System calls.
 */
        .section        ".toc","aw"
.SYS_CALL_TABLE:
        .tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
        .tc     ID_72656773_68657265[TC],0x7265677368657265    /* ASCII "regshere" */

        .section        ".text"
        .align 7

#undef SHOW_SYSCALLS

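/*
 * On entry here (as set up by the system call exception prologue, and
 * as the stores below rely on): r0 holds the syscall number, r3-r8 hold
 * the arguments, r9 holds the user's r13, r11/r12 hold SRR0/SRR1 (the
 * return NIP and MSR), and r13 points to the PACA.  r1 is still the
 * caller's stack pointer.
 */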
        .globl system_call_common
system_call_common:
        andi.   r10,r12,MSR_PR
        mr      r10,r1
        addi    r1,r1,-INT_FRAME_SIZE
        beq-    1f
        ld      r1,PACAKSAVE(r13)
1:      std     r10,0(r1)
        crclr   so
        std     r11,_NIP(r1)
        std     r12,_MSR(r1)
        std     r0,GPR0(r1)
        std     r10,GPR1(r1)
        ACCOUNT_CPU_USER_ENTRY(r10, r11)
        std     r2,GPR2(r1)
        std     r3,GPR3(r1)
        std     r4,GPR4(r1)
        std     r5,GPR5(r1)
        std     r6,GPR6(r1)
        std     r7,GPR7(r1)
        std     r8,GPR8(r1)
        li      r11,0
        std     r11,GPR9(r1)
        std     r11,GPR10(r1)
        std     r11,GPR11(r1)
        std     r11,GPR12(r1)
        std     r9,GPR13(r1)
        mfcr    r9
        mflr    r10
        li      r11,0xc01
        std     r9,_CCR(r1)
        std     r10,_LINK(r1)
        std     r11,_TRAP(r1)
        mfxer   r9
        mfctr   r10
        std     r9,_XER(r1)
        std     r10,_CTR(r1)
        std     r3,ORIG_GPR3(r1)
        ld      r2,PACATOC(r13)
        addi    r9,r1,STACK_FRAME_OVERHEAD
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r9)             /* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        /* Hack for handling interrupts when soft-enabling on iSeries */
        cmpdi   cr1,r0,0x5555           /* syscall 0x5555 */
        andi.   r10,r12,MSR_PR          /* from kernel */
        crand   4*cr0+eq,4*cr1+eq,4*cr0+eq
        beq     hardware_interrupt_entry
        lbz     r10,PACAPROCENABLED(r13)
        std     r10,SOFTE(r1)
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
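/*
 * (The 0x5555 test above: legacy iSeries replays a hardware interrupt
 * that arrived while soft-disabled by issuing "sc" from kernel mode
 * with r0 = 0x5555, which is diverted to hardware_interrupt_entry.)
 */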
        mfmsr   r11
        ori     r11,r11,MSR_EE
        mtmsrd  r11,1           /* enable interrupts (the L=1 form alters only MSR[EE] and MSR[RI]) */

#ifdef SHOW_SYSCALLS
        bl      .do_show_syscall
        REST_GPR(0,r1)
        REST_4GPRS(3,r1)
        REST_2GPRS(7,r1)
        addi    r9,r1,STACK_FRAME_OVERHEAD
#endif
        clrrdi  r11,r1,THREAD_SHIFT     /* current_thread_info() */
        ld      r10,TI_FLAGS(r11)
        andi.   r11,r10,_TIF_SYSCALL_T_OR_A
        bne-    syscall_dotrace
syscall_dotrace_cont:
        cmpldi  0,r0,NR_syscalls
        bge-    syscall_enosys

system_call:                    /* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
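/*
 * Each sys_call_table slot is 16 bytes: the 64-bit handler followed by
 * its 32-bit counterpart.  Hence the shift-by-4 below to index the
 * table, and the addi of 8 to select the compat entry.
 */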
        ld      r11,.SYS_CALL_TABLE@toc(2)
        andi.   r10,r10,_TIF_32BIT
        beq     15f
        addi    r11,r11,8       /* use 32-bit syscall entries */
        clrldi  r3,r3,32
        clrldi  r4,r4,32
        clrldi  r5,r5,32
        clrldi  r6,r6,32
        clrldi  r7,r7,32
        clrldi  r8,r8,32
15:
        slwi    r0,r0,4
        ldx     r10,r11,r0      /* Fetch system call handler [ptr] */
        mtctr   r10
        bctrl                   /* Call handler */

syscall_exit:
        std     r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
        bl      .do_show_syscall_exit
        ld      r3,RESULT(r1)
#endif
        clrrdi  r12,r1,THREAD_SHIFT

        /* Disable interrupts so current_thread_info()->flags can't change,
           and so that we don't get interrupted after loading SRR0/1. */
        ld      r8,_MSR(r1)
        andi.   r10,r8,MSR_RI
        beq-    unrecov_restore
        mfmsr   r10
        rldicl  r10,r10,48,1    /* rotate MSR_EE into the MSB and clear it */
        rotldi  r10,r10,16      /* rotate back, leaving all other bits intact */
        mtmsrd  r10,1
        ld      r9,TI_FLAGS(r12)
        li      r11,-_LAST_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    syscall_exit_work
        cmpld   r3,r11
        ld      r5,_CCR(r1)
        bge-    syscall_error
syscall_error_cont:
        ld      r7,_NIP(r1)
        stdcx.  r0,0,r1                 /* to clear the reservation */
        andi.   r6,r8,MSR_PR
        ld      r4,_LINK(r1)
        beq-    1f
        ACCOUNT_CPU_USER_EXIT(r11, r12)
        ld      r13,GPR13(r1)   /* only restore r13 if returning to usermode */
1:      ld      r2,GPR2(r1)
        /* Clear MSR.RI: an exception between here and rfid would find
         * SRR0/SRR1 live and so would be unrecoverable. */
        li      r12,MSR_RI
        andc    r11,r10,r12
        mtmsrd  r11,1                   /* clear MSR.RI */
        ld      r1,GPR1(r1)
        mtlr    r4
        mtcr    r5
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
        rfid
        b       .       /* prevent speculative execution */

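/*
 * Error returns: a handler reports failure as a negative errno in r3.
 * The ppc64 syscall ABI returns the positive errno to userspace with
 * the summary-overflow (SO) bit of cr0 set -- libc stubs test SO, not
 * the sign of r3.  SO was cleared with crclr at entry above.
 */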
syscall_error:
        oris    r5,r5,0x1000    /* Set SO bit in CR */
        neg     r3,r3
        std     r5,_CCR(r1)
        b       syscall_error_cont

/* Traced system call support */
syscall_dotrace:
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_syscall_trace_enter
        ld      r0,GPR0(r1)     /* Restore original registers */
        ld      r3,GPR3(r1)
        ld      r4,GPR4(r1)
        ld      r5,GPR5(r1)
        ld      r6,GPR6(r1)
        ld      r7,GPR7(r1)
        ld      r8,GPR8(r1)
        addi    r9,r1,STACK_FRAME_OVERHEAD
        clrrdi  r10,r1,THREAD_SHIFT
        ld      r10,TI_FLAGS(r10)
        b       syscall_dotrace_cont

syscall_enosys:
        li      r3,-ENOSYS
        b       syscall_exit

syscall_exit_work:
        /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
           If TIF_NOERROR is set, just save r3 as it is. */

        andi.   r0,r9,_TIF_RESTOREALL
        beq+    0f
        REST_NVGPRS(r1)
        b       2f
0:      cmpld   r3,r11          /* r11 is -_LAST_ERRNO */
        blt+    1f
        andi.   r0,r9,_TIF_NOERROR
        bne-    1f
        ld      r5,_CCR(r1)
        neg     r3,r3
        oris    r5,r5,0x1000    /* Set SO bit in CR */
        std     r5,_CCR(r1)
1:      std     r3,GPR3(r1)
2:      andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
        beq     4f

        /* Clear per-syscall TIF flags if any are set, with a
         * ldarx/stdcx. retry loop: other CPUs may update TI_FLAGS
         * concurrently. */

        li      r11,_TIF_PERSYSCALL_MASK
        addi    r12,r12,TI_FLAGS
3:      ldarx   r10,0,r12
        andc    r10,r10,r11
        stdcx.  r10,0,r12
        bne-    3b
        subi    r12,r12,TI_FLAGS

4:      /* Anything else left to do? */
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
        beq     .ret_from_except_lite

        /* Re-enable interrupts */
        mfmsr   r10
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1

        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_syscall_trace_leave
        b       .ret_from_except

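/*
 * Bit 0 of the _TRAP word doubles as a "non-volatile GPRs not saved"
 * flag: it is set in the trap value at exception entry (e.g. the 0xc01
 * stored at syscall entry above) and cleared below once SAVE_NVGPRS
 * has run, making a second call a no-op.
 */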
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
        ld      r11,_TRAP(r1)
        andi.   r0,r11,1
        beqlr-
        SAVE_NVGPRS(r1)
        clrrdi  r0,r11,1
        std     r0,_TRAP(r1)
        blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
        bl      .save_nvgprs
        bl      .sys_fork
        b       syscall_exit

_GLOBAL(ppc_vfork)
        bl      .save_nvgprs
        bl      .sys_vfork
        b       syscall_exit

_GLOBAL(ppc_clone)
        bl      .save_nvgprs
        bl      .sys_clone
        b       syscall_exit

_GLOBAL(ppc32_swapcontext)
        bl      .save_nvgprs
        bl      .compat_sys_swapcontext
        b       syscall_exit

_GLOBAL(ppc64_swapcontext)
        bl      .save_nvgprs
        bl      .sys_swapcontext
        b       syscall_exit

_GLOBAL(ret_from_fork)
        bl      .schedule_tail
        REST_NVGPRS(r1)
        li      r3,0            /* the child returns 0 from fork() */
        b       syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
        .align  7
_GLOBAL(_switch)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-SWITCH_FRAME_SIZE(r1)
        /* r3-r13 are caller saved -- Cort */
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
        mflr    r20             /* Return to switch caller */
        mfmsr   r22
        li      r0,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_VEC@h /* Disable altivec */
        mfspr   r24,SPRN_VRSAVE /* save vrsave register value */
        std     r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
        and.    r0,r0,r22
        beq+    1f
        andc    r22,r22,r0
        mtmsrd  r22
        isync
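        /* If the outgoing task had FP (or Altivec) enabled, MSR[FP]/
         * MSR[VEC] have just been turned off: the next FP/VMX access
         * will trap, so the register state can be switched lazily. */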
1:      std     r20,_NIP(r1)
        mfcr    r23
        std     r23,_CCR(r1)
        std     r1,KSP(r3)      /* Set old stack pointer */

#ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
         * stores it has performed on this one.
         */
        sync
#endif /* CONFIG_SMP */

        addi    r6,r4,-THREAD   /* Convert THREAD to 'current' */
        std     r6,PACACURRENT(r13)     /* Set new 'current' */

        ld      r8,KSP(r4)      /* new stack pointer */
BEGIN_FTR_SECTION
        clrrdi  r6,r8,28        /* get its ESID */
        clrrdi  r9,r1,28        /* get current sp ESID */
        clrldi. r0,r6,2         /* is new ESID c00000000? */
        cmpd    cr1,r6,r9       /* or is new ESID the same as current ESID? */
        cror    eq,4*cr1+eq,eq
        beq     2f              /* if yes, don't slbie it */

        /* Bolt in the new stack SLB entry */
        ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
        oris    r0,r6,(SLB_ESID_V)@h
        ori     r0,r0,(SLB_NUM_BOLTED-1)@l

        /* Update the last bolted SLB */
        ld      r9,PACA_SLBSHADOWPTR(r13)
        li      r12,0
        std     r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
        std     r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
        std     r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

        slbie   r6
        slbie   r6              /* Workaround POWER5 < DD2.1 issue */
        slbmte  r7,r0
        isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
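        /* Note the ordering of the shadow-buffer update above: the
         * ESID is zeroed first and only rewritten once the VSID is in
         * place, so the hypervisor never sees a valid but half-written
         * stack entry. */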
        clrrdi  r7,r8,THREAD_SHIFT      /* base of new stack */
        /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
           because we don't need to leave the 288-byte ABI gap at the
           top of the kernel stack. */
        addi    r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

        mr      r1,r8           /* start using new stack pointer */
        std     r7,PACAKSAVE(r13)

        ld      r6,_CCR(r1)
        mtcrf   0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        ld      r0,THREAD_VRSAVE(r4)
        mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

        /* r3-r13 are destroyed -- Cort */
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)

        /* convert old thread to its task_struct for return value */
        addi    r3,r3,-THREAD
        ld      r7,_NIP(r1)     /* Return to _switch caller in new task */
        mtlr    r7
        addi    r1,r1,SWITCH_FRAME_SIZE
        blr

        .align  7
_GLOBAL(ret_from_except)
        ld      r11,_TRAP(r1)
        andi.   r0,r11,1
        bne     .ret_from_except_lite
        REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
        /*
         * Disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt.
         */
        mfmsr   r10             /* Get current interrupt state */
        rldicl  r9,r10,48,1     /* clear MSR_EE */
        rotldi  r9,r9,16
        mtmsrd  r9,1            /* Update machine state */

#ifdef CONFIG_PREEMPT
        clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
        li      r0,_TIF_NEED_RESCHED    /* bits to check */
        ld      r3,_MSR(r1)
        ld      r4,TI_FLAGS(r9)
        /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0,
           so signals are only checked when returning to user mode */
        rlwimi  r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
        and.    r0,r4,r0        /* check NEED_RESCHED and maybe SIGPENDING */
        bne     do_work

#else /* !CONFIG_PREEMPT */
        ld      r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r3,r3,MSR_PR
        beq     restore         /* if not, just restore regs and return */

        /* Check current_thread_info()->flags */
        clrrdi  r9,r1,THREAD_SHIFT
        ld      r4,TI_FLAGS(r9)
        andi.   r0,r4,_TIF_USER_WORK_MASK
        bne     do_work
#endif

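/*
 * The restore path reloads the register image from the exception frame
 * and returns with rfid.  MSR[RI] is cleared before SRR0/SRR1 are
 * loaded: an exception inside that window would overwrite them and be
 * unrecoverable.
 */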
restore:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        ld      r5,SOFTE(r1)
        cmpdi   0,r5,0
        beq     4f
        /* Check for pending interrupts (iSeries) */
        ld      r3,PACALPPACAPTR(r13)
        ld      r3,LPPACAANYINT(r3)
        cmpdi   r3,0
        beq+    4f      /* skip do_IRQ if no interrupts */

        li      r3,0
        stb     r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
        ori     r10,r10,MSR_EE
        mtmsrd  r10             /* hard-enable again */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_IRQ
        b       .ret_from_except_lite   /* loop back and handle more */

4:      stb     r5,PACAPROCENABLED(r13)
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif

        ld      r3,_MSR(r1)
        andi.   r0,r3,MSR_RI
        beq-    unrecov_restore

        andi.   r0,r3,MSR_PR

        /*
         * r13 is our per cpu area, only restore it if we are returning to
         * userspace
         */
        beq     1f
        ACCOUNT_CPU_USER_EXIT(r3, r4)
        REST_GPR(13, r1)
1:
        ld      r3,_CTR(r1)
        ld      r0,_LINK(r1)
        mtctr   r3
        mtlr    r0
        ld      r3,_XER(r1)
        mtspr   SPRN_XER,r3

        REST_8GPRS(5, r1)

        stdcx.  r0,0,r1         /* to clear the reservation */

        mfmsr   r0
        li      r2,MSR_RI
        andc    r0,r0,r2
        mtmsrd  r0,1

        ld      r0,_MSR(r1)
        mtspr   SPRN_SRR1,r0

        ld      r2,_CCR(r1)
        mtcrf   0xFF,r2
        ld      r2,_NIP(r1)
        mtspr   SPRN_SRR0,r2

        ld      r0,GPR0(r1)
        ld      r2,GPR2(r1)
        ld      r3,GPR3(r1)
        ld      r4,GPR4(r1)
        ld      r1,GPR1(r1)

        rfid
        b       .       /* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
        andi.   r0,r3,MSR_PR    /* Returning to user mode? */
        bne     user_work
        /* Check that preempt_count() == 0 and interrupts are enabled */
        lwz     r8,TI_PREEMPT(r9)
        cmpwi   cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        ld      r0,SOFTE(r1)
        cmpdi   r0,0
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
        andi.   r0,r3,MSR_EE
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
        crandc  eq,cr1*4+eq,eq
        bne     restore
        /* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        li      r0,1
        stb     r0,PACAPROCENABLED(r13)
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1           /* reenable interrupts */
        bl      .preempt_schedule
        mfmsr   r10
        clrrdi  r9,r1,THREAD_SHIFT
        rldicl  r10,r10,48,1    /* disable interrupts again */
        rotldi  r10,r10,16
        mtmsrd  r10,1
        ld      r4,TI_FLAGS(r9)
        andi.   r0,r4,_TIF_NEED_RESCHED
        bne     1b
        b       restore

user_work:
#endif
        /* Enable interrupts */
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1

        andi.   r0,r4,_TIF_NEED_RESCHED
        beq     1f
        bl      .schedule
        b       .ret_from_except_lite

1:      bl      .save_nvgprs
        li      r3,0
        addi    r4,r1,STACK_FRAME_OVERHEAD
        bl      .do_signal
        b       .ret_from_except

unrecov_restore:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
        b       unrecov_restore
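        /* .unrecoverable_exception is not expected to return; the
         * branch back is only a safety net. */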

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */

        /* Because RTAS is running in 32b mode, it clobbers the high order half
         * of all registers that it saves.  We therefore save those registers
         * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
         */
        SAVE_GPR(2, r1)                 /* Save the TOC */
        SAVE_GPR(13, r1)                /* Save paca */
        SAVE_8GPRS(14, r1)              /* Save the non-volatiles */
        SAVE_10GPRS(22, r1)             /* ditto */

        mfcr    r4
        std     r4,_CCR(r1)
        mfctr   r5
        std     r5,_CTR(r1)
        mfspr   r6,SPRN_XER
        std     r6,_XER(r1)
        mfdar   r7
        std     r7,_DAR(r1)
        mfdsisr r8
        std     r8,_DSISR(r1)
        mfsrr0  r9
        std     r9,_SRR0(r1)
        mfsrr1  r10
        std     r10,_SRR1(r1)

        /* Temporary workaround to clear CR until RTAS can be modified to
         * ignore all bits.
         */
        li      r0,0
        mtcr    r0

        /* There is no way it is acceptable to get here with interrupts enabled,
         * check it with the asm equivalent of WARN_ON
         */
        mfmsr   r6
        andi.   r0,r6,MSR_EE
1:      tdnei   r0,0
.section __bug_table,"a"
        .llong  1b,__LINE__ + 0x1000000, 1f, 2f
.previous
.section .rodata,"a"
1:      .asciz  __FILE__
2:      .asciz  "enter_rtas"
.previous

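/*
 * tdnei traps if r0 != 0, i.e. if MSR_EE was set.  The __bug_table
 * entry records the trap address, line, file and function; the
 * 0x1000000 added to __LINE__ appears to mark the entry as a warning
 * rather than a BUG, so the trap handler prints a report and continues.
 */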
        /* Unfortunately, the stack pointer and the MSR are also clobbered,
         * so they are saved in the PACA which allows us to restore
         * our original state after RTAS returns.
         */
        std     r1,PACAR1(r13)
        std     r6,PACASAVEDMSR(r13)

        /* Setup our real return addr */
        LOAD_REG_ADDR(r4,.rtas_return_loc)
        clrldi  r4,r4,2                 /* convert to realmode address */
        mtlr    r4

        li      r0,0
        ori     r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
        andc    r0,r6,r0

        li      r9,1
        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
        ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
        andc    r6,r0,r9
        ori     r6,r6,MSR_RI
        sync                            /* disable interrupts so SRR0/1 */
        mtmsrd  r0                      /* don't get trashed */

        LOAD_REG_ADDR(r4, rtas)
        ld      r5,RTASENTRY(r4)        /* get the rtas->entry value */
        ld      r4,RTASBASE(r4)         /* get the rtas->base value */

        mtspr   SPRN_SRR0,r5
        mtspr   SPRN_SRR1,r6
        rfid
        b       .       /* prevent speculative execution */

685
686_STATIC(rtas_return_loc)
687 /* relocation is off at this point */
688 mfspr r4,SPRN_SPRG3 /* Get PACA */
e58c3495 689 clrldi r4,r4,2 /* convert to realmode address */
9994a338
PM
690
691 mfmsr r6
692 li r0,MSR_RI
693 andc r6,r6,r0
694 sync
695 mtmsrd r6
696
697 ld r1,PACAR1(r4) /* Restore our SP */
e58c3495 698 LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
9994a338
PM
699 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
700
701 mtspr SPRN_SRR0,r3
702 mtspr SPRN_SRR1,r4
703 rfid
704 b . /* prevent speculative execution */
705
_STATIC(rtas_restore_regs)
        /* relocation is on at this point */
        REST_GPR(2, r1)                 /* Restore the TOC */
        REST_GPR(13, r1)                /* Restore paca */
        REST_8GPRS(14, r1)              /* Restore the non-volatiles */
        REST_10GPRS(22, r1)             /* ditto */

        mfspr   r13,SPRN_SPRG3

        ld      r4,_CCR(r1)
        mtcr    r4
        ld      r5,_CTR(r1)
        mtctr   r5
        ld      r6,_XER(r1)
        mtspr   SPRN_XER,r6
        ld      r7,_DAR(r1)
        mtdar   r7
        ld      r8,_DSISR(r1)
        mtdsisr r8
        ld      r9,_SRR0(r1)
        mtsrr0  r9
        ld      r10,_SRR1(r1)
        mtsrr1  r10

        addi    r1,r1,RTAS_FRAME_SIZE   /* Unstack our frame */
        ld      r0,16(r1)               /* get return address */

        mtlr    r0
        blr                             /* return to caller */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */

        /* Because PROM is running in 32b mode, it clobbers the high order half
         * of all registers that it saves.  We therefore save those registers
         * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
         */
        SAVE_8GPRS(2, r1)
        SAVE_GPR(13, r1)
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
        mfcr    r4
        std     r4,_CCR(r1)
        mfctr   r5
        std     r5,_CTR(r1)
        mfspr   r6,SPRN_XER
        std     r6,_XER(r1)
        mfdar   r7
        std     r7,_DAR(r1)
        mfdsisr r8
        std     r8,_DSISR(r1)
        mfsrr0  r9
        std     r9,_SRR0(r1)
        mfsrr1  r10
        std     r10,_SRR1(r1)
        mfmsr   r11
        std     r11,_MSR(r1)

        /* Get the PROM entrypoint */
        ld      r0,GPR4(r1)
        mtlr    r0

        /* Switch MSR to 32 bits mode
         */
        mfmsr   r11
        li      r12,1
        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
        andc    r11,r11,r12
        li      r12,1
        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
        andc    r11,r11,r12
        mtmsrd  r11
        isync

        /* Restore arguments & enter PROM here... */
        ld      r3,GPR3(r1)
        blrl

        /* Just make sure that r1's top 32 bits didn't get
         * corrupted by OF
         */
        rldicl  r1,r1,0,32

        /* Restore the MSR (back to 64 bits) */
        ld      r0,_MSR(r1)
        mtmsrd  r0
        isync

        /* Restore other registers */
        REST_GPR(2, r1)
        REST_GPR(13, r1)
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)
        ld      r4,_CCR(r1)
        mtcr    r4
        ld      r5,_CTR(r1)
        mtctr   r5
        ld      r6,_XER(r1)
        mtspr   SPRN_XER,r6
        ld      r7,_DAR(r1)
        mtdar   r7
        ld      r8,_DSISR(r1)
        mtdsisr r8
        ld      r9,_SRR0(r1)
        mtsrr0  r9
        ld      r10,_SRR1(r1)
        mtsrr1  r10

        addi    r1,r1,PROM_FRAME_SIZE
        ld      r0,16(r1)
        mtlr    r0
        blr

#endif /* CONFIG_PPC_MULTIPLATFORM */