/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *   Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 *   Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

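/*
 * On entry to system_call_common the low-level exception vector has
 * left r13 pointing at the PACA, r9 holding the caller's r13, r11
 * holding SRR0 (the NIP) and r12 holding SRR1 (the MSR), with the
 * syscall number in r0 and the arguments in r3-r8, as the stores
 * below rely on.
 */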
	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	crclr	so
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
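	/*
	 * The marker value lets the stack unwinder recognize this
	 * frame as one containing a full saved register set.
	 */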
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
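	/*
	 * Interrupts are soft- and hard-enabled from here on as far
	 * as the lazy-disable scheme is concerned: PACASOFTIRQEN and
	 * PACAHARDIRQEN track that state in the PACA, while SOFTE(r1)
	 * records it in the frame for the exception return path.
	 */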
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
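/*
 * Each syscall occupies a pair of 8-byte slots in .sys_call_table
 * (the 64-bit handler followed by its 32-bit counterpart), hence
 * the shift-by-4 below (multiply by 16) to index the table and the
 * offset of 8 to select the 32-bit column.
 */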
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont
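/*
 * This implements the PowerPC syscall error convention: handlers
 * return -errno in r3, and for values between -_LAST_ERRNO and -1
 * the exit path negates r3 and sets the SO bit of CR0, which is
 * what userspace tests to detect failure.  Values outside that
 * range (e.g. large successful mmap returns) pass through unchanged.
 */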

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
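/*
 * Bit 0 of the _TRAP word is the "nvgprs not saved" flag: it is set
 * when a frame is created without the non-volatile registers, and
 * save_nvgprs clears it once r14-r31 have been written out, so a
 * second call becomes a no-op.
 */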

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
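	/*
	 * FP (and, where present, Altivec/VSX) is simply disabled in
	 * the outgoing MSR: the next use traps into the corresponding
	 * "unavailable" exception, which reloads that unit's state
	 * lazily for the new task.
	 */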
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
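	/*
	 * The slbie (issued twice to work around the POWER5 < DD2.1
	 * erratum noted above) evicts any stale translation for the
	 * new stack's ESID before slbmte installs the bolted entry.
	 */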

2:
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack.  */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

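	/*
	 * The rlwimi above folds MSR_PR into the flag mask so that
	 * _TIF_SIGPENDING is only honoured when returning to user
	 * mode; a kernel-mode return only checks _TIF_NEED_RESCHED.
	 */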
#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
	ld	r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f	/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */
4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	stdcx.	r0,0,r1		/* to clear the reservation */

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* Note: we just clobbered r10 which used to contain the previous
	 * MSR before the hard-disabling done by the caller of do_work.
	 * We don't have that value anymore, but it doesn't matter as
	 * we will hard-enable unconditionally, we can just reload the
	 * current MSR into r10
	 */
	mfmsr	r10
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

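	/* r0 now holds our MSR with EE, SE, BE and RI cleared (and is
	 * already in force so SRR0/SRR1 survive until the rfid), while
	 * r6 holds the MSR RTAS will run with: SF, IR, DR, FP and
	 * FE0/FE1 cleared as well -- 32-bit real mode -- but RI set.
	 */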
	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch the MSR to 32-bit mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync
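	/* Clearing SF puts the processor in 32-bit mode for PROM
	 * itself; clearing ISF makes any interrupt taken while PROM
	 * runs arrive in 32-bit mode too, on CPUs that implement ISF.
	 */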

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Make sure the top 32 bits of r1 didn't get corrupted by OF */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr
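/*
 * With dynamic ftrace the "bl ftrace_stub" instructions at
 * mcount_call and ftrace_call are patch sites that the ftrace core
 * rewrites at runtime to enable or disable tracing.  The 112-byte
 * frame is the minimal ppc64 ABI stack frame, so 128(r1) is the LR
 * save slot of the caller's frame once ours has been pushed.
 */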

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)	/* load the current tracer function pointer */
	ld	r5,0(r5)	/* get entry point from its function descriptor */
	mtctr	r5
	bctrl

	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif
#endif