/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	crclr	so
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
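	/*
	 * Note: these three stores implement the 64-bit "lazy" interrupt
	 * disabling scheme (see arch/powerpc/kernel/irq.c): PACASOFTIRQEN
	 * is the soft-enable flag tested by local_irq_disable(), while
	 * PACAHARDIRQEN mirrors whether MSR_EE is actually on.  Both are
	 * set to 1 here because syscalls run fully enabled, and SOFTE(r1)
	 * records that state for the return path.
	 */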
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
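/*
 * Layout assumed by the dispatch below: each .SYS_CALL_TABLE slot is
 * 16 bytes, pairing the 64-bit handler (offset 0) with the 32-bit
 * compat handler (offset 8) -- hence the "slwi r0,r0,4" scaling and
 * the "addi r11,r11,8" for 32-bit tasks.
 */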
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
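	/*
	 * The rldicl/rotldi pair above is the usual mask-free idiom for
	 * clearing MSR_EE: rotate the MSR left by 48 so EE becomes the
	 * top bit, clear it via the rldicl mask, then rotate left by the
	 * remaining 16 bits to put everything back in place.
	 */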
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont
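/*
 * Worth noting: the PowerPC syscall ABI signals failure out-of-band.
 * The error value is negated back to a positive errno, and the oris
 * above sets 0x10000000 in the saved CR image, i.e. the summary
 * overflow (SO) bit of CR0, which userspace libc tests to detect a
 * failed syscall, rather than a negative return value alone.
 */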

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont
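/*
 * Rough C-side contract assumed by the reload sequence above (see
 * do_syscall_trace_enter() in arch/powerpc/kernel/ptrace.c): the
 * tracer may rewrite both the syscall number and the arguments in the
 * saved pt_regs, so r3-r8 are reloaded from the frame and the
 * (possibly changed) call number is taken from the return value.
 */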

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

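	/*
	 * The ldarx/stdcx. loop above is the standard lock-free
	 * read-modify-write, roughly an atomic
	 * "ti->flags &= ~_TIF_PERSYSCALL_MASK": the store-conditional
	 * fails and the loop retries if TI_FLAGS changed between the
	 * load-reserve and the store.
	 */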
4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
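	/*
	 * The low bit of the _TRAP word doubles as an "NVGPRs not yet
	 * saved" flag: entry code sets it (e.g. trap 0xc01 for syscalls)
	 * and it is cleared here once r14-r31 are in the frame, so a
	 * repeat call returns early through the beqlr- above.
	 */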

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
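/*
 * Caller-side sketch, assuming the usual __switch_to() usage in
 * process.c (simplified):
 *
 *	last = _switch(old_thread, new_thread);
 *
 * Both arguments are struct thread_struct pointers; the old task's
 * task_struct is recovered for the return value below via
 * "addi r3,r3,-THREAD".
 */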
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
	ld	r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f	/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	stdcx.	r0,0,r1		/* to clear the reservation */

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
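	/*
	 * The crandc above computes eq = (preempt_count == 0) &&
	 * (SOFTE != 0), so the bne skips preemption unless the count is
	 * zero and interrupts were soft-enabled.
	 */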
	/* here we are preempting the current task */
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* Note: we just clobbered r10 which used to contain the previous
	 * MSR before the hard-disabling done by the caller of do_work.
	 * We don't have that value anymore, but it doesn't matter as
	 * we will hard-enable unconditionally, we can just reload the
	 * current MSR into r10
	 */
	mfmsr	r10
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
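/*
 * C-side sketch of how this is reached, assuming the usual rtas_call()
 * path in arch/powerpc/kernel/rtas.c: the argument in r3 is the
 * *physical* address of the rtas_args block, roughly
 * enter_rtas(__pa(&rtas.args)), since RTAS runs with the MMU off.
 */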
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
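	/* r6 now holds the MSR RTAS will be entered with: 32-bit mode
	 * (MSR_SF cleared), MMU off (MSR_IR/MSR_DR cleared), FP and
	 * single-step/branch tracing disabled, with only MSR_RI kept;
	 * the rfid below installs it atomically with the jump to
	 * rtas->entry.
	 */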
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

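	/*
	 * Calling convention assumed here: r3 carries the address of the
	 * Open Firmware argument block and r4 the OF client-interface
	 * entrypoint (moved to LR above), so the blrl below transfers
	 * control into firmware.
	 */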
	/* Switch the MSR to 32-bit mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

884#ifdef CONFIG_FTRACE
885#ifdef CONFIG_DYNAMIC_FTRACE
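/*
 * With dynamic ftrace, the "bl ftrace_stub" instructions at the
 * mcount_call and ftrace_call sites below are placeholders: the ftrace
 * machinery patches them at runtime to call the active tracer, and
 * patches them back to the stub when tracing is turned off.
 */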
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl

	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif
#endif