arch/powerpc/kernel/entry_32.S
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
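
/*
 * li can only materialize a signed 16-bit immediate, so an MSR value
 * at or above 0x10000 has to be built with a lis/ori pair, e.g.
 * LOAD_MSR_KERNEL(r10, MSR_KERNEL).
 */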

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_FSL_BOOKE
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_FSL_BOOKE */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	mfspr	r8,SPRN_SPRG3
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
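	/* point ksp_limit into the current (exception) stack: take the
	 * stack-page bits from r1 and keep the offset bits of the old
	 * limit, so the stack-overflow check tests the right stack */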
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	mfspr	r8,SPRN_SPRG3
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
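/*
 * On entry (as set up by the exception prologs): r11 points to the
 * exception frame, r12 holds the saved SRR0 (NIP), r9 the saved SRR1
 * (MSR), and r10 the MSR value the handler should run with.
 */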
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

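/*
 * Syscall convention on entry: r0 holds the syscall number and
 * r3..r8 hold the arguments; the result comes back in r3, with
 * cr0.SO set to flag an error to the C library.
 */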
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						\
	/* avoid any possible TLB misses here by turning off MSR.DR, we	\
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						\
	mtmsr	r10;							\
	isync;								\
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

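/*
 * Common return path for the critical/debug/machine-check exception
 * levels: exc_lvl_srr0/exc_lvl_srr1 name the SPR pair to return
 * through and exc_lvl_rfi the matching return instruction.
 */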
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

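/* Reload a saved SRRx pair from the exception frame back into its SPRs. */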
#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_FSL_BOOKE)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG3
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG3
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG3
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG3
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
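/*
 * global_dbcr0 holds two words per CPU: the saved kernel DBCR0
 * value at offset 0 and a nesting count at offset 4 (decremented
 * again on the way back into the kernel in transfer_to_handler).
 */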
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
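/* On entry, r3 holds the (physical) address of the RTAS argument buffer. */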
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
/*
 * It is required that _mcount on PPC32 must preserve the
 * link register.  But we have r0 to play with.  We use r0
 * to push the return address back to the caller of mcount
 * into the ctr register, restore the link register and
 * then jump back using the ctr register.
 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	/* Based on objdump output from glibc */
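	/* The 48-byte frame saves the volatile argument registers
	 * r3-r10 plus CR and the caller's LR, so the traced function
	 * later sees exactly the register state it was entered with */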
	stwu	r1,-48(r1)
	stw	r3, 12(r1)
	stw	r4, 16(r1)
	stw	r5, 20(r1)
	stw	r6, 24(r1)
	mflr	r3
	lwz	r4, 52(r1)
	mfcr	r5
	stw	r7, 28(r1)
	stw	r8, 32(r1)
	stw	r9, 36(r1)
	stw	r10,40(r1)
	stw	r3, 44(r1)
	stw	r5, 8(r1)
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	lwz	r6, 8(r1)
	lwz	r0, 44(r1)
	lwz	r3, 12(r1)
	mtctr	r0
	lwz	r4, 16(r1)
	mtcr	r6
	lwz	r5, 20(r1)
	lwz	r6, 24(r1)
	lwz	r0, 52(r1)
	lwz	r7, 28(r1)
	lwz	r8, 32(r1)
	mtlr	r0
	lwz	r9, 36(r1)
	lwz	r10,40(r1)
	addi	r1, r1, 48
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)
	stwu	r1,-48(r1)
	stw	r3, 12(r1)
	stw	r4, 16(r1)
	stw	r5, 20(r1)
	stw	r6, 24(r1)
	mflr	r3
	lwz	r4, 52(r1)
	mfcr	r5
	stw	r7, 28(r1)
	stw	r8, 32(r1)
	stw	r9, 36(r1)
	stw	r10,40(r1)
	stw	r3, 44(r1)
	stw	r5, 8(r1)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

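	/* call the currently registered trace function indirectly */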
	mtctr	r5
	bctrl

	nop

	lwz	r6, 8(r1)
	lwz	r0, 44(r1)
	lwz	r3, 12(r1)
	mtctr	r0
	lwz	r4, 16(r1)
	mtcr	r6
	lwz	r5, 20(r1)
	lwz	r6, 24(r1)
	lwz	r0, 52(r1)
	lwz	r7, 28(r1)
	lwz	r8, 32(r1)
	mtlr	r0
	lwz	r9, 36(r1)
	lwz	r10,40(r1)
	addi	r1, r1, 48
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_FUNCTION_TRACER */