/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/* The 601 only has IBATs; cr0.eq is set on the 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	beq	1f;			\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;	\
1:

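/*
 * For reference: each 16-byte BATS[] entry set up by MMU_init holds
 * { IBATnU, IBATnL, DBATnU, DBATnL }.  Roughly, in C (an illustrative
 * sketch, not the kernel's code):
 *
 *	mtspr(IBATnU, 0);			// invalidate while updating
 *	mtspr(DBATnU, 0);
 *	mtspr(IBATnU, bat[n].ibatu);
 *	mtspr(IBATnL, bat[n].ibatl);
 *	if (!is_601) {				// cr0.eq is set on the 601
 *		mtspr(DBATnU, bat[n].dbatu);	// 601 has no DBATs
 *		mtspr(DBATnL, bat[n].dbatl);
 *	}
 */
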
	.text
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_32.S",N_SO,0,0,0f
0:
	.globl	_stext
_stext:

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
	.text
	.globl	_start
_start:
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 * -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *  r3: ptr to residual data
 *  r4: initrd_start or if no initrd then 0
 *  r5: initrd_end - unused if r4 is 0
 *  r6: Start of command line string
 *  r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
#ifdef CONFIG_PPC_MULTIPLATFORM
	cmpwi	0,r5,0
	beq	1f
	bl	prom_init
	trap
#endif

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */
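
/*
 * Note: 0x426f6f58 is simply the ASCII bytes 'B','o','o','X', i.e. the
 * BootX magic value described for r3 in the PMAC boot notes above.
 */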

1:	mr	r31,r3			/* save parameters */
	mr	r30,r4
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	bl	reloc_offset
	bl	init_idle_6xx
#endif /* CONFIG_6xx */


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	cmpwi	0,r4,0			/* are we already running at 0? */
	bne	relocate_kernel
/*
 * we now have the 1st 16M of ram mapped with the bats.
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	RFI				/* enables MMU */
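
/*
 * The SRR0/SRR1 + RFI sequence above is the standard 6xx idiom for
 * changing MSR and PC atomically: RFI loads MSR from SRR1 and jumps
 * to SRR0, so address translation comes on exactly at start_here.
 */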

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0		/* for prep bootloader */
	li	r3,1		/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3		/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
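
/*
 * The hold/release handshake, roughly (illustrative sketch, not the
 * kernel's C code):
 *
 *	// secondary, parked at the vector above with the MMU off:
 *	*__secondary_hold_acknowledge = my_cpu_nr;	// "I'm here"
 *	while (*(u32 *)0 != my_cpu_nr)
 *		;			// spin on physical address 0
 *	__secondary_start();
 *
 * The master releases a secondary by writing that cpu's number to
 * physical address 0.
 */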

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
#define EXCEPTION_PROLOG	\
	mtspr	SPRN_SPRG0,r10;	\
	mtspr	SPRN_SPRG1,r11;	\
	mfcr	r10;		\
	EXCEPTION_PROLOG_1;	\
	EXCEPTION_PROLOG_2

#define EXCEPTION_PROLOG_1	\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
	andi.	r11,r11,MSR_PR;	\
	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
	beq	1f;		\
	mfspr	r11,SPRN_SPRG3;	\
	lwz	r11,THREAD_INFO-THREAD(r11);	\
	addi	r11,r11,THREAD_SIZE;	\
	tophys(r11,r11);	\
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */


#define EXCEPTION_PROLOG_2	\
	CLR_TOP32(r11);		\
	stw	r10,_CCR(r11);		/* save registers */ \
	stw	r12,GPR12(r11);	\
	stw	r9,GPR9(r11);	\
	mfspr	r10,SPRN_SPRG0;	\
	stw	r10,GPR10(r11);	\
	mfspr	r12,SPRN_SPRG1;	\
	stw	r12,GPR11(r11);	\
	mflr	r10;		\
	stw	r10,_LINK(r11);	\
	mfspr	r12,SPRN_SRR0;	\
	mfspr	r9,SPRN_SRR1;	\
	stw	r1,GPR1(r11);	\
	stw	r1,0(r11);	\
	tovirt(r1,r11);			/* set new kernel sp */	\
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
	MTMSRD(r10);			/* (except for mach check in rtas) */ \
	stw	r0,GPR0(r11);	\
	lis	r10,0x7265;		/* put exception frame marker */ \
	addi	r10,r10,0x6773;	\
	stw	r10,8(r11);	\
	SAVE_4GPRS(3, r11);	\
	SAVE_2GPRS(7, r11)
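
/*
 * Aside: the frame marker stored at offset 8 is 0x72656773, which is
 * just the ASCII bytes "regs" -- handy when eyeballing stack dumps.
 */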

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#define EXCEPTION(n, label, hdlr, xfer)	\
	. = n;			\
label:				\
	EXCEPTION_PROLOG;	\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
	li	r10,trap;	\
	stw	r10,_TRAP(r11);	\
	li	r10,MSR_KERNEL;	\
	copyee(r10, r9);	\
	bl	tfer;		\
i##n:				\
	.long	hdlr;		\
	.long	ret

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
			  ret_from_except)
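
/*
 * How to read these: the _EE variants copy MSR_EE (bit 16) from the
 * interrupted context's SRR1 via COPY_EE, so the handler runs with
 * interrupts enabled iff they were enabled before.  The _LITE variants
 * store n+1 in _TRAP: the set low bit records that the non-volatile
 * GPRs were not saved, so transfer_to_handler can take the quick path.
 * The i##n word pair (handler, return address) lands right after the
 * bl, where the transfer code picks it up through the link register.
 */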

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done. */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in SPRG2.  The machine check handler checks SPRG2 and uses its
 * value if it is non-zero.  If we ever needed to free up SPRG2,
 * we could use a field in the thread_info or thread_struct instead.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	mtspr	SPRN_SPRG0,r10
	mtspr	SPRN_SPRG1,r11
	mfcr	r10
#ifdef CONFIG_PPC_CHRP
	mfspr	r11,SPRN_SPRG2
	cmpwi	0,r11,0
	bne	7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	mfspr	r4,SPRN_SPRG2
	cmpwi	cr1,r4,0
	bne	cr1,1f
#endif
	EXC_XFER_STD(0x200, machine_check_exception)
#ifdef CONFIG_PPC_CHRP
1:	b	machine_check_in_rtas
#endif

/* Data access exception. */
	. = 0x300
DataAccess:
	EXCEPTION_PROLOG
	mfspr	r10,SPRN_DSISR
	andis.	r0,r10,0xa470		/* weird error? */
	bne	1f			/* if not, try to put a PTE */
	mfspr	r4,SPRN_DAR		/* into the hash table */
	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
1:	stw	r10,_DSISR(r11)
	mr	r5,r10
	mfspr	r4,SPRN_DAR
	EXC_XFER_EE_LITE(0x300, handle_page_fault)


/* Instruction access exception. */
	. = 0x400
InstructionAccess:
	EXCEPTION_PROLOG
	andis.	r0,r9,0x4000		/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
	bl	hash_page
1:	mr	r4,r12
	mr	r5,r9
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	bne	load_up_fpu		/* if from user, just load it up */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)

/* System call */
	. = 0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	b	PerformanceMonitor

	. = 0xf20
	b	AltiVecUnavailable

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,SPRN_IMISS
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
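
/*
 * The PP computation above, roughly in C (an illustrative sketch of
 * what the rlwinm/rlwimi dance produces):
 *
 *	writable = (pte & _PAGE_RW) && (pte & _PAGE_DIRTY);
 *	if (pte & _PAGE_USER)
 *		pp = writable ? 2 : 3;	// user RW : user read-only
 *	else
 *		pp = 0;			// supervisor read/write
 */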
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,SPRN_DMISS
	tlbld	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3
	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed/dirty bits) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	li	r1,0xe15		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? 2: 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,SPRN_DMISS
	tlbld	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)

	.globl mol_trampoline
	.set mol_trampoline, i0x2f00

	. = 0x3000

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	bne	load_up_altivec		/* if from user, just load it up */
#endif /* CONFIG_ALTIVEC */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)

#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_altivec:
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)
	addis	r3,r6,last_task_used_altivec@ha
	lwz	r4,last_task_used_altivec@l(r3)
	cmpwi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
	SAVE_32VRS(0,r10,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10	/* disable altivec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	oris	r9,r9,MSR_VEC@h
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r10,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	b	fast_exception_return

/*
 * AltiVec unavailable trap from kernel - print a message, but let
 * the task use AltiVec in the kernel until it returns to user mode.
 */
KernelAltiVec:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_VEC@h
	stw	r3,_MSR(r1)	/* enable use of AltiVec after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"AltiVec used in kernel (task=%p, pc=%x) \n"
	.align	4,0

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */

	.globl	giveup_altivec
giveup_altivec:
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
	cmpwi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpwi	0,r5,0
	SAVE_32VRS(0, r4, r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_altivec@ha
	stw	r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */

/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu
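
/*
 * Why two passes: copy_and_flush executes from the old location, which
 * the full copy to address 0 could clobber mid-copy (e.g. on CHRP we
 * are loaded at 0x10000).  So we first copy just the first 0x4000
 * bytes (which include this code), jump into that copy, and let it
 * copy the remainder of the kernel safely.
 */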

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
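
/*
 * Equivalent C, as an illustrative sketch only (not kernel code):
 *
 *	do {
 *		for (i = 0; i < L1_CACHE_BYTES; i += 4)	// one cache line
 *			*(u32 *)(dst + off + i) = *(u32 *)(src + off + i);
 *		dcbst(dst + off);	// push the line to memory
 *		sync();
 *		icbi(dst + off);	// invalidate the stale icache line
 *		off += L1_CACHE_BYTES;
 *	} while (off < limit);
 */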

#ifdef CONFIG_SMP
#ifdef CONFIG_GEMINI
	.globl	__secondary_start_gemini
__secondary_start_gemini:
	mfspr	r4,SPRN_HID0
	ori	r4,r4,HID0_ICFI
	li	r3,0
	ori	r3,r3,HID0_ICE
	andc	r4,r4,r3
	mtspr	SPRN_HID0,r4
	sync
	b	__secondary_start
#endif /* CONFIG_GEMINI */

	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx
#endif /* CONFIG_6xx */

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	tophys(r1,r1)
	lwz	r1,secondary_ti@l(r1)
	tophys(r2,r1)
	lwz	r2,TI_TASK(r2)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */

/*
 * Those generic dummy functions are kept for CPUs not
 * included in CONFIG_6xx
 */
#if !defined(CONFIG_6xx)
_GLOBAL(__save_cpu_setup)
	blr
_GLOBAL(__restore_cpu_setup)
	blr
#endif /* !defined(CONFIG_6xx) */


/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,0x111	/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
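
	/*
	 * Aside: a 32-bit PPC has 16 segment registers, each covering
	 * 256MB of the address space (hence the 0x10000000 step in r4);
	 * bumping the VSID by 0x111 per segment mirrors what
	 * set_context() does below.
	 */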

/* Load the BAT registers with the values set up by MMU_init.
   MMU_init takes care of whether we're on a 601 or not. */
	mfpvr	r3
	srwi	r3,r3,16
	cmpwi	r3,1
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
	blr

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	FIX_SRR1(r3,r5)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	SYNC
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

	/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI

/*
 * Set up the segment registers for a new context.
 */
_GLOBAL(set_context)
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
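
/*
 * set_context() in rough C form (an illustrative sketch only):
 *
 *	sr = 0x60000000 | (((context * 897) << 4) & 0x00fffff0);
 *	for (seg = 0; seg < NUM_USER_SEGMENTS; seg++) {
 *		set_sr(seg << 28, sr);			// mtsrin
 *		sr = 0x60000000 | ((sr + 0x111) & 0x00ffffff);
 *	}
 *
 * (0x60000000 are the Ks/Ku bits set above.)  The multiply by 897
 * skews the VSIDs so that consecutive contexts don't land in the
 * same hash table buckets.
 */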

/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear BAT3 and BAT4, so
 * this makes sure it's done.
 * -- Cort
 */
clear_bats:
	li	r10,0
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	r9, 1
	beq	1f

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
1:
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
	blr

flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI

/*
 * Use the first BAT registers to map the start of RAM to KERNELBASE
 * (two 8MB BATs on the 601, one 256MB BAT pair on other 6xx).
 * From this point on we can't safely call OF any more.
 */
initial_bats:
	lis	r11,KERNELBASE@h
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	bne	4f
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
	mtspr	SPRN_IBAT1U,r9
	mtspr	SPRN_IBAT1L,r10
	isync
	blr

4:	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
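
/*
 * BAT encoding cheat sheet for the 604 path above: the upper BAT holds
 * BEPI | BL | Vs/Vp, so BL_256M<<2|0x2 selects a 256MB block valid in
 * supervisor mode; the lower BAT holds BRPN | WIMG | PP, so 0x12 is
 * memory-coherent (M) plus read/write (PP=10), and plain 0x2 is
 * read/write without coherency.
 */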


#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	beq	1f
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
1:	mtspr	SPRN_IBAT3L,r8
	mtspr	SPRN_IBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

	.globl	intercept_table
intercept_table:
	.long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
	.long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
	.long 0, 0, 0, i0x1300, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8