1 /*
2 * arch/ppc64/kernel/head.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 *
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26 #define SECONDARY_PROCESSORS
27
28 #include <linux/config.h>
29 #include <linux/threads.h>
30 #include <asm/processor.h>
31 #include <asm/page.h>
32 #include <asm/mmu.h>
33 #include <asm/naca.h>
34 #include <asm/systemcfg.h>
35 #include <asm/ppc_asm.h>
36 #include <asm/offsets.h>
37 #include <asm/bug.h>
38 #include <asm/cputable.h>
39 #include <asm/setup.h>
40 #include <asm/hvcall.h>
41 #include <asm/iSeries/LparMap.h>
42
43 #ifdef CONFIG_PPC_ISERIES
44 #define DO_SOFT_DISABLE
45 #endif
46
47 /*
48 * hcall interface to pSeries LPAR
49 */
50 #define H_SET_ASR 0x30
51
52 /*
53 * We layout physical memory as follows:
54 * 0x0000 - 0x00ff : Secondary processor spin code
55 * 0x0100 - 0x2fff : pSeries Interrupt prologs
56 * 0x3000 - 0x3fff : Interrupt support
57 * 0x4000 - 0x4fff : NACA
58 * 0x6000 : iSeries and common interrupt prologs
59 * 0x9000 - 0x9fff : Initial segment table
60 */
61
62 /*
63 * SPRG Usage
64 *
65 * Register Definition
66 *
67 * SPRG0 reserved for hypervisor
68 * SPRG1 temp - used to save gpr
69 * SPRG2 temp - used to save gpr
70 * SPRG3 virt addr of paca
71 */
72
73 /*
74 * Entering into this code we make the following assumptions:
75 * For pSeries:
76 * 1. The MMU is off & open firmware is running in real mode.
77 * 2. The kernel is entered at __start
78 *
79 * For iSeries:
80 * 1. The MMU is on (as it always is for iSeries)
81 * 2. The kernel is entered at system_reset_iSeries
82 */
83
84 .text
85 .globl _stext
86 _stext:
87 #ifdef CONFIG_PPC_MULTIPLATFORM
88 _GLOBAL(__start)
89 /* NOP this out unconditionally */
90 BEGIN_FTR_SECTION
91 b .__start_initialization_multiplatform
92 END_FTR_SECTION(0, 1)
93 #endif /* CONFIG_PPC_MULTIPLATFORM */
94
95 /* Catch branch to 0 in real mode */
96 trap
97 #ifdef CONFIG_PPC_ISERIES
98 /*
99 * At offset 0x20, there is a pointer to iSeries LPAR data.
100 * This is required by the hypervisor
101 */
102 . = 0x20
103 .llong hvReleaseData-KERNELBASE
104
105 /*
106 * At offset 0x28 and 0x30 are offsets to the msChunks
107 * array (used by the iSeries LPAR debugger to do translation
108 * between physical addresses and absolute addresses) and
109 * to the pidhash table (also used by the debugger)
110 */
111 .llong msChunks-KERNELBASE
112 .llong 0 /* pidhash-KERNELBASE SFRXXX */
113
114 /* Offset 0x38 - Pointer to start of embedded System.map */
115 .globl embedded_sysmap_start
116 embedded_sysmap_start:
117 .llong 0
118 /* Offset 0x40 - Pointer to end of embedded System.map */
119 .globl embedded_sysmap_end
120 embedded_sysmap_end:
121 .llong 0
122
123 #else /* CONFIG_PPC_ISERIES */
124
125 /* Secondary processors spin on this value until it goes to 1. */
126 .globl __secondary_hold_spinloop
127 __secondary_hold_spinloop:
128 .llong 0x0
129
130 /* Secondary processors write this value with their cpu # */
131 /* after they enter the spin loop immediately below. */
132 .globl __secondary_hold_acknowledge
133 __secondary_hold_acknowledge:
134 .llong 0x0
135
136 . = 0x60
137 /*
138 * The following code is used on pSeries to hold secondary processors
139 * in a spin loop after they have been freed from OpenFirmware, but
140 * before the bulk of the kernel has been relocated. This code
141 * is relocated to physical address 0x60 before prom_init is run.
142 * All of it must fit below the first exception vector at 0x100.
143 */
144 _GLOBAL(__secondary_hold)
145 mfmsr r24
146 ori r24,r24,MSR_RI
147 mtmsrd r24 /* RI on */
148
149 /* Grab our linux cpu number */
150 mr r24,r3
151
152 /* Tell the master cpu we're here */
153 /* Relocation is off & we are located at an address less */
154 /* than 0x100, so only need to grab low order offset. */
155 std r24,__secondary_hold_acknowledge@l(0)
156 sync
157
158 	/* All secondary cpus wait here until told to start. */
159 100: ld r4,__secondary_hold_spinloop@l(0)
160 cmpdi 0,r4,1
161 bne 100b
162
163 #ifdef CONFIG_HMT
164 b .hmt_init
165 #else
166 #ifdef CONFIG_SMP
167 mr r3,r24
168 b .pSeries_secondary_smp_init
169 #else
170 BUG_OPCODE
171 #endif
172 #endif
173 #endif
174
175 /* This value is used to mark exception frames on the stack. */
176 .section ".toc","aw"
177 exception_marker:
178 .tc ID_72656773_68657265[TC],0x7265677368657265
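	/*
	 * 0x7265677368657265 is ASCII "regshere"; stack walkers look for
	 * this marker to recognise exception frames on the stack.
	 */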
179 .text
180
181 /*
182 * The following macros define the code that appears as
183 * the prologue to each of the exception handlers. They
184 * are split into two parts to allow a single kernel binary
185 * to be used for pSeries and iSeries.
186 * LOL. One day... - paulus
187 */
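/*
 * Roughly: EXCEPTION_PROLOG_PSERIES runs in real mode and uses
 * SRR0/SRR1 + rfid to reach the virtual-mode *_common handler, while
 * EXCEPTION_PROLOG_ISERIES_1/2 run with relocation already on and pick
 * up SRR0/SRR1 from the lppaca.  Both paths then go through
 * EXCEPTION_PROLOG_COMMON, which builds the pt_regs frame on the
 * kernel stack.
 */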
188
189 /*
190 * We make as much of the exception code common between native
191 * exception handlers (including pSeries LPAR) and iSeries LPAR
192 * implementations as possible.
193 */
194
195 /*
196 * This is the start of the interrupt handlers for pSeries
197 * This code runs with relocation off.
198 */
199 #define EX_R9 0
200 #define EX_R10 8
201 #define EX_R11 16
202 #define EX_R12 24
203 #define EX_R13 32
204 #define EX_SRR0 40
205 #define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
206 #define EX_DAR 48
207 #define EX_LR 48 /* SLB miss saves LR, but not DAR */
208 #define EX_DSISR 56
209 #define EX_CCR 60
210
211 #define EXCEPTION_PROLOG_PSERIES(area, label) \
212 mfspr r13,SPRG3; /* get paca address into r13 */ \
213 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
214 std r10,area+EX_R10(r13); \
215 std r11,area+EX_R11(r13); \
216 std r12,area+EX_R12(r13); \
217 mfspr r9,SPRG1; \
218 std r9,area+EX_R13(r13); \
219 mfcr r9; \
220 clrrdi r12,r13,32; /* get high part of &label */ \
221 mfmsr r10; \
222 mfspr r11,SRR0; /* save SRR0 */ \
223 ori r12,r12,(label)@l; /* virt addr of handler */ \
224 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
225 mtspr SRR0,r12; \
226 mfspr r12,SRR1; /* and SRR1 */ \
227 mtspr SRR1,r10; \
228 rfid; \
229 b . /* prevent speculative execution */
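/*
 * Note on the tail of the prolog above: SRR0 is loaded with the virtual
 * address of the *_common handler and SRR1 with MSR|IR|DR|RI, so the rfid
 * both turns relocation on and branches to the handler; the interrupted
 * SRR0/SRR1 are carried in r11/r12 for EXCEPTION_PROLOG_COMMON.
 */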
230
231 /*
232 * This is the start of the interrupt handlers for iSeries
233 * This code runs with relocation on.
234 */
235 #define EXCEPTION_PROLOG_ISERIES_1(area) \
236 mfspr r13,SPRG3; /* get paca address into r13 */ \
237 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
238 std r10,area+EX_R10(r13); \
239 std r11,area+EX_R11(r13); \
240 std r12,area+EX_R12(r13); \
241 mfspr r9,SPRG1; \
242 std r9,area+EX_R13(r13); \
243 mfcr r9
244
245 #define EXCEPTION_PROLOG_ISERIES_2 \
246 mfmsr r10; \
247 ld r11,PACALPPACA+LPPACASRR0(r13); \
248 ld r12,PACALPPACA+LPPACASRR1(r13); \
249 ori r10,r10,MSR_RI; \
250 mtmsrd r10,1
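/*
 * On iSeries the hypervisor has already saved the interrupted SRR0/SRR1
 * into the lppaca, so the second half of the prolog reloads them from
 * there (into r11/r12) and only needs to turn MSR_RI back on.
 */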
251
252 /*
253 * The common exception prolog is used for all except a few exceptions
254 * such as a segment miss on a kernel address. We have to be prepared
255 * to take another exception from the point where we first touch the
256 * kernel stack onwards.
257 *
258 * On entry r13 points to the paca, r9-r13 are saved in the paca,
259 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
260 * SRR1, and relocation is on.
261 */
262 #define EXCEPTION_PROLOG_COMMON(n, area) \
263 andi. r10,r12,MSR_PR; /* See if coming from user */ \
264 mr r10,r1; /* Save r1 */ \
265 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
266 beq- 1f; \
267 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
268 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
269 bge- cr1,bad_stack; /* abort if it is */ \
270 std r9,_CCR(r1); /* save CR in stackframe */ \
271 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
272 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
273 std r10,0(r1); /* make stack chain pointer */ \
274 std r0,GPR0(r1); /* save r0 in stackframe */ \
275 std r10,GPR1(r1); /* save r1 in stackframe */ \
276 std r2,GPR2(r1); /* save r2 in stackframe */ \
277 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
278 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
279 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
280 ld r10,area+EX_R10(r13); \
281 std r9,GPR9(r1); \
282 std r10,GPR10(r1); \
283 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
284 ld r10,area+EX_R12(r13); \
285 ld r11,area+EX_R13(r13); \
286 std r9,GPR11(r1); \
287 std r10,GPR12(r1); \
288 std r11,GPR13(r1); \
289 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
290 mflr r9; /* save LR in stackframe */ \
291 std r9,_LINK(r1); \
292 mfctr r10; /* save CTR in stackframe */ \
293 std r10,_CTR(r1); \
294 mfspr r11,XER; /* save XER in stackframe */ \
295 std r11,_XER(r1); \
296 li r9,(n)+1; \
297 std r9,_TRAP(r1); /* set trap number */ \
298 li r10,0; \
299 ld r11,exception_marker@toc(r2); \
300 std r10,RESULT(r1); /* clear regs->result */ \
301 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
302
303 /*
304 * Exception vectors.
305 */
306 #define STD_EXCEPTION_PSERIES(n, label) \
307 . = n; \
308 .globl label##_pSeries; \
309 label##_pSeries: \
310 HMT_MEDIUM; \
311 mtspr SPRG1,r13; /* save r13 */ \
312 RUNLATCH_ON(r13); \
313 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
314
315 #define STD_EXCEPTION_ISERIES(n, label, area) \
316 .globl label##_iSeries; \
317 label##_iSeries: \
318 HMT_MEDIUM; \
319 mtspr SPRG1,r13; /* save r13 */ \
320 RUNLATCH_ON(r13); \
321 EXCEPTION_PROLOG_ISERIES_1(area); \
322 EXCEPTION_PROLOG_ISERIES_2; \
323 b label##_common
324
325 #define MASKABLE_EXCEPTION_ISERIES(n, label) \
326 .globl label##_iSeries; \
327 label##_iSeries: \
328 HMT_MEDIUM; \
329 mtspr SPRG1,r13; /* save r13 */ \
330 RUNLATCH_ON(r13); \
331 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
332 lbz r10,PACAPROCENABLED(r13); \
333 cmpwi 0,r10,0; \
334 beq- label##_iSeries_masked; \
335 EXCEPTION_PROLOG_ISERIES_2; \
336 b label##_common; \
337
338 #ifdef DO_SOFT_DISABLE
339 #define DISABLE_INTS \
340 lbz r10,PACAPROCENABLED(r13); \
341 li r11,0; \
342 std r10,SOFTE(r1); \
343 mfmsr r10; \
344 stb r11,PACAPROCENABLED(r13); \
345 ori r10,r10,MSR_EE; \
346 mtmsrd r10,1
347
348 #define ENABLE_INTS \
349 lbz r10,PACAPROCENABLED(r13); \
350 mfmsr r11; \
351 std r10,SOFTE(r1); \
352 ori r11,r11,MSR_EE; \
353 mtmsrd r11,1
354
355 #else /* hard enable/disable interrupts */
356 #define DISABLE_INTS
357
358 #define ENABLE_INTS \
359 ld r12,_MSR(r1); \
360 mfmsr r11; \
361 rlwimi r11,r12,0,MSR_EE; \
362 mtmsrd r11,1
363
364 #endif
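/*
 * With DO_SOFT_DISABLE (iSeries), DISABLE_INTS only records the previous
 * soft-enable state in the frame (SOFTE) and clears paca->proc_enabled;
 * MSR_EE is left hard-enabled.  Interrupts that arrive while soft-disabled
 * are caught by the *_iSeries_masked stubs and taken later, when
 * .local_irq_restore soft-enables again.
 */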
365
366 #define STD_EXCEPTION_COMMON(trap, label, hdlr) \
367 .align 7; \
368 .globl label##_common; \
369 label##_common: \
370 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
371 DISABLE_INTS; \
372 bl .save_nvgprs; \
373 addi r3,r1,STACK_FRAME_OVERHEAD; \
374 bl hdlr; \
375 b .ret_from_except
376
377 #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
378 .align 7; \
379 .globl label##_common; \
380 label##_common: \
381 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
382 DISABLE_INTS; \
383 addi r3,r1,STACK_FRAME_OVERHEAD; \
384 bl hdlr; \
385 b .ret_from_except_lite
386
387 /*
388 * Start of pSeries system interrupt routines
389 */
390 . = 0x100
391 .globl __start_interrupts
392 __start_interrupts:
393
394 STD_EXCEPTION_PSERIES(0x100, system_reset)
395
396 . = 0x200
397 _machine_check_pSeries:
398 HMT_MEDIUM
399 mtspr SPRG1,r13 /* save r13 */
400 RUNLATCH_ON(r13)
401 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
402
403 . = 0x300
404 .globl data_access_pSeries
405 data_access_pSeries:
406 HMT_MEDIUM
407 mtspr SPRG1,r13
408 BEGIN_FTR_SECTION
409 mtspr SPRG2,r12
410 mfspr r13,DAR
411 mfspr r12,DSISR
412 srdi r13,r13,60
413 rlwimi r13,r12,16,0x20
414 mfcr r12
415 cmpwi r13,0x2c
416 beq .do_stab_bolted_pSeries
417 mtcrf 0x80,r12
418 mfspr r12,SPRG2
419 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
420 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
421
422 . = 0x380
423 .globl data_access_slb_pSeries
424 data_access_slb_pSeries:
425 HMT_MEDIUM
426 mtspr SPRG1,r13
427 RUNLATCH_ON(r13)
428 mfspr r13,SPRG3 /* get paca address into r13 */
429 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
430 std r10,PACA_EXSLB+EX_R10(r13)
431 std r11,PACA_EXSLB+EX_R11(r13)
432 std r12,PACA_EXSLB+EX_R12(r13)
433 std r3,PACA_EXSLB+EX_R3(r13)
434 mfspr r9,SPRG1
435 std r9,PACA_EXSLB+EX_R13(r13)
436 mfcr r9
437 mfspr r12,SRR1 /* and SRR1 */
438 mfspr r3,DAR
439 b .do_slb_miss /* Rel. branch works in real mode */
440
441 STD_EXCEPTION_PSERIES(0x400, instruction_access)
442
443 . = 0x480
444 .globl instruction_access_slb_pSeries
445 instruction_access_slb_pSeries:
446 HMT_MEDIUM
447 mtspr SPRG1,r13
448 RUNLATCH_ON(r13)
449 mfspr r13,SPRG3 /* get paca address into r13 */
450 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
451 std r10,PACA_EXSLB+EX_R10(r13)
452 std r11,PACA_EXSLB+EX_R11(r13)
453 std r12,PACA_EXSLB+EX_R12(r13)
454 std r3,PACA_EXSLB+EX_R3(r13)
455 mfspr r9,SPRG1
456 std r9,PACA_EXSLB+EX_R13(r13)
457 mfcr r9
458 mfspr r12,SRR1 /* and SRR1 */
459 mfspr r3,SRR0 /* SRR0 is faulting address */
460 b .do_slb_miss /* Rel. branch works in real mode */
461
462 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
463 STD_EXCEPTION_PSERIES(0x600, alignment)
464 STD_EXCEPTION_PSERIES(0x700, program_check)
465 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
466 STD_EXCEPTION_PSERIES(0x900, decrementer)
467 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
468 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
469
470 . = 0xc00
471 .globl system_call_pSeries
472 system_call_pSeries:
473 HMT_MEDIUM
474 RUNLATCH_ON(r9)
475 mr r9,r13
476 mfmsr r10
477 mfspr r13,SPRG3
478 mfspr r11,SRR0
479 clrrdi r12,r13,32
480 oris r12,r12,system_call_common@h
481 ori r12,r12,system_call_common@l
482 mtspr SRR0,r12
483 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
484 mfspr r12,SRR1
485 mtspr SRR1,r10
486 rfid
487 b . /* prevent speculative execution */
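/*
 * The system call entry above is a hand-rolled, lighter variant of
 * EXCEPTION_PROLOG_PSERIES: nothing is stashed in the paca; the user's
 * r13 is kept in r9, SRR0 in r11 and SRR1 in r12, and the rfid lands in
 * system_call_common with relocation on.
 */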
488
489 STD_EXCEPTION_PSERIES(0xd00, single_step)
490 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
491
492 /* We need to deal with the Altivec unavailable exception
493 * here, which is at 0xf20 and thus lands in the middle of the
494 * prolog code of the PerformanceMonitor one. A little
495 * trickery is therefore necessary:
496 */
497 . = 0xf00
498 b performance_monitor_pSeries
499
500 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
501
502 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
503 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
504
505 /* moved from 0xf00 */
506 STD_EXCEPTION_PSERIES(0x3000, performance_monitor)
507
508 . = 0x3100
509 _GLOBAL(do_stab_bolted_pSeries)
510 mtcrf 0x80,r12
511 mfspr r12,SPRG2
512 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
513
514
515 /* Space for the naca. Architected to be located at real address
516 * NACA_PHYS_ADDR. Various tools rely on this location being fixed.
517 * The first dword of the naca is required by iSeries LPAR to
518 * point to itVpdAreas. On pSeries native, this value is not used.
519 */
520 . = NACA_PHYS_ADDR
521 .globl __end_interrupts
522 __end_interrupts:
523 #ifdef CONFIG_PPC_ISERIES
524 .globl naca
525 naca:
526 .llong itVpdAreas
527 .llong 0 /* xRamDisk */
528 .llong 0 /* xRamDiskSize */
529
530 . = 0x6100
531
532 /*** ISeries-LPAR interrupt handlers ***/
533
534 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
535
536 .globl data_access_iSeries
537 data_access_iSeries:
538 mtspr SPRG1,r13
539 BEGIN_FTR_SECTION
540 mtspr SPRG2,r12
541 mfspr r13,DAR
542 mfspr r12,DSISR
543 srdi r13,r13,60
544 rlwimi r13,r12,16,0x20
545 mfcr r12
546 cmpwi r13,0x2c
547 beq .do_stab_bolted_iSeries
548 mtcrf 0x80,r12
549 mfspr r12,SPRG2
550 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
551 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
552 EXCEPTION_PROLOG_ISERIES_2
553 b data_access_common
554
555 .do_stab_bolted_iSeries:
556 mtcrf 0x80,r12
557 mfspr r12,SPRG2
558 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
559 EXCEPTION_PROLOG_ISERIES_2
560 b .do_stab_bolted
561
562 .globl data_access_slb_iSeries
563 data_access_slb_iSeries:
564 mtspr SPRG1,r13 /* save r13 */
565 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
566 std r3,PACA_EXSLB+EX_R3(r13)
567 ld r12,PACALPPACA+LPPACASRR1(r13)
568 mfspr r3,DAR
569 b .do_slb_miss
570
571 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
572
573 .globl instruction_access_slb_iSeries
574 instruction_access_slb_iSeries:
575 mtspr SPRG1,r13 /* save r13 */
576 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
577 std r3,PACA_EXSLB+EX_R3(r13)
578 ld r12,PACALPPACA+LPPACASRR1(r13)
579 ld r3,PACALPPACA+LPPACASRR0(r13)
580 b .do_slb_miss
581
582 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
583 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
584 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
585 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
586 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
587 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
588 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
589
590 .globl system_call_iSeries
591 system_call_iSeries:
592 mr r9,r13
593 mfspr r13,SPRG3
594 EXCEPTION_PROLOG_ISERIES_2
595 b system_call_common
596
597 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
598 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
599 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
600
601 .globl system_reset_iSeries
602 system_reset_iSeries:
603 mfspr r13,SPRG3 /* Get paca address */
604 mfmsr r24
605 ori r24,r24,MSR_RI
606 mtmsrd r24 /* RI on */
607 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
608 cmpwi 0,r24,0 /* Are we processor 0? */
609 beq .__start_initialization_iSeries /* Start up the first processor */
610 mfspr r4,SPRN_CTRLF
611 li r5,CTRL_RUNLATCH /* Turn off the run light */
612 andc r4,r4,r5
613 mtspr SPRN_CTRLT,r4
614
615 1:
616 HMT_LOW
617 #ifdef CONFIG_SMP
618 lbz r23,PACAPROCSTART(r13) /* Test if this processor
619 * should start */
620 sync
621 LOADADDR(r3,current_set)
622 sldi r28,r24,3 /* get current_set[cpu#] */
623 ldx r3,r3,r28
624 addi r1,r3,THREAD_SIZE
625 subi r1,r1,STACK_FRAME_OVERHEAD
626
627 cmpwi 0,r23,0
628 beq iSeries_secondary_smp_loop /* Loop until told to go */
629 #ifdef SECONDARY_PROCESSORS
630 bne .__secondary_start /* Loop until told to go */
631 #endif
632 iSeries_secondary_smp_loop:
633 /* Let the Hypervisor know we are alive */
634 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
635 lis r3,0x8002
636 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
637 #else /* CONFIG_SMP */
638 /* Yield the processor. This is required for non-SMP kernels
639 which are running on multi-threaded machines. */
640 lis r3,0x8000
641 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
642 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
643 li r4,0 /* "yield timed" */
644 li r5,-1 /* "yield forever" */
645 #endif /* CONFIG_SMP */
646 li r0,-1 /* r0=-1 indicates a Hypervisor call */
647 sc /* Invoke the hypervisor via a system call */
648 mfspr r13,SPRG3 /* Put r13 back ???? */
649 b 1b /* If SMP not configured, secondaries
650 * loop forever */
651
652 .globl decrementer_iSeries_masked
653 decrementer_iSeries_masked:
654 li r11,1
655 stb r11,PACALPPACA+LPPACADECRINT(r13)
656 lwz r12,PACADEFAULTDECR(r13)
657 mtspr SPRN_DEC,r12
658 /* fall through */
659
660 .globl hardware_interrupt_iSeries_masked
661 hardware_interrupt_iSeries_masked:
662 mtcrf 0x80,r9 /* Restore regs */
663 ld r11,PACALPPACA+LPPACASRR0(r13)
664 ld r12,PACALPPACA+LPPACASRR1(r13)
665 mtspr SRR0,r11
666 mtspr SRR1,r12
667 ld r9,PACA_EXGEN+EX_R9(r13)
668 ld r10,PACA_EXGEN+EX_R10(r13)
669 ld r11,PACA_EXGEN+EX_R11(r13)
670 ld r12,PACA_EXGEN+EX_R12(r13)
671 ld r13,PACA_EXGEN+EX_R13(r13)
672 rfid
673 b . /* prevent speculative execution */
674 #endif
675
676 /*
677 * Data area reserved for FWNMI option.
678 */
679 . = 0x7000
680 .globl fwnmi_data_area
681 fwnmi_data_area:
682
683 #ifdef CONFIG_PPC_ISERIES
684 . = LPARMAP_PHYS
685 #include "lparmap.s"
686 #endif /* CONFIG_PPC_ISERIES */
687
688 /*
689 * Vectors for the FWNMI option. Share common code.
690 */
691 . = 0x8000
692 .globl system_reset_fwnmi
693 system_reset_fwnmi:
694 HMT_MEDIUM
695 mtspr SPRG1,r13 /* save r13 */
696 RUNLATCH_ON(r13)
697 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
698 .globl machine_check_fwnmi
699 machine_check_fwnmi:
700 HMT_MEDIUM
701 mtspr SPRG1,r13 /* save r13 */
702 RUNLATCH_ON(r13)
703 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
704
705 /*
706 * Space for the initial segment table
707 * For LPAR, the hypervisor must fill in at least one entry
708 * before we get control (with relocate on)
709 */
710 . = STAB0_PHYS_ADDR
711 .globl __start_stab
712 __start_stab:
713
714 . = (STAB0_PHYS_ADDR + PAGE_SIZE)
715 .globl __end_stab
716 __end_stab:
717
718
719 /*** Common interrupt handlers ***/
720
721 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
722
723 /*
724 * Machine check is different because we use a different
725 * save area: PACA_EXMC instead of PACA_EXGEN.
726 */
727 .align 7
728 .globl machine_check_common
729 machine_check_common:
730 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
731 DISABLE_INTS
732 bl .save_nvgprs
733 addi r3,r1,STACK_FRAME_OVERHEAD
734 bl .machine_check_exception
735 b .ret_from_except
736
737 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
738 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
739 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
740 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
741 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
742 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
743 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
744 #ifdef CONFIG_ALTIVEC
745 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
746 #else
747 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
748 #endif
749
750 /*
751 * Here we have detected that the kernel stack pointer is bad.
752 * R9 contains the saved CR, r13 points to the paca,
753 * r10 contains the (bad) kernel stack pointer,
754 * r11 and r12 contain the saved SRR0 and SRR1.
755 * We switch to using the paca guard page as an emergency stack,
756 * save the registers there, and call kernel_bad_stack(), which panics.
757 */
758 bad_stack:
759 ld r1,PACAEMERGSP(r13)
760 subi r1,r1,64+INT_FRAME_SIZE
761 std r9,_CCR(r1)
762 std r10,GPR1(r1)
763 std r11,_NIP(r1)
764 std r12,_MSR(r1)
765 mfspr r11,DAR
766 mfspr r12,DSISR
767 std r11,_DAR(r1)
768 std r12,_DSISR(r1)
769 mflr r10
770 mfctr r11
771 mfxer r12
772 std r10,_LINK(r1)
773 std r11,_CTR(r1)
774 std r12,_XER(r1)
775 SAVE_GPR(0,r1)
776 SAVE_GPR(2,r1)
777 SAVE_4GPRS(3,r1)
778 SAVE_2GPRS(7,r1)
779 SAVE_10GPRS(12,r1)
780 SAVE_10GPRS(22,r1)
781 addi r11,r1,INT_FRAME_SIZE
782 std r11,0(r1)
783 li r12,0
784 std r12,0(r11)
785 ld r2,PACATOC(r13)
786 1: addi r3,r1,STACK_FRAME_OVERHEAD
787 bl .kernel_bad_stack
788 b 1b
789
790 /*
791 * Return from an exception with minimal checks.
792 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
793 * If interrupts have been enabled, or anything has been
794 * done that might have changed the scheduling status of
795 * any task or sent any task a signal, you should use
796 * ret_from_except or ret_from_except_lite instead of this.
797 */
798 fast_exception_return:
799 ld r12,_MSR(r1)
800 ld r11,_NIP(r1)
801 andi. r3,r12,MSR_RI /* check if RI is set */
802 beq- unrecov_fer
803 ld r3,_CCR(r1)
804 ld r4,_LINK(r1)
805 ld r5,_CTR(r1)
806 ld r6,_XER(r1)
807 mtcr r3
808 mtlr r4
809 mtctr r5
810 mtxer r6
811 REST_GPR(0, r1)
812 REST_8GPRS(2, r1)
813
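	/*
	 * Clear MSR_RI before SRR0/SRR1 are loaded with the return context:
	 * from here until the rfid a further exception would clobber them,
	 * so this window is marked as unrecoverable.
	 */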
814 mfmsr r10
815 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
816 mtmsrd r10,1
817
818 mtspr SRR1,r12
819 mtspr SRR0,r11
820 REST_4GPRS(10, r1)
821 ld r1,GPR1(r1)
822 rfid
823 b . /* prevent speculative execution */
824
825 unrecov_fer:
826 bl .save_nvgprs
827 1: addi r3,r1,STACK_FRAME_OVERHEAD
828 bl .unrecoverable_exception
829 b 1b
830
831 /*
832 * Here r13 points to the paca, r9 contains the saved CR,
833 * SRR0 and SRR1 are saved in r11 and r12,
834 * r9 - r13 are saved in paca->exgen.
835 */
836 .align 7
837 .globl data_access_common
838 data_access_common:
839 RUNLATCH_ON(r10) /* It won't fit in the 0x300 handler */
840 mfspr r10,DAR
841 std r10,PACA_EXGEN+EX_DAR(r13)
842 mfspr r10,DSISR
843 stw r10,PACA_EXGEN+EX_DSISR(r13)
844 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
845 ld r3,PACA_EXGEN+EX_DAR(r13)
846 lwz r4,PACA_EXGEN+EX_DSISR(r13)
847 li r5,0x300
848 b .do_hash_page /* Try to handle as hpte fault */
849
850 .align 7
851 .globl instruction_access_common
852 instruction_access_common:
853 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
854 ld r3,_NIP(r1)
855 andis. r4,r12,0x5820
856 li r5,0x400
857 b .do_hash_page /* Try to handle as hpte fault */
858
859 .align 7
860 .globl hardware_interrupt_common
861 .globl hardware_interrupt_entry
862 hardware_interrupt_common:
863 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
864 hardware_interrupt_entry:
865 DISABLE_INTS
866 addi r3,r1,STACK_FRAME_OVERHEAD
867 bl .do_IRQ
868 b .ret_from_except_lite
869
870 .align 7
871 .globl alignment_common
872 alignment_common:
873 mfspr r10,DAR
874 std r10,PACA_EXGEN+EX_DAR(r13)
875 mfspr r10,DSISR
876 stw r10,PACA_EXGEN+EX_DSISR(r13)
877 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
878 ld r3,PACA_EXGEN+EX_DAR(r13)
879 lwz r4,PACA_EXGEN+EX_DSISR(r13)
880 std r3,_DAR(r1)
881 std r4,_DSISR(r1)
882 bl .save_nvgprs
883 addi r3,r1,STACK_FRAME_OVERHEAD
884 ENABLE_INTS
885 bl .alignment_exception
886 b .ret_from_except
887
888 .align 7
889 .globl program_check_common
890 program_check_common:
891 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
892 bl .save_nvgprs
893 addi r3,r1,STACK_FRAME_OVERHEAD
894 ENABLE_INTS
895 bl .program_check_exception
896 b .ret_from_except
897
898 .align 7
899 .globl fp_unavailable_common
900 fp_unavailable_common:
901 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
902 bne .load_up_fpu /* if from user, just load it up */
903 bl .save_nvgprs
904 addi r3,r1,STACK_FRAME_OVERHEAD
905 ENABLE_INTS
906 bl .kernel_fp_unavailable_exception
907 BUG_OPCODE
908
909 .align 7
910 .globl altivec_unavailable_common
911 altivec_unavailable_common:
912 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
913 #ifdef CONFIG_ALTIVEC
914 BEGIN_FTR_SECTION
915 bne .load_up_altivec /* if from user, just load it up */
916 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
917 #endif
918 bl .save_nvgprs
919 addi r3,r1,STACK_FRAME_OVERHEAD
920 ENABLE_INTS
921 bl .altivec_unavailable_exception
922 b .ret_from_except
923
924 /*
925 * Hash table stuff
926 */
927 .align 7
928 _GLOBAL(do_hash_page)
929 std r3,_DAR(r1)
930 std r4,_DSISR(r1)
931
932 andis. r0,r4,0xa450 /* weird error? */
933 bne- .handle_page_fault /* if so, take the page fault path */
934 BEGIN_FTR_SECTION
935 andis. r0,r4,0x0020 /* Is it a segment table fault? */
936 bne- .do_ste_alloc /* If so handle it */
937 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
938
939 /*
940 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
941 * accessing a userspace segment (even from the kernel). We assume
942 * kernel addresses always have the high bit set.
943 */
944 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
945 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
946 orc r0,r12,r0 /* MSR_PR | ~high_bit */
947 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
948 ori r4,r4,1 /* add _PAGE_PRESENT */
949 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
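	/*
	 * r4 now holds the access mask hash_page expects: _PAGE_PRESENT,
	 * plus _PAGE_RW for a store fault, _PAGE_USER for user-mode or
	 * user-segment accesses, and _PAGE_EXEC for an instruction fault.
	 */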
950
951 /*
952 * On iSeries, we soft-disable interrupts here, then
953 * hard-enable interrupts so that the hash_page code can spin on
954 * the hash_table_lock without problems on a shared processor.
955 */
956 DISABLE_INTS
957
958 /*
959 * r3 contains the faulting address
960 * r4 contains the required access permissions
961 * r5 contains the trap number
962 *
963 * at return r3 = 0 for success
964 */
965 bl .hash_page /* build HPTE if possible */
966 cmpdi r3,0 /* see if hash_page succeeded */
967
968 #ifdef DO_SOFT_DISABLE
969 /*
970 * If we had interrupts soft-enabled at the point where the
971 * DSI/ISI occurred, and an interrupt came in during hash_page,
972 * handle it now.
973 * We jump to ret_from_except_lite rather than fast_exception_return
974 * because ret_from_except_lite will check for and handle pending
975 * interrupts if necessary.
976 */
977 beq .ret_from_except_lite
978 /* For a hash failure, we don't bother re-enabling interrupts */
979 ble- 12f
980
981 /*
982 * hash_page couldn't handle it, set soft interrupt enable back
983 * to what it was before the trap. Note that .local_irq_restore
984 * handles any interrupts pending at this point.
985 */
986 ld r3,SOFTE(r1)
987 bl .local_irq_restore
988 b 11f
989 #else
990 beq fast_exception_return /* Return from exception on success */
991 ble- 12f /* Failure return from hash_page */
992
993 /* fall through */
994 #endif
995
996 /* Here we have a page fault that hash_page can't handle. */
997 _GLOBAL(handle_page_fault)
998 ENABLE_INTS
999 11: ld r4,_DAR(r1)
1000 ld r5,_DSISR(r1)
1001 addi r3,r1,STACK_FRAME_OVERHEAD
1002 bl .do_page_fault
1003 cmpdi r3,0
1004 beq+ .ret_from_except_lite
1005 bl .save_nvgprs
1006 mr r5,r3
1007 addi r3,r1,STACK_FRAME_OVERHEAD
1008 lwz r4,_DAR(r1)
1009 bl .bad_page_fault
1010 b .ret_from_except
1011
1012 /* We have a page fault that hash_page could handle but HV refused
1013 * the PTE insertion
1014 */
1015 12: bl .save_nvgprs
1016 addi r3,r1,STACK_FRAME_OVERHEAD
1017 lwz r4,_DAR(r1)
1018 bl .low_hash_fault
1019 b .ret_from_except
1020
1021 /* here we have a segment miss */
1022 _GLOBAL(do_ste_alloc)
1023 bl .ste_allocate /* try to insert stab entry */
1024 cmpdi r3,0
1025 beq+ fast_exception_return
1026 b .handle_page_fault
1027
1028 /*
1029 * r13 points to the PACA, r9 contains the saved CR,
1030 * r11 and r12 contain the saved SRR0 and SRR1.
1031 * r9 - r13 are saved in paca->exslb.
1032 * We assume we aren't going to take any exceptions during this procedure.
1033 * We assume (DAR >> 60) == 0xc.
1034 */
1035 .align 7
1036 _GLOBAL(do_stab_bolted)
1037 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1038 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1039
1040 /* Hash to the primary group */
1041 ld r10,PACASTABVIRT(r13)
1042 mfspr r11,DAR
1043 srdi r11,r11,28
1044 rldimi r10,r11,7,52 /* r10 = first ste of the group */
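	/*
	 * Each STE is 16 bytes and a primary group holds 8 of them (128
	 * bytes), hence the search below steps by 16 and masks the group
	 * offset with 0x70.
	 */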
1045
1046 /* Calculate VSID */
1047 /* This is a kernel address, so protovsid = ESID */
1048 ASM_VSID_SCRAMBLE(r11, r9)
1049 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1050
1051 /* Search the primary group for a free entry */
1052 1: ld r11,0(r10) /* Test valid bit of the current ste */
1053 andi. r11,r11,0x80
1054 beq 2f
1055 addi r10,r10,16
1056 andi. r11,r10,0x70
1057 bne 1b
1058
1059 /* Stick to only searching the primary group for now. */
1060 /* At least for now, we use a very simple random castout scheme */
1061 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1062 mftb r11
1063 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1064 ori r11,r11,0x10
1065
1066 /* r10 currently points to an ste one past the group of interest */
1067 /* make it point to the randomly selected entry */
1068 subi r10,r10,128
1069 or r10,r10,r11 /* r10 is the entry to invalidate */
1070
1071 isync /* mark the entry invalid */
1072 ld r11,0(r10)
1073 rldicl r11,r11,56,1 /* clear the valid bit */
1074 rotldi r11,r11,8
1075 std r11,0(r10)
1076 sync
1077
1078 clrrdi r11,r11,28 /* Get the esid part of the ste */
1079 slbie r11
1080
1081 2: std r9,8(r10) /* Store the vsid part of the ste */
1082 eieio
1083
1084 mfspr r11,DAR /* Get the new esid */
1085 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1086 ori r11,r11,0x90 /* Turn on valid and kp */
1087 std r11,0(r10) /* Put new entry back into the stab */
1088
1089 sync
1090
1091 /* All done -- return from exception. */
1092 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1093 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1094
1095 andi. r10,r12,MSR_RI
1096 beq- unrecov_slb
1097
1098 mtcrf 0x80,r9 /* restore CR */
1099
1100 mfmsr r10
1101 clrrdi r10,r10,2
1102 mtmsrd r10,1
1103
1104 mtspr SRR0,r11
1105 mtspr SRR1,r12
1106 ld r9,PACA_EXSLB+EX_R9(r13)
1107 ld r10,PACA_EXSLB+EX_R10(r13)
1108 ld r11,PACA_EXSLB+EX_R11(r13)
1109 ld r12,PACA_EXSLB+EX_R12(r13)
1110 ld r13,PACA_EXSLB+EX_R13(r13)
1111 rfid
1112 b . /* prevent speculative execution */
1113
1114 /*
1115 * r13 points to the PACA, r9 contains the saved CR,
1116 * r11 and r12 contain the saved SRR0 and SRR1.
1117 * r3 has the faulting address
1118 * r9 - r13 are saved in paca->exslb.
1119 * r3 is saved in paca->slb_r3
1120 * We assume we aren't going to take any exceptions during this procedure.
1121 */
1122 _GLOBAL(do_slb_miss)
1123 mflr r10
1124
1125 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1126 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1127
1128 bl .slb_allocate /* handle it */
1129
1130 /* All done -- return from exception. */
1131
1132 ld r10,PACA_EXSLB+EX_LR(r13)
1133 ld r3,PACA_EXSLB+EX_R3(r13)
1134 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1135 #ifdef CONFIG_PPC_ISERIES
1136 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
1137 #endif /* CONFIG_PPC_ISERIES */
1138
1139 mtlr r10
1140
1141 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1142 beq- unrecov_slb
1143
1144 .machine push
1145 .machine "power4"
1146 mtcrf 0x80,r9
1147 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1148 .machine pop
1149
1150 #ifdef CONFIG_PPC_ISERIES
1151 mtspr SRR0,r11
1152 mtspr SRR1,r12
1153 #endif /* CONFIG_PPC_ISERIES */
1154 ld r9,PACA_EXSLB+EX_R9(r13)
1155 ld r10,PACA_EXSLB+EX_R10(r13)
1156 ld r11,PACA_EXSLB+EX_R11(r13)
1157 ld r12,PACA_EXSLB+EX_R12(r13)
1158 ld r13,PACA_EXSLB+EX_R13(r13)
1159 rfid
1160 b . /* prevent speculative execution */
1161
1162 unrecov_slb:
1163 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1164 DISABLE_INTS
1165 bl .save_nvgprs
1166 1: addi r3,r1,STACK_FRAME_OVERHEAD
1167 bl .unrecoverable_exception
1168 b 1b
1169
1170
1171 /*
1172 * On pSeries, secondary processors spin in the following code.
1173 * At entry, r3 = this processor's number (physical cpu id)
1174 */
1175 _GLOBAL(pSeries_secondary_smp_init)
1176 mr r24,r3
1177
1178 /* turn on 64-bit mode */
1179 bl .enable_64b_mode
1180 isync
1181
1182 /* Copy some CPU settings from CPU 0 */
1183 bl .__restore_cpu_setup
1184
1185 /* Set up a paca value for this processor. Since we have the
1186 * physical cpu id in r24, we need to search the pacas to find
1187 * which logical id maps to our physical one.
1188 */
1189 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1190 li r5,0 /* logical cpu id */
1191 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1192 cmpw r6,r24 /* Compare to our id */
1193 beq 2f
1194 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1195 addi r5,r5,1
1196 cmpwi r5,NR_CPUS
1197 blt 1b
1198
1199 mr r3,r24 /* not found, copy phys to r3 */
1200 b .kexec_wait /* next kernel might do better */
1201
1202 2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1203 /* From now on, r24 is expected to be the logical cpuid */
1204 mr r24,r5
1205 3: HMT_LOW
1206 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1207 /* start. */
1208 sync
1209
1210 /* Create a temp kernel stack for use before relocation is on. */
1211 ld r1,PACAEMERGSP(r13)
1212 subi r1,r1,STACK_FRAME_OVERHEAD
1213
1214 cmpwi 0,r23,0
1215 #ifdef CONFIG_SMP
1216 #ifdef SECONDARY_PROCESSORS
1217 bne .__secondary_start
1218 #endif
1219 #endif
1220 b 3b /* Loop until told to go */
1221
1222 #ifdef CONFIG_PPC_ISERIES
1223 _STATIC(__start_initialization_iSeries)
1224 /* Clear out the BSS */
1225 LOADADDR(r11,__bss_stop)
1226 LOADADDR(r8,__bss_start)
1227 sub r11,r11,r8 /* bss size */
1228 addi r11,r11,7 /* round up to an even double word */
1229 rldicl. r11,r11,61,3 /* shift right by 3 */
1230 beq 4f
1231 addi r8,r8,-8
1232 li r0,0
1233 mtctr r11 /* zero this many doublewords */
1234 3: stdu r0,8(r8)
1235 bdnz 3b
1236 4:
1237 LOADADDR(r1,init_thread_union)
1238 addi r1,r1,THREAD_SIZE
1239 li r0,0
1240 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1241
1242 LOADADDR(r3,cpu_specs)
1243 LOADADDR(r4,cur_cpu_spec)
1244 li r5,0
1245 bl .identify_cpu
1246
1247 LOADADDR(r2,__toc_start)
1248 addi r2,r2,0x4000
1249 addi r2,r2,0x4000
1250
1251 bl .iSeries_early_setup
1252
1253 /* relocation is on at this point */
1254
1255 b .start_here_common
1256 #endif /* CONFIG_PPC_ISERIES */
1257
1258 #ifdef CONFIG_PPC_MULTIPLATFORM
1259
1260 _STATIC(__mmu_off)
1261 mfmsr r3
1262 andi. r0,r3,MSR_IR|MSR_DR
1263 beqlr
1264 andc r3,r3,r0
1265 mtspr SPRN_SRR0,r4
1266 mtspr SPRN_SRR1,r3
1267 sync
1268 rfid
1269 b . /* prevent speculative execution */
1270
1271
1272 /*
1273 * Here is our main kernel entry point. We currently support two kinds of entry,
1274 * depending on the value of r5.
1275 *
1276 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1277 * in r3...r7
1278 *
1279 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1280 * DT block, r4 is a physical pointer to the kernel itself
1281 *
1282 */
1283 _GLOBAL(__start_initialization_multiplatform)
1284 /*
1285 * Are we booted from a PROM Of-type client-interface ?
1286 */
1287 cmpldi cr0,r5,0
1288 bne .__boot_from_prom /* yes -> prom */
1289
1290 /* Save parameters */
1291 mr r31,r3
1292 mr r30,r4
1293
1294 /* Make sure we are running in 64 bits mode */
1295 bl .enable_64b_mode
1296
1297 /* Setup some critical 970 SPRs before switching MMU off */
1298 bl .__970_cpu_preinit
1299
1300 /* cpu # */
1301 li r24,0
1302
1303 /* Switch off MMU if not already */
1304 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1305 add r4,r4,r30
1306 bl .__mmu_off
1307 b .__after_prom_start
1308
1309 _STATIC(__boot_from_prom)
1310 /* Save parameters */
1311 mr r31,r3
1312 mr r30,r4
1313 mr r29,r5
1314 mr r28,r6
1315 mr r27,r7
1316
1317 /* Make sure we are running in 64 bits mode */
1318 bl .enable_64b_mode
1319
1320 /* put a relocation offset into r3 */
1321 bl .reloc_offset
1322
1323 LOADADDR(r2,__toc_start)
1324 addi r2,r2,0x4000
1325 addi r2,r2,0x4000
1326
1327 /* Relocate the TOC from a virt addr to a real addr */
1328 sub r2,r2,r3
1329
1330 /* Restore parameters */
1331 mr r3,r31
1332 mr r4,r30
1333 mr r5,r29
1334 mr r6,r28
1335 mr r7,r27
1336
1337 /* Do all of the interaction with OF client interface */
1338 bl .prom_init
1339 /* We never return */
1340 trap
1341
1342 /*
1343 * At this point, r3 contains the physical address we are running at,
1344 * returned by prom_init()
1345 */
1346 _STATIC(__after_prom_start)
1347
1348 /*
1349 * We need to run with __start at physical address 0.
1350 * This will leave some code in the first 256B of
1351 * real memory, which are reserved for software use.
1352 * The remainder of the first page is loaded with the fixed
1353 * interrupt vectors. The next two pages are filled with
1354 * unknown exception placeholders.
1355 *
1356 * Note: This process overwrites the OF exception vectors.
1357 * r26 == relocation offset
1358 * r27 == KERNELBASE
1359 */
1360 bl .reloc_offset
1361 mr r26,r3
1362 SET_REG_TO_CONST(r27,KERNELBASE)
1363
1364 li r3,0 /* target addr */
1365
1366 // XXX FIXME: Use phys returned by OF (r30)
1367 sub r4,r27,r26 /* source addr */
1368 /* current address of _start */
1369 /* i.e. where we are running */
1370 /* the source addr */
1371
1372 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1373 sub r5,r5,r27
1374
1375 li r6,0x100 /* Start offset, the first 0x100 */
1376 /* bytes were copied earlier. */
1377
1378 bl .copy_and_flush /* copy the first n bytes */
1379 /* this includes the code being */
1380 /* executed here. */
1381
1382 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1383 mtctr r0 /* that we just made/relocated */
1384 bctr
1385
1386 4: LOADADDR(r5,klimit)
1387 sub r5,r5,r26
1388 ld r5,0(r5) /* get the value of klimit */
1389 sub r5,r5,r27
1390 bl .copy_and_flush /* copy the rest */
1391 b .start_here_multiplatform
1392
1393 #endif /* CONFIG_PPC_MULTIPLATFORM */
1394
1395 /*
1396 * Copy routine used to copy the kernel to start at physical address 0
1397 * and flush and invalidate the caches as needed.
1398 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1399 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1400 *
1401 * Note: this routine *only* clobbers r0, r6 and lr
1402 */
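/*
 * It is called twice from __after_prom_start: once to copy everything up
 * to copy_to_here (so execution can continue in the copy at physical 0),
 * and once more for the remainder up to klimit.
 */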
1403 _GLOBAL(copy_and_flush)
1404 addi r5,r5,-8
1405 addi r6,r6,-8
1406 4: li r0,16 /* Use the least common */
1407 /* denominator cache line */
1408 /* size. This results in */
1409 /* extra cache line flushes */
1410 /* but operation is correct. */
1411 /* Can't get cache line size */
1412 /* from NACA as it is being */
1413 /* moved too. */
1414
1415 mtctr r0 /* put # words/line in ctr */
1416 3: addi r6,r6,8 /* copy a cache line */
1417 ldx r0,r6,r4
1418 stdx r0,r6,r3
1419 bdnz 3b
1420 dcbst r6,r3 /* write it to memory */
1421 sync
1422 icbi r6,r3 /* flush the icache line */
1423 cmpld 0,r6,r5
1424 blt 4b
1425 sync
1426 addi r5,r5,8
1427 addi r6,r6,8
1428 blr
1429
1430 .align 8
1431 copy_to_here:
1432
1433 /*
1434 * load_up_fpu(unused, unused, tsk)
1435 * Disable FP for the task which had the FPU previously,
1436 * and save its floating-point registers in its thread_struct.
1437 * Enables the FPU for use in the kernel on return.
1438 * On SMP we know the fpu is free, since we give it up every
1439 * switch (ie, no lazy save of the FP registers).
1440 * On entry: r13 == 'current' && last_task_used_math != 'current'
1441 */
1442 _STATIC(load_up_fpu)
1443 mfmsr r5 /* grab the current MSR */
1444 ori r5,r5,MSR_FP
1445 mtmsrd r5 /* enable use of fpu now */
1446 isync
1447 /*
1448 * For SMP, we don't do lazy FPU switching because it just gets too
1449 * horrendously complex, especially when a task switches from one CPU
1450 * to another. Instead we call giveup_fpu in switch_to.
1451 *
1452 */
1453 #ifndef CONFIG_SMP
1454 ld r3,last_task_used_math@got(r2)
1455 ld r4,0(r3)
1456 cmpdi 0,r4,0
1457 beq 1f
1458 /* Save FP state to last_task_used_math's THREAD struct */
1459 addi r4,r4,THREAD
1460 SAVE_32FPRS(0, r4)
1461 mffs fr0
1462 stfd fr0,THREAD_FPSCR(r4)
1463 /* Disable FP for last_task_used_math */
1464 ld r5,PT_REGS(r4)
1465 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1466 li r6,MSR_FP|MSR_FE0|MSR_FE1
1467 andc r4,r4,r6
1468 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1469 1:
1470 #endif /* CONFIG_SMP */
1471 /* enable use of FP after return */
1472 ld r4,PACACURRENT(r13)
1473 addi r5,r4,THREAD /* Get THREAD */
1474 ld r4,THREAD_FPEXC_MODE(r5)
1475 ori r12,r12,MSR_FP
1476 or r12,r12,r4
1477 std r12,_MSR(r1)
1478 lfd fr0,THREAD_FPSCR(r5)
1479 mtfsf 0xff,fr0
1480 REST_32FPRS(0, r5)
1481 #ifndef CONFIG_SMP
1482 /* Update last_task_used_math to 'current' */
1483 subi r4,r5,THREAD /* Back to 'current' */
1484 std r4,0(r3)
1485 #endif /* CONFIG_SMP */
1486 /* restore registers and return */
1487 b fast_exception_return
1488
1489 /*
1490 * disable_kernel_fp()
1491 * Disable the FPU.
1492 */
1493 _GLOBAL(disable_kernel_fp)
1494 mfmsr r3
1495 rldicl r0,r3,(63-MSR_FP_LG),1
1496 rldicl r3,r0,(MSR_FP_LG+1),0
1497 mtmsrd r3 /* disable use of fpu now */
1498 isync
1499 blr
1500
1501 /*
1502 * giveup_fpu(tsk)
1503 * Disable FP for the task given as the argument,
1504 * and save the floating-point registers in its thread_struct.
1505 * Enables the FPU for use in the kernel on return.
1506 */
1507 _GLOBAL(giveup_fpu)
1508 mfmsr r5
1509 ori r5,r5,MSR_FP
1510 mtmsrd r5 /* enable use of fpu now */
1511 isync
1512 cmpdi 0,r3,0
1513 beqlr- /* if no previous owner, done */
1514 addi r3,r3,THREAD /* want THREAD of task */
1515 ld r5,PT_REGS(r3)
1516 cmpdi 0,r5,0
1517 SAVE_32FPRS(0, r3)
1518 mffs fr0
1519 stfd fr0,THREAD_FPSCR(r3)
1520 beq 1f
1521 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1522 li r3,MSR_FP|MSR_FE0|MSR_FE1
1523 andc r4,r4,r3 /* disable FP for previous task */
1524 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1525 1:
1526 #ifndef CONFIG_SMP
1527 li r5,0
1528 ld r4,last_task_used_math@got(r2)
1529 std r5,0(r4)
1530 #endif /* CONFIG_SMP */
1531 blr
1532
1533
1534 #ifdef CONFIG_ALTIVEC
1535
1536 /*
1537 * load_up_altivec(unused, unused, tsk)
1538 * Disable VMX for the task which had it previously,
1539 * and save its vector registers in its thread_struct.
1540 * Enables the VMX for use in the kernel on return.
1541 * On SMP we know the VMX is free, since we give it up every
1542 * switch (ie, no lazy save of the vector registers).
1543 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1544 */
1545 _STATIC(load_up_altivec)
1546 mfmsr r5 /* grab the current MSR */
1547 oris r5,r5,MSR_VEC@h
1548 mtmsrd r5 /* enable use of VMX now */
1549 isync
1550
1551 /*
1552 * For SMP, we don't do lazy VMX switching because it just gets too
1553 * horrendously complex, especially when a task switches from one CPU
1554 * to another. Instead we call giveup_altivec in switch_to.
1555 * VRSAVE isn't dealt with here, that is done in the normal context
1556 * switch code. Note that we could rely on vrsave value to eventually
1557 * avoid saving all of the VREGs here...
1558 */
1559 #ifndef CONFIG_SMP
1560 ld r3,last_task_used_altivec@got(r2)
1561 ld r4,0(r3)
1562 cmpdi 0,r4,0
1563 beq 1f
1564 /* Save VMX state to last_task_used_altivec's THREAD struct */
1565 addi r4,r4,THREAD
1566 SAVE_32VRS(0,r5,r4)
1567 mfvscr vr0
1568 li r10,THREAD_VSCR
1569 stvx vr0,r10,r4
1570 /* Disable VMX for last_task_used_altivec */
1571 ld r5,PT_REGS(r4)
1572 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1573 lis r6,MSR_VEC@h
1574 andc r4,r4,r6
1575 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1576 1:
1577 #endif /* CONFIG_SMP */
1578 /* Hack: if we get an altivec unavailable trap with VRSAVE
1579 * set to all zeros, we assume this is a broken application
1580 * that fails to set it properly, and thus we switch it to
1581 * all 1's
1582 */
1583 mfspr r4,SPRN_VRSAVE
1584 cmpdi 0,r4,0
1585 bne+ 1f
1586 li r4,-1
1587 mtspr SPRN_VRSAVE,r4
1588 1:
1589 /* enable use of VMX after return */
1590 ld r4,PACACURRENT(r13)
1591 addi r5,r4,THREAD /* Get THREAD */
1592 oris r12,r12,MSR_VEC@h
1593 std r12,_MSR(r1)
1594 li r4,1
1595 li r10,THREAD_VSCR
1596 stw r4,THREAD_USED_VR(r5)
1597 lvx vr0,r10,r5
1598 mtvscr vr0
1599 REST_32VRS(0,r4,r5)
1600 #ifndef CONFIG_SMP
1601 /* Update last_task_used_altivec to 'current' */
1602 subi r4,r5,THREAD /* Back to 'current' */
1603 std r4,0(r3)
1604 #endif /* CONFIG_SMP */
1605 /* restore registers and return */
1606 b fast_exception_return
1607
1608 /*
1609 * disable_kernel_altivec()
1610 * Disable the VMX.
1611 */
1612 _GLOBAL(disable_kernel_altivec)
1613 mfmsr r3
1614 rldicl r0,r3,(63-MSR_VEC_LG),1
1615 rldicl r3,r0,(MSR_VEC_LG+1),0
1616 mtmsrd r3 /* disable use of VMX now */
1617 isync
1618 blr
1619
1620 /*
1621 * giveup_altivec(tsk)
1622 * Disable VMX for the task given as the argument,
1623 * and save the vector registers in its thread_struct.
1624 * Enables the VMX for use in the kernel on return.
1625 */
1626 _GLOBAL(giveup_altivec)
1627 mfmsr r5
1628 oris r5,r5,MSR_VEC@h
1629 mtmsrd r5 /* enable use of VMX now */
1630 isync
1631 cmpdi 0,r3,0
1632 beqlr- /* if no previous owner, done */
1633 addi r3,r3,THREAD /* want THREAD of task */
1634 ld r5,PT_REGS(r3)
1635 cmpdi 0,r5,0
1636 SAVE_32VRS(0,r4,r3)
1637 mfvscr vr0
1638 li r4,THREAD_VSCR
1639 stvx vr0,r4,r3
1640 beq 1f
1641 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1642 lis r3,MSR_VEC@h
1643 andc r4,r4,r3 /* disable VMX for previous task */
1644 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1645 1:
1646 #ifndef CONFIG_SMP
1647 li r5,0
1648 ld r4,last_task_used_altivec@got(r2)
1649 std r5,0(r4)
1650 #endif /* CONFIG_SMP */
1651 blr
1652
1653 #endif /* CONFIG_ALTIVEC */
1654
1655 #ifdef CONFIG_SMP
1656 #ifdef CONFIG_PPC_PMAC
1657 /*
1658 * On PowerMac, secondary processors start from the reset vector, which
1659 * is temporarily turned into a call to one of the functions below.
1660 */
1661 .section ".text";
1662 .align 2 ;
1663
1664 .globl pmac_secondary_start_1
1665 pmac_secondary_start_1:
1666 li r24, 1
1667 b .pmac_secondary_start
1668
1669 .globl pmac_secondary_start_2
1670 pmac_secondary_start_2:
1671 li r24, 2
1672 b .pmac_secondary_start
1673
1674 .globl pmac_secondary_start_3
1675 pmac_secondary_start_3:
1676 li r24, 3
1677 b .pmac_secondary_start
1678
1679 _GLOBAL(pmac_secondary_start)
1680 /* turn on 64-bit mode */
1681 bl .enable_64b_mode
1682 isync
1683
1684 /* Copy some CPU settings from CPU 0 */
1685 bl .__restore_cpu_setup
1686
1687 /* pSeries do that early though I don't think we really need it */
1688 mfmsr r3
1689 ori r3,r3,MSR_RI
1690 mtmsrd r3 /* RI on */
1691
1692 /* Set up a paca value for this processor. */
1693 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1694 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1695 add r13,r13,r4 /* for this processor. */
1696 mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1697
1698 /* Create a temp kernel stack for use before relocation is on. */
1699 ld r1,PACAEMERGSP(r13)
1700 subi r1,r1,STACK_FRAME_OVERHEAD
1701
1702 b .__secondary_start
1703
1704 #endif /* CONFIG_PPC_PMAC */
1705
1706 /*
1707 * This function is called after the master CPU has released the
1708 * secondary processors. The execution environment is relocation off.
1709 * The paca for this processor has the following fields initialized at
1710 * this point:
1711 * 1. Processor number
1712 * 2. Segment table pointer (virtual address)
1713 * On entry the following are set:
1714 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1715 * r24 = cpu# (in Linux terms)
1716 * r13 = paca virtual address
1717 * SPRG3 = paca virtual address
1718 */
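/*
 * In outline, __secondary_start below: points SDR1 at the hash table
 * (except on iSeries, where the hypervisor owns it), initializes this
 * cpu's segment table or SLB, switches to its kernel stack, sets the ASR
 * (by hypervisor call where required), and finally rfid's to
 * .start_secondary_prolog with relocation on.
 */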
1719 _GLOBAL(__secondary_start)
1720
1721 HMT_MEDIUM /* Set thread priority to MEDIUM */
1722
1723 ld r2,PACATOC(r13)
1724 li r6,0
1725 stb r6,PACAPROCENABLED(r13)
1726
1727 #ifndef CONFIG_PPC_ISERIES
1728 /* Initialize the page table pointer register. */
1729 LOADADDR(r6,_SDR1)
1730 ld r6,0(r6) /* get the value of _SDR1 */
1731 mtspr SDR1,r6 /* set the htab location */
1732 #endif
1733 /* Initialize the first segment table (or SLB) entry */
1734 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1735 bl .stab_initialize
1736
1737 /* Initialize the kernel stack. Just a repeat for iSeries. */
1738 LOADADDR(r3,current_set)
1739 sldi r28,r24,3 /* get current_set[cpu#] */
1740 ldx r1,r3,r28
1741 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1742 std r1,PACAKSAVE(r13)
1743
1744 ld r3,PACASTABREAL(r13) /* get raddr of segment table */
1745 ori r4,r3,1 /* turn on valid bit */
1746
1747 #ifdef CONFIG_PPC_ISERIES
1748 li r0,-1 /* hypervisor call */
1749 li r3,1
1750 sldi r3,r3,63 /* 0x8000000000000000 */
1751 ori r3,r3,4 /* 0x8000000000000004 */
1752 sc /* HvCall_setASR */
1753 #else
1754 /* set the ASR */
1755 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1756 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1757 cmpldi r3,PLATFORM_PSERIES_LPAR
1758 bne 98f
1759 mfspr r3,PVR
1760 srwi r3,r3,16
1761 cmpwi r3,0x37 /* SStar */
1762 beq 97f
1763 cmpwi r3,0x36 /* IStar */
1764 beq 97f
1765 cmpwi r3,0x34 /* Pulsar */
1766 bne 98f
1767 97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1768 HVSC /* Invoking hcall */
1769 b 99f
1770 98: /* !(rpa hypervisor) || !(star) */
1771 mtasr r4 /* set the stab location */
1772 99:
1773 #endif
1774 li r7,0
1775 mtlr r7
1776
1777 /* enable MMU and jump to start_secondary */
1778 LOADADDR(r3,.start_secondary_prolog)
1779 SET_REG_TO_CONST(r4, MSR_KERNEL)
1780 #ifdef DO_SOFT_DISABLE
1781 ori r4,r4,MSR_EE
1782 #endif
1783 mtspr SRR0,r3
1784 mtspr SRR1,r4
1785 rfid
1786 b . /* prevent speculative execution */
1787
1788 /*
1789 * Running with relocation on at this point. All we want to do is
1790 * zero the stack back-chain pointer before going into C code.
1791 */
1792 _GLOBAL(start_secondary_prolog)
1793 li r3,0
1794 std r3,0(r1) /* Zero the stack frame pointer */
1795 bl .start_secondary
1796 #endif
1797
1798 /*
1799 * This subroutine clobbers r11 and r12
1800 */
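/*
 * It sets MSR_SF (64-bit mode) and MSR_ISF (so that, on CPUs which
 * implement it, exceptions are also taken in 64-bit mode).
 */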
1801 _GLOBAL(enable_64b_mode)
1802 mfmsr r11 /* grab the current MSR */
1803 li r12,1
1804 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1805 or r11,r11,r12
1806 li r12,1
1807 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1808 or r11,r11,r12
1809 mtmsrd r11
1810 isync
1811 blr
1812
1813 #ifdef CONFIG_PPC_MULTIPLATFORM
1814 /*
1815 * This is where the main kernel code starts.
1816 */
1817 _STATIC(start_here_multiplatform)
1818 /* get a new offset, now that the kernel has moved. */
1819 bl .reloc_offset
1820 mr r26,r3
1821
1822 /* Clear out the BSS. It may have been done in prom_init already,
1823 * but that's irrelevant since prom_init will soon
1824 * be detached from the kernel completely. Besides, we need
1825 * to clear it now for kexec-style entry.
1826 */
1827 LOADADDR(r11,__bss_stop)
1828 LOADADDR(r8,__bss_start)
1829 sub r11,r11,r8 /* bss size */
1830 addi r11,r11,7 /* round up to an even double word */
1831 rldicl. r11,r11,61,3 /* shift right by 3 */
1832 beq 4f
1833 addi r8,r8,-8
1834 li r0,0
1835 mtctr r11 /* zero this many doublewords */
1836 3: stdu r0,8(r8)
1837 bdnz 3b
1838 4:
1839
1840 mfmsr r6
1841 ori r6,r6,MSR_RI
1842 mtmsrd r6 /* RI on */
1843
1844 #ifdef CONFIG_HMT
1845 /* Start up the second thread on cpu 0 */
1846 mfspr r3,PVR
1847 srwi r3,r3,16
1848 cmpwi r3,0x34 /* Pulsar */
1849 beq 90f
1850 cmpwi r3,0x36 /* Icestar */
1851 beq 90f
1852 cmpwi r3,0x37 /* SStar */
1853 beq 90f
1854 b 91f /* HMT not supported */
1855 90: li r3,0
1856 bl .hmt_start_secondary
1857 91:
1858 #endif
1859
1860 /* The following gets the stack and TOC set up with the regs */
1861 /* pointing to the real addr of the kernel stack. This is */
1862 /* all done to support the C function call below which sets */
1863 /* up the htab. This is done because we have relocated the */
1864 /* kernel but are still running in real mode. */
1865
1866 LOADADDR(r3,init_thread_union)
1867 sub r3,r3,r26
1868
1869 /* set up a stack pointer (physical address) */
1870 addi r1,r3,THREAD_SIZE
1871 li r0,0
1872 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1873
1874 /* set up the TOC (physical address) */
1875 LOADADDR(r2,__toc_start)
1876 addi r2,r2,0x4000
1877 addi r2,r2,0x4000
1878 sub r2,r2,r26
1879
1880 LOADADDR(r3,cpu_specs)
1881 sub r3,r3,r26
1882 LOADADDR(r4,cur_cpu_spec)
1883 sub r4,r4,r26
1884 mr r5,r26
1885 bl .identify_cpu
1886
1887 /* Save some low level config HIDs of CPU0 to be copied to
1888 * other CPUs later on, or used for suspend/resume
1889 */
1890 bl .__save_cpu_setup
1891 sync
1892
1893 /* Setup a valid physical PACA pointer in SPRG3 for early_setup
1894 * note that boot_cpuid can always be 0 nowadays since there is
1895 * nowhere it can be initialized differently before we reach this
1896 * code
1897 */
1898 LOADADDR(r27, boot_cpuid)
1899 sub r27,r27,r26
1900 lwz r27,0(r27)
1901
1902 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1903 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1904 add r13,r13,r24 /* for this processor. */
1905 sub r13,r13,r26 /* convert to physical addr */
1906 mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */
1907
1908 /* Do very early kernel initializations, including initial hash table,
1909 * stab and slb setup before we turn on relocation. */
1910
1911 /* Restore parameters passed from prom_init/kexec */
1912 mr r3,r31
1913 bl .early_setup
1914
1915 /* set the ASR */
1916 ld r3,PACASTABREAL(r13)
1917 ori r4,r3,1 /* turn on valid bit */
1918 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1919 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1920 cmpldi r3,PLATFORM_PSERIES_LPAR
1921 bne 98f
1922 mfspr r3,PVR
1923 srwi r3,r3,16
1924 cmpwi r3,0x37 /* SStar */
1925 beq 97f
1926 cmpwi r3,0x36 /* IStar */
1927 beq 97f
1928 cmpwi r3,0x34 /* Pulsar */
1929 bne 98f
1930 97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1931 HVSC /* Invoking hcall */
1932 b 99f
1933 98: /* !(rpa hypervisor) || !(star) */
1934 mtasr r4 /* set the stab location */
1935 99:
1936 /* Set SDR1 (hash table pointer) */
1937 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1938 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1939 /* Test if bit 0 is set (LPAR bit) */
1940 andi. r3,r3,0x1
1941 bne 98f
1942 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1943 sub r6,r6,r26
1944 ld r6,0(r6) /* get the value of _SDR1 */
1945 mtspr SDR1,r6 /* set the htab location */
1946 98:
1947 LOADADDR(r3,.start_here_common)
1948 SET_REG_TO_CONST(r4, MSR_KERNEL)
1949 mtspr SRR0,r3
1950 mtspr SRR1,r4
1951 rfid
1952 b . /* prevent speculative execution */
1953 #endif /* CONFIG_PPC_MULTIPLATFORM */
1954
1955 /* This is where all platforms converge execution */
1956 _STATIC(start_here_common)
1957 /* relocation is on at this point */
1958
1959 /* The following code sets up the SP and TOC now that we are */
1960 /* running with translation enabled. */
1961
1962 LOADADDR(r3,init_thread_union)
1963
1964 /* set up the stack */
1965 addi r1,r3,THREAD_SIZE
1966 li r0,0
1967 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1968
1969 /* Apply the CPU-specific fixups (nop out sections not relevant
1970 * to this CPU)
1971 */
1972 li r3,0
1973 bl .do_cpu_ftr_fixups
1974
1975 LOADADDR(r26, boot_cpuid)
1976 lwz r26,0(r26)
1977
1978 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1979 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1980 add r13,r13,r24 /* for this processor. */
1981 mtspr SPRG3,r13
1982
1983 /* ptr to current */
1984 LOADADDR(r4,init_task)
1985 std r4,PACACURRENT(r13)
1986
1987 /* Load the TOC */
1988 ld r2,PACATOC(r13)
1989 std r1,PACAKSAVE(r13)
1990
1991 bl .setup_system
1992
1993 /* Load up the kernel context */
1994 5:
1995 #ifdef DO_SOFT_DISABLE
1996 li r5,0
1997 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
1998 mfmsr r5
1999 ori r5,r5,MSR_EE /* Hard Enabled */
2000 mtmsrd r5
2001 #endif
2002
2003 bl .start_kernel
2004
2005 _GLOBAL(__setup_cpu_power3)
2006 blr
2007
2008 _GLOBAL(hmt_init)
2009 #ifdef CONFIG_HMT
2010 LOADADDR(r5, hmt_thread_data)
2011 mfspr r7,PVR
2012 srwi r7,r7,16
2013 cmpwi r7,0x34 /* Pulsar */
2014 beq 90f
2015 cmpwi r7,0x36 /* Icestar */
2016 beq 91f
2017 cmpwi r7,0x37 /* SStar */
2018 beq 91f
2019 b 101f
2020 90: mfspr r6,PIR
2021 andi. r6,r6,0x1f
2022 b 92f
2023 91: mfspr r6,PIR
2024 andi. r6,r6,0x3ff
2025 92: sldi r4,r24,3
2026 stwx r6,r5,r4
2027 bl .hmt_start_secondary
2028 b 101f
2029
2030 __hmt_secondary_hold:
2031 LOADADDR(r5, hmt_thread_data)
2032 clrldi r5,r5,4
2033 li r7,0
2034 mfspr r6,PIR
2035 mfspr r8,PVR
2036 srwi r8,r8,16
2037 cmpwi r8,0x34
2038 bne 93f
2039 andi. r6,r6,0x1f
2040 b 103f
2041 93: andi. r6,r6,0x3f
2042
2043 103: lwzx r8,r5,r7
2044 cmpw r8,r6
2045 beq 104f
2046 addi r7,r7,8
2047 b 103b
2048
2049 104: addi r7,r7,4
2050 lwzx r9,r5,r7
2051 mr r24,r9
2052 101:
2053 #endif
2054 mr r3,r24
2055 b .pSeries_secondary_smp_init
2056
2057 #ifdef CONFIG_HMT
2058 _GLOBAL(hmt_start_secondary)
2059 LOADADDR(r4,__hmt_secondary_hold)
2060 clrldi r4,r4,4
2061 mtspr NIADORM, r4
2062 mfspr r4, MSRDORM
2063 li r5, -65
2064 and r4, r4, r5
2065 mtspr MSRDORM, r4
2066 lis r4,0xffef
2067 ori r4,r4,0x7403
2068 mtspr TSC, r4
2069 li r4,0x1f4
2070 mtspr TST, r4
2071 mfspr r4, HID0
2072 ori r4, r4, 0x1
2073 mtspr HID0, r4
2074 mfspr r4, SPRN_CTRLF
2075 oris r4, r4, 0x40
2076 mtspr SPRN_CTRLT, r4
2077 blr
2078 #endif
2079
2080 #if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
2081 _GLOBAL(smp_release_cpus)
2082 /* All secondary cpus are spinning on a common
2083 * spinloop, release them all now so they can start
2084 * to spin on their individual paca spinloops.
2085 * For non SMP kernels, the secondary cpus never
2086 * get out of the common spinloop.
2087 */
2088 li r3,1
2089 LOADADDR(r5,__secondary_hold_spinloop)
2090 std r3,0(r5)
2091 sync
2092 blr
2093 #endif /* CONFIG_KEXEC || (CONFIG_SMP && !CONFIG_PPC_ISERIES) */
2094
2095
2096 /*
2097 * We put a few things here that have to be page-aligned.
2098 * This stuff goes at the beginning of the data segment,
2099 * which is page-aligned.
2100 */
2101 .data
2102 .align 12
2103 .globl sdata
2104 sdata:
2105 .globl empty_zero_page
2106 empty_zero_page:
2107 .space 4096
2108
2109 .globl swapper_pg_dir
2110 swapper_pg_dir:
2111 .space 4096
2112
2113 /*
2114 * This space gets a copy of optional info passed to us by the bootstrap.
2115 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
2116 */
2117 .globl cmd_line
2118 cmd_line:
2119 .space COMMAND_LINE_SIZE