/*
 *  arch/ppc64/kernel/head.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iSeries/LparMap.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * SPRG Usage
 *
 * Register	Definition
 *
 * SPRG0	reserved for hypervisor
 * SPRG1	temp - used to save gpr
 * SPRG2	temp - used to save gpr
 * SPRG3	virt addr of paca
 */
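
/*
 * For orientation, the usual pattern for reaching per-CPU state from an
 * exception prolog is (a sketch, not code assembled anywhere by itself;
 * SOME_FIELD stands for any paca offset):
 *
 *	mfspr	r13,SPRG3		; r13 = virt addr of this CPU's paca
 *	ld	rN,SOME_FIELD(r13)	; paca fields are then plain loads
 *
 * Keeping the paca pointer in an SPR matters because every GPR is live
 * at exception entry; SPRG1/SPRG2 provide scratch space to free one up.
 */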

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

	.text
	.globl	_stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the mschunks_map
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong mschunks_map-KERNELBASE
	.llong 0		/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#endif /* CONFIG_PPC_ISERIES */

	/* Secondary processors spin on this value until it goes to 1. */
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	   */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
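/*
 * The handshake with the master, in C-like pseudocode (illustrative
 * only; the authoritative sequence is the assembly below together with
 * smp_release_cpus later in this file):
 *
 *	secondary:	__secondary_hold_acknowledge = my_phys_cpu_id;
 *			while (__secondary_hold_spinloop != 1)
 *				;
 *			pSeries_secondary_smp_init(my_phys_cpu_id);
 *
 *	master:		__secondary_hold_spinloop = 1;	// smp_release_cpus()
 */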
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.	  */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE
#endif
#endif

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
#define EX_DAR		48
#define EX_LR		48	/* SLB miss saves LR, but not DAR */
#define EX_DSISR	56
#define EX_CCR		60

#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SRR0;		/* save SRR0 */			\
	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SRR0,r12;						\
	mfspr	r12,SRR1;		/* and SRR1 */			\
	mtspr	SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */
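
/*
 * In rough C-like pseudocode (a sketch of the effect, not code that is
 * assembled anywhere), the pSeries prolog above does:
 *
 *	paca->area[R9..R13] = r9..r13;		// stash GPRs in the paca
 *	r9 = CR; r11 = SRR0; r12 = SRR1;	// capture entry state
 *	SRR0 = virt_addr(label);		// where rfid will resume
 *	SRR1 = MSR | IR | DR | RI;		// ...with relocation on
 *	rfid;
 *
 * i.e. it bridges from the real-mode vector to the virtual-mode
 * label##_common code with all volatile state preserved.
 */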

/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user */	\
	mr	r10,r1;			/* Save r1 */			\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use */	\
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace */ \
	bge-	cr1,bad_stack;		/* abort if it is */		\
	std	r9,_CCR(r1);		/* save CR in stackframe */	\
	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
	std	r12,_MSR(r1);		/* save SRR1 in stackframe */	\
	std	r10,0(r1);		/* make stack chain pointer */	\
	std	r0,GPR0(r1);		/* save r0 in stackframe */	\
	std	r10,GPR1(r1);		/* save r1 in stackframe */	\
	std	r2,GPR2(r1);		/* save r2 in stackframe */	\
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	\
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe */ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe */ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
	mflr	r9;			/* save LR in stackframe */	\
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe */	\
	std	r10,_CTR(r1);						\
	mfspr	r11,XER;		/* save XER in stackframe */	\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number */		\
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result */	\
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */

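/*
 * After EXCEPTION_PROLOG_COMMON the kernel stack holds what C code sees
 * as a struct pt_regs.  Roughly (field names as in the kernel's pt_regs;
 * a sketch for orientation, not a layout definition):
 *
 *	struct pt_regs {
 *		unsigned long gpr[32];	// GPR0..GPR13 valid at this point
 *		unsigned long nip;	// from SRR0
 *		unsigned long msr;	// from SRR1
 *		...
 *		unsigned long trap;	// (n)+1 from the macro argument
 *		unsigned long result;	// cleared
 *	};
 *
 * The exception_marker doubleword stored just below the frame is what
 * lets stack walkers recognize exception frames.
 */
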
/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

#define MASKABLE_EXCEPTION_ISERIES(n, label)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);		\
	lbz	r10,PACAPROCENABLED(r13);		\
	cmpwi	0,r10,0;				\
	beq-	label##_iSeries_masked;			\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common;				\

#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	li	r11,0;				\
	std	r10,SOFTE(r1);			\
	mfmsr	r10;				\
	stb	r11,PACAPROCENABLED(r13);	\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1

#define ENABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	mfmsr	r11;				\
	std	r10,SOFTE(r1);			\
	ori	r11,r11,MSR_EE;			\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#endif
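
/*
 * Note the soft-disable trick above: under DO_SOFT_DISABLE (iSeries),
 * DISABLE_INTS records the old soft-enable state in the exception frame
 * and marks the processor soft-disabled, but then hard-ENABLES MSR_EE.
 * In C-like pseudocode (a sketch of the effect):
 *
 *	regs->softe = paca->proc_enabled;	// remember old state
 *	paca->proc_enabled = 0;			// soft-disabled
 *	mtmsrd(mfmsr() | MSR_EE);		// but hard-enabled
 *
 * Interrupts arriving while soft-disabled are noted (see the *_masked
 * handlers below) and replayed when the soft-enable state is restored,
 * which is cheaper than masking in hardware on a shared processor.
 */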

#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite

/*
 * Start of pSeries system interrupt routines
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,DAR
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,SRR0			/* SRR0 is faulting address */
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRG3
	mfspr	r11,SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SRR1
	mtspr	SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)

	.align	7
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#ifdef CONFIG_PPC_ISERIES
/*** ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl	data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRG1,r13	/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mfspr	r3,DAR
	b	.do_slb_miss

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl	instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRG1,r13	/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)
	b	.do_slb_miss

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl	system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES(0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
	bne	.__secondary_start		/* Told to go: start up */
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9		/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,DAR
	mfspr	r12,DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
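/*
 * Ordering note (a reading aid, not new behaviour): RI is cleared in
 * the MSR before SRR0/SRR1 are loaded below, so if anything did trap
 * in that window the hardware would know SRR0/SRR1 were live and the
 * exception would be treated as unrecoverable rather than silently
 * corrupting the return state.
 */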
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SRR1,r12
	mtspr	SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)	/* It won't fit in the 0x300 handler */
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altvec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */

/*
 * Hash table stuff
 */
	.align	7
_GLOBAL(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	.do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
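	/*
	 * Worked example of the bit-twiddling below (a reading aid):
	 * rotldi moves the address's top bit into the MSR_PR bit
	 * position, and "orc r0,r12,r0" computes MSR | ~that_bit, so
	 * the PR-position bit of r0 ends up set iff MSR_PR was set or
	 * the high address bit was clear.  E.g. a kernel access to a
	 * 0xC000... address gives PR=0, high bit=1, so _PAGE_USER
	 * stays 0; a kernel access to a user address (high bit 0)
	 * forces _PAGE_USER on.
	 */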
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	.ret_from_except_lite
	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	bl	.local_irq_restore
	b	11f
#else
	beq	fast_exception_return	/* Return from exception on success */
	ble-	12f			/* Failure return from hash_page */

	/* fall through */
#endif

/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	.ret_from_except_lite
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

	/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	beq+	fast_exception_return
	b	.handle_page_fault

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
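/*
 * Indexing sketch (inferred from the instruction encodings below, for
 * orientation): the 4096-byte segment table holds 32 groups of eight
 * 16-byte STEs, and "rldimi r10,r11,7,52" inserts the low 5 bits of
 * the ESID at bit positions 7-11, i.e.
 *
 *	group = stab_base + (esid & 0x1f) * 128;
 *
 * so each ESID hashes to one 128-byte primary group.
 */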
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick for only searching the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,DAR		/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(do_slb_miss)
	mflr	r10

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate		/* handle it */

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	unrecov_slb

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
	mtspr	SRR0,r11
	mtspr	SRR1,r12
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap in lpardata.c), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_PHYS_ADDR	/* 0x6000 */
	.globl initial_stab
initial_stab:
	.space	4096

/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
#include "lparmap.s"
/*
 * This ".text" is here for old compilers that generate a trailing
 * .note section when compiling .c files to .s
 */
	.text
#endif /* CONFIG_PPC_ISERIES */

	. = 0x8000

/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(pSeries_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOADADDR(r13, paca)		/* Get base vaddr of paca array	 */
	li	r5,0			/* logical cpu id		 */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca	 */
	cmpw	r6,r24			/* Compare to our id		 */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss	 */
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

2:	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	sync

	/* Create a temp kernel stack for use before relocation is on.	 */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
#ifdef CONFIG_SMP
	bne	.__secondary_start
#endif
	b	3b			/* Loop until told to go	 */

#ifdef CONFIG_PPC_ISERIES
_STATIC(__start_initialization_iSeries)
	/* Clear out the BSS */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			 */
	addi	r11,r11,7		/* round up to an even double word */
	rldicl. r11,r11,61,3		/* shift right by 3		 */
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	 */
3:	stdu	r0,8(r8)
	bdnz	3b
4:
	LOADADDR(r1,init_thread_union)
	addi	r1,r1,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)
	li	r5,0
	bl	.identify_cpu

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	bl	.iSeries_early_setup
	bl	.early_setup

	/* relocation is on at this point */

	b	.start_here_common
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_MULTIPLATFORM

_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */

/*
 * Here is our main kernel entry point. We currently support two kinds
 * of entry, depending on the value of r5.
 *
 * r5 != NULL -> OF entry: we go to prom_init, with the "legacy"
 *               parameters in r3...r7
 *
 * r5 == NULL -> kexec-style entry: r3 is a physical pointer to the
 *               DT block, r4 is a physical pointer to the kernel itself
 *
 */
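/*
 * Viewed as a C prototype, the contract is roughly (a sketch for
 * orientation only; nothing declares this signature anywhere):
 *
 *	void __start(void *r3,	// phys ptr to DT blob (kexec case)
 *		     void *r4,	// phys ptr to the kernel (kexec case)
 *		     void *r5,	// OF entry point, or NULL for kexec
 *		     ...);	// r6, r7: legacy OF parameters
 */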
_GLOBAL(__start_initialization_multiplatform)
	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi	cr0,r5,0
	bne	.__boot_from_prom		/* yes -> prom */

	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Setup some critical 970 SPRs before switching MMU off */
	bl	.__970_cpu_preinit

	/* cpu # */
	li	r24,0

	/* Switch off MMU if not already */
	LOADADDR(r4, .__after_prom_start - KERNELBASE)
	add	r4,r4,r30
	bl	.__mmu_off
	b	.__after_prom_start

_STATIC(__boot_from_prom)
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* put a relocation offset into r3 */
	bl	.reloc_offset

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	/* Relocate the TOC from a virt addr to a real addr */
	sub	r2,r2,r3

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	bl	.prom_init
	/* We never return */
	trap

/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 *	r27 == KERNELBASE
 */
	bl	.reloc_offset
	mr	r26,r3
	SET_REG_TO_CONST(r27,KERNELBASE)

	li	r3,0			/* target addr */

	// XXX FIXME: Use phys returned by OF (r30)
	sub	r4,r27,r26		/* source addr			 */
					/* current address of _start	 */
					/*   i.e. where we are running	 */
					/*	the source addr		 */

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */

	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
	mtctr	r0			/* that we just made/relocated	 */
	bctr

4:	LOADADDR(r5,klimit)
	sub	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit */
	sub	r5,r5,r27
	bl	.copy_and_flush		/* copy the rest */
	b	.start_here_multiplatform

#endif /* CONFIG_PPC_MULTIPLATFORM */

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
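/*
 * C-equivalent sketch of the loop below (illustrative only, not built;
 * dcbst/sync/icbi stand for the corresponding instructions, and the
 * fixed stride is 16 doublewords, i.e. 128 bytes per "line"):
 *
 *	for (off = start; off < limit; ) {
 *		for (int i = 0; i < 16; i++, off += 8)	// one "line"
 *			*(long *)(dst + off) = *(long *)(src + off);
 *		dcbst(dst + off);	// push the line out to memory
 *		sync();
 *		icbi(dst + off);	// discard any stale icache copy
 *	}
 *
 * The deliberately conservative line size trades extra flushes for
 * correctness on any CPU, as the comments in the code explain.
 */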
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16			/* Use the least common		 */
					/* denominator cache line	 */
					/* size.  This results in	 */
					/* extra cache line flushes	 */
					/* but operation is correct.	 */
					/* Can't get cache line size	 */
					/* from NACA as it is being	 */
					/* moved too.			 */

	mtctr	r0			/* put # words/line in ctr	 */
3:	addi	r6,r6,8			/* copy a cache line		 */
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		 */
	sync
	icbi	r6,r3			/* flush the icache line	 */
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	pmac_secondary_start_1
pmac_secondary_start_1:
	li	r24, 1
	b	.pmac_secondary_start

	.globl	pmac_secondary_start_2
pmac_secondary_start_2:
	li	r24, 2
	b	.pmac_secondary_start

	.globl	pmac_secondary_start_3
pmac_secondary_start_3:
	li	r24, 3
	b	.pmac_secondary_start

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* pSeries does this early, though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOADADDR(r4, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		 */
	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */

	/* Create a temp kernel stack for use before relocation is on.	 */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	   = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)

	HMT_MEDIUM			/* Set thread priority to MEDIUM */

	ld	r2,PACATOC(r13)
	li	r6,0
	stb	r6,PACAPROCENABLED(r13)

#ifndef CONFIG_PPC_ISERIES
	/* Initialize the page table pointer register. */
	LOADADDR(r6,_SDR1)
	ld	r6,0(r6)		/* get the value of _SDR1	 */
	mtspr	SDR1,r6			/* set the htab location	 */
#endif
	/* Initialize the first segment table (or SLB) entry		 */
	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
	bl	.stab_initialize

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
	ori	r4,r3,1			/* turn on valid bit		 */

#ifdef CONFIG_PPC_ISERIES
	li	r0,-1			/* hypervisor call */
	li	r3,1
	sldi	r3,r3,63		/* 0x8000000000000000 */
	ori	r3,r3,4			/* 0x8000000000000004 */
	sc				/* HvCall_setASR */
#else
	/* set the ASR */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
	ld	r3,0(r3)
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
	beq	98f			/* branch if result is 0  */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:					/* !(rpa hypervisor) || !(star)	 */
	mtasr	r4			/* set the stab location	 */
99:
#endif
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
#endif

/*
 * This subroutine clobbers r11 and r12
 */
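/*
 * Effect in C-like terms (a sketch; MSR_SF selects 64-bit mode and
 * MSR_ISF 64-bit mode for exception entry, per the MSR_*_LG bit
 * numbers used below):
 *
 *	mtmsrd(mfmsr() | (1UL << MSR_SF_LG) | (1UL << MSR_ISF_LG));
 */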
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr

#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			 */
	addi	r11,r11,7		/* round up to an even double word */
	rldicl. r11,r11,61,3		/* shift right by 3		 */
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	 */
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_HMT
	/* Start up the second thread on cpu 0 */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar   */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
	bl	.hmt_start_secondary
91:
#endif

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOADADDR(r3,init_thread_union)
	sub	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	sub	r2,r2,r26

	LOADADDR(r3,cpu_specs)
	sub	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	sub	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* Save some low level config HIDs of CPU0 to be copied to
	 * other CPUs later on, or used for suspend/resume
	 */
	bl	.__save_cpu_setup
	sync

	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
	 * note that boot_cpuid can always be 0 nowadays since there is
	 * nowhere it can be initialized differently before we reach this
	 * code
	 */
	LOADADDR(r27, boot_cpuid)
	sub	r27,r27,r26
	lwz	r27,0(r27)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	sub	r13,r13,r26		/* convert to physical addr	 */
	mtspr	SPRG3,r13		/* PPPBBB: Temp... -Peter */

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup

	/* set the ASR */
	ld	r3,PACASTABREAL(r13)
	ori	r4,r3,1			/* turn on valid bit		 */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
	ld	r3,0(r3)
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
	beq	98f			/* branch if result is 0  */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:					/* !(rpa hypervisor) || !(star)	 */
	mtasr	r4			/* set the stab location	 */
99:
	/* Set SDR1 (hash table pointer) */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
	ld	r3,0(r3)
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	/* Test if bit 0 is set (LPAR bit) */
	andi.	r3,r3,PLATFORM_LPAR
	bne	98f			/* branch if result is !0  */
	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
	sub	r6,r6,r26
	ld	r6,0(r6)		/* get the value of _SDR1 */
	mtspr	SDR1,r6			/* set the htab location  */
98:
	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU)
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOADADDR(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRG3,r13

	/* ptr to current */
	LOADADDR(r4,init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl	.start_kernel

_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f
90:	mfspr	r6,PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f

__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4
	li	r7,0
	mfspr	r6,PIR
	mfspr	r8,PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3f

103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b

104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
101:
#endif
	mr	r3,r24
	b	.pSeries_secondary_smp_init

#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	NIADORM, r4
	mfspr	r4, MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	TSC, r4
	li	r4,0x1f4
	mtspr	TST, r4
	mfspr	r4, HID0
	ori	r4, r4, 0x1
	mtspr	HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif

#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
_GLOBAL(smp_release_cpus)
	/* All secondary cpus are spinning on a common
	 * spinloop, release them all now so they can start
	 * to spin on their individual paca spinloops.
	 * For non SMP kernels, the secondary cpus never
	 * get out of the common spinloop.
	 * XXX This does nothing useful on iSeries, secondaries are
	 * already waiting on their paca.
	 */
	li	r3,1
	LOADADDR(r5,__secondary_hold_spinloop)
	std	r3,0(r5)
	sync
	blr
#endif /* CONFIG_KEXEC || CONFIG_SMP */

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PAGE_SIZE

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE