[PATCH] ppc64: dynamically allocate segment tables

arch/ppc64/kernel/head.S
/*
 * arch/ppc64/kernel/head.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define SECONDARY_PROCESSORS

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/naca.h>
#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * hcall interface to pSeries LPAR
 */
#define H_SET_ASR	0x30

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x3fff : Interrupt support
 * 0x4000 - 0x4fff : NACA
 * 0x6000          : iSeries and common interrupt prologs
 * 0x9000 - 0x9fff : Initial segment table
 */

/*
 * SPRG Usage
 *
 * Register	Definition
 *
 * SPRG0	reserved for hypervisor
 * SPRG1	temp - used to save gpr
 * SPRG2	temp - used to save gpr
 * SPRG3	virt addr of paca
 */
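
/*
 * In C terms, SPRG3 is the per-processor base pointer: all of the
 * exception prologs below begin by reloading r13 from it.  A rough
 * sketch (illustrative only; the SPR number and accessor name are
 * assumptions, not from this file):
 *
 *	struct paca_struct *get_paca(void)
 *	{
 *		struct paca_struct *paca;
 *
 *		// corresponds to "mfspr r13,SPRG3" in the prologs
 *		asm volatile("mfspr %0,275" : "=r" (paca));	// 275 == SPRG3
 *		return paca;
 *	}
 */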

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

	.text
	.globl	_stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap
#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor.
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the msChunks
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger).
	 */
	.llong msChunks-KERNELBASE
	.llong 0		/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#else /* CONFIG_PPC_ISERIES */

	/* Secondary processors spin on this value until it goes to 1. */
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	   */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.	  */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE
#endif
#endif
#endif

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
#define EX_DAR		48
#define EX_LR		48	/* SLB miss saves LR, but not DAR */
#define EX_DSISR	56
#define EX_CCR		60
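
/*
 * Read as a C structure, the paca save area that these offsets
 * describe looks roughly like this (a sketch; only the byte offsets
 * above are authoritative):
 *
 *	struct ex_save_area {
 *		u64 r9, r10, r11, r12, r13;	// EX_R9 .. EX_R13
 *		u64 srr0;	// EX_SRR0, reused as EX_R3 by SLB misses
 *		u64 dar;	// EX_DAR, reused as EX_LR by SLB misses
 *		u32 dsisr;	// EX_DSISR
 *		u32 ccr;	// EX_CCR
 *	};
 */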

#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SRR0;		/* save SRR0 */			\
	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SRR0,r12;						\
	mfspr	r12,SRR1;		/* and SRR1 */			\
	mtspr	SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */

/*
 * This is the start of the interrupt handlers for iSeries.
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user */	\
	mr	r10,r1;			/* Save r1 */			\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use */	\
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace */ \
	bge-	cr1,bad_stack;		/* abort if it is */		\
	std	r9,_CCR(r1);		/* save CR in stackframe */	\
	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
	std	r12,_MSR(r1);		/* save SRR1 in stackframe */	\
	std	r10,0(r1);		/* make stack chain pointer */	\
	std	r0,GPR0(r1);		/* save r0 in stackframe */	\
	std	r10,GPR1(r1);		/* save r1 in stackframe */	\
	std	r2,GPR2(r1);		/* save r2 in stackframe */	\
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	\
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe */ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe */ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
	mflr	r9;			/* save LR in stackframe */	\
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe */	\
	std	r10,_CTR(r1);						\
	mfspr	r11,XER;		/* save XER in stackframe */	\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number */		\
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result */	\
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
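
/*
 * The stack handling at the top of EXCEPTION_PROLOG_COMMON is, in C
 * terms (a sketch):
 *
 *	sp = old_sp - INT_FRAME_SIZE;	// frame on the current stack
 *	if (srr1 & MSR_PR)
 *		sp = paca->kstack;	// from user: switch stacks
 *	if ((long)sp >= 0)		// kernel addrs have the top bit set
 *		goto bad_stack;		// bogus r1: panic on emergency stack
 */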

/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

#define MASKABLE_EXCEPTION_ISERIES(n, label)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);		\
	lbz	r10,PACAPROCENABLED(r13);		\
	cmpwi	0,r10,0;				\
	beq-	label##_iSeries_masked;			\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common;				\

#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	li	r11,0;				\
	std	r10,SOFTE(r1);			\
	mfmsr	r10;				\
	stb	r11,PACAPROCENABLED(r13);	\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1

#define ENABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	mfmsr	r11;				\
	std	r10,SOFTE(r1);			\
	ori	r11,r11,MSR_EE;			\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#endif
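
/*
 * Under DO_SOFT_DISABLE, "disabling" interrupts only clears
 * paca->proc_enabled; MSR_EE is deliberately left on so the hypervisor
 * still sees the processor take interrupts, which the masked handlers
 * record for later.  In C, a sketch of the two macros:
 *
 *	// DISABLE_INTS
 *	regs->softe = paca->proc_enabled;	// save soft-enable state
 *	paca->proc_enabled = 0;			// soft-disable
 *	mtmsrd(mfmsr() | MSR_EE, 1);		// but stay hard-enabled
 *
 *	// ENABLE_INTS
 *	regs->softe = paca->proc_enabled;
 *	mtmsrd(mfmsr() | MSR_EE, 1);
 */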

#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite

/*
 * Start of pSeries system interrupt routines
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,DAR
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,SRR0			/* SRR0 is faulting address */
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRG3
	mfspr	r11,SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SRR1
	mtspr	SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one.  A little
	 * trickery is thus necessary.
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(0x3000, performance_monitor)

	. = 0x3100
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)


	/* Space for the naca.  Architected to be located at real address
	 * NACA_PHYS_ADDR.  Various tools rely on this location being fixed.
	 * The first dword of the naca is required by iSeries LPAR to
	 * point to itVpdAreas.  On pSeries native, this value is not used.
	 */
	. = NACA_PHYS_ADDR
	.globl __end_interrupts
__end_interrupts:
#ifdef CONFIG_PPC_ISERIES
	.globl naca
naca:
	.llong itVpdAreas

	/*
	 * The iSeries LPAR map is at this fixed address
	 * so that the HvReleaseData structure can address
	 * it with a 32-bit offset.
	 *
	 * The VSID values below are dependent on the
	 * VSID generation algorithm.  See include/asm/mmu_context.h.
	 */
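
	/*
	 * The VSID literals below come from the kernel's VSID scramble
	 * (see ASM_VSID_SCRAMBLE in include/asm/mmu_context.h), which is
	 * schematically:
	 *
	 *	// protovsid == ESID for kernel addresses
	 *	vsid = (protovsid * VSID_MULTIPLIER) % VSID_MODULUS;
	 *
	 * e.g. the KERNELBASE VSID is the scramble of
	 * (KERNELBASE >> SID_SHIFT).  If the algorithm or its constants
	 * change, these literals must be regenerated to match.
	 */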

	. = 0x4800

	.llong	2		/* # ESIDs to be mapped by hypervisor	*/
	.llong	1		/* # memory ranges to be mapped by hypervisor */
	.llong	STAB0_PAGE	/* Page # of segment table within load area */
	.llong	0		/* Reserved */
	.llong	0		/* Reserved */
	.llong	0		/* Reserved */
	.llong	0		/* Reserved */
	.llong	0		/* Reserved */
	.llong	(KERNELBASE>>SID_SHIFT)
	.llong	0x408f92c94	/* KERNELBASE VSID */
	/* We have to list the bolted VMALLOC segment here, too, so that it
	 * will be restored on shared processor switch. */
	.llong	(VMALLOCBASE>>SID_SHIFT)
	.llong	0xf09b89af5	/* VMALLOCBASE VSID */
	.llong	8192		/* # pages to map (32 MB) */
	.llong	0		/* Offset from start of loadarea to start of map */
	.llong	0x408f92c940000	/* VPN of first page to map */

	. = 0x6100

/***  ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl	data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mfspr	r3,DAR
	b	.do_slb_miss

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl	instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)
	b	.do_slb_miss

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl	system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES(0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start		/* Loop until told to go */
#endif
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9		/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif

/*
 * Data area reserved for FWNMI option.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

/*
 * Vectors for the FWNMI option.  Share common code.
 */
	. = 0x8000
	.globl system_reset_fwnmi
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	/*
	 * Space for the initial segment table.
	 * For LPAR, the hypervisor must fill in at least one entry
	 * before we get control (with relocate on).
	 */
	. = STAB0_PHYS_ADDR
	.globl __start_stab
__start_stab:

	. = (STAB0_PHYS_ADDR + PAGE_SIZE)
	.globl __end_stab
__end_stab:

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using the paca guard page as an emergency stack,
 * save the registers there, and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,DAR
	mfspr	r12,DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SRR1,r12
	mtspr	SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)	/* It won't fit in the 0x300 handler */
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

/*
 * Hash table stuff
 */
	.align	7
_GLOBAL(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	.do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel).  We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
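
	/*
	 * In C, the bit-twiddling above computes (a sketch; masks per
	 * the _PAGE_* definitions in this tree):
	 *
	 *	access = (dsisr & DSISR_STORE) ? _PAGE_RW : 0;
	 *	if ((msr & MSR_PR) || !(dar >> 63))	// user, or user segment
	 *		access |= _PAGE_USER;
	 *	access |= _PAGE_PRESENT;
	 *	if (trap == 0x400)			// instruction fault
	 *		access |= _PAGE_EXEC;
	 */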

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	.ret_from_except_lite
	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	bl	.local_irq_restore
	b	11f
#else
	beq	fast_exception_return	/* Return from exception on success */
	ble-	12f			/* Failure return from hash_page */

	/* fall through */
#endif

/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	.ret_from_except_lite
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

	/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	beq+	fast_exception_return
	b	.handle_page_fault

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
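
/*
 * In outline, the STE insertion below is (a C sketch, using the
 * architected 128-byte groups of eight 16-byte entries; the field
 * names are illustrative):
 *
 *	group = stab + ((dar >> 28) & 0x1f) * 128;	// hash to a group
 *	for (ste = group; ste < group + 8; ste++)
 *		if (!(ste->dword0 & STE_VALID))
 *			goto found;
 *	ste = group + ((mftb() & 0x7) | 1);	// random castout, never entry 0
 *	ste->dword0 &= ~STE_VALID;		// invalidate, then slbie
 * found:
 *	ste->dword1 = vsid << 12;
 *	eieio();				// vsid visible before valid bit
 *	ste->dword0 = (dar & ~0xfffffffUL) | STE_VALID | STE_KP;
 */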
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to searching only the primary group for now.		 */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number; OR in 1 to avoid entry 0	 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			  */
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,DAR		/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9		/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(do_slb_miss)
	mflr	r10

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate			/* handle it */

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	unrecov_slb

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
	mtspr	SRR0,r11
	mtspr	SRR1,r12
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(pSeries_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* Set up a paca value for this processor.  Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOADADDR(r13, paca)		/* Get base vaddr of paca array	 */
	li	r5,0			/* logical cpu id		 */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca	 */
	cmpw	r6,r24			/* Compare to our id		 */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss	 */
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b
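
	/* Equivalent C for the search above (a sketch):
	 *
	 *	for (i = 0; i < NR_CPUS; i++)
	 *		if (paca[i].hw_cpu_id == phys_id)
	 *			break;		// r13 = &paca[i], r24 = i
	 *	if (i == NR_CPUS)
	 *		kexec_wait(phys_id);	// unknown cpu: park it
	 */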

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

2:	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
	/* From now on, r24 is expected to be the logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	sync

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
#ifdef CONFIG_SMP
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start
#endif
#endif
	b	3b			/* Loop until told to go	 */

#ifdef CONFIG_PPC_ISERIES
_STATIC(__start_initialization_iSeries)
	/* Clear out the BSS */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			 */
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3		 */
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	 */
3:	stdu	r0,8(r8)
	bdnz	3b
4:
	LOADADDR(r1,init_thread_union)
	addi	r1,r1,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)
	li	r5,0
	bl	.identify_cpu

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
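
	/* The TOC pointer lives 0x8000 past __toc_start so that 16-bit
	 * signed offsets reach both halves of the TOC; addi's immediate
	 * is only 16 bits signed, hence the two 0x4000 steps:
	 *
	 *	r2 = &__toc_start + 0x4000 + 0x4000;	// == + 0x8000
	 */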

	bl	.iSeries_early_setup

	/* relocation is on at this point */

	b	.start_here_common
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_MULTIPLATFORM

_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */


/*
 * Here is our main kernel entry point.  We currently support two kinds
 * of entry, depending on the value of r5.
 *
 *   r5 != NULL -> OF entry: we go to prom_init, with the "legacy"
 *                 parameters in r3...r7
 *
 *   r5 == NULL -> kexec-style entry: r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi	cr0,r5,0
	bne	.__boot_from_prom	/* yes -> prom */

	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

	/* Make sure we are running in 64-bit mode */
	bl	.enable_64b_mode

	/* Setup some critical 970 SPRs before switching MMU off */
	bl	.__970_cpu_preinit

	/* cpu # */
	li	r24,0

	/* Switch off MMU if not already off */
	LOADADDR(r4, .__after_prom_start - KERNELBASE)
	add	r4,r4,r30
	bl	.__mmu_off
	b	.__after_prom_start

_STATIC(__boot_from_prom)
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* Make sure we are running in 64-bit mode */
	bl	.enable_64b_mode

	/* put a relocation offset into r3 */
	bl	.reloc_offset

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	/* Relocate the TOC from a virt addr to a real addr */
	sub	r2,r2,r3

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	bl	.prom_init
	/* We never return */
	trap

/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 *	r27 == KERNELBASE
 */
	bl	.reloc_offset
	mr	r26,r3
	SET_REG_TO_CONST(r27,KERNELBASE)

	li	r3,0			/* target addr */

	// XXX FIXME: Use phys returned by OF (r30)
	sub	r4,r27,r26		/* source addr			 */
					/* current address of _start	 */
					/* i.e. where we are running	 */
					/* the source addr		 */

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */

	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
	mtctr	r0			/* that we just made/relocated	 */
	bctr

4:	LOADADDR(r5,klimit)
	sub	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit */
	sub	r5,r5,r27
	bl	.copy_and_flush		/* copy the rest */
	b	.start_here_multiplatform

#endif /* CONFIG_PPC_MULTIPLATFORM */

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
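
/*
 * A C sketch of the copy loop below (16 doublewords, i.e. one
 * conservative 128-byte line, per iteration):
 *
 *	while (off < limit) {
 *		for (i = 0; i < 16; i++, off += 8)
 *			*(u64 *)(dst + off) = *(u64 *)(src + off);
 *		dcbst(dst + off);	// push the line out to memory...
 *		sync();
 *		icbi(dst + off);	// ...and discard any stale icache
 *	}
 *	sync();		// all stores complete before we branch into the copy
 */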
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16			/* Use the least common		 */
					/* denominator cache line	 */
					/* size.  This results in	 */
					/* extra cache line flushes	 */
					/* but operation is correct.	 */
					/* Can't get cache line size	 */
					/* from NACA as it is being	 */
					/* moved too.			 */

	mtctr	r0			/* put # words/line in ctr	 */
3:	addi	r6,r6,8			/* copy a cache line		 */
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		 */
	sync
	icbi	r6,r3			/* flush the icache line	 */
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

.align 8
copy_to_here:

/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (ie, no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
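
/*
 * The UP lazy-switch protocol implemented below, in C (a sketch):
 *
 *	if (last_task_used_math) {		// steal from previous owner
 *		save_fprs(&last_task_used_math->thread);	// FPRs + FPSCR
 *		last_task_used_math->thread.regs->msr &=
 *			~(MSR_FP | MSR_FE0 | MSR_FE1);	// it will re-trap
 *	}
 *	restore_fprs(&current->thread);
 *	regs->msr |= MSR_FP | current->thread.fpexc_mode; // FP on after rfid
 *	last_task_used_math = current;
 */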
_STATIC(load_up_fpu)
	mfmsr	r5			/* grab the current MSR	 */
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 *
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_math@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save FP state to last_task_used_math's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	/* Disable FP for last_task_used_math */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r6,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP
	or	r12,r12,r4
	std	r12,_MSR(r1)
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

/*
 * disable_kernel_fp()
 * Disable the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_FP_LG),1
	rldicl	r3,r0,(MSR_FP_LG+1),0
	mtmsrd	r3			/* disable use of fpu now */
	isync
	blr

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
_GLOBAL(giveup_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r3)
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_math@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr


#ifdef CONFIG_ALTIVEC

/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR	 */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code.  Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

/*
 * disable_kernel_altivec()
 * Disable the VMX.
 */
_GLOBAL(disable_kernel_altivec)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_VEC_LG),1
	rldicl	r3,r0,(MSR_VEC_LG+1),0
	mtmsrd	r3			/* disable use of VMX now */
	isync
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable VMX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_altivec@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	pmac_secondary_start_1
pmac_secondary_start_1:
	li	r24, 1
	b	.pmac_secondary_start

	.globl pmac_secondary_start_2
pmac_secondary_start_2:
	li	r24, 2
	b	.pmac_secondary_start

	.globl pmac_secondary_start_3
pmac_secondary_start_3:
	li	r24, 3
	b	.pmac_secondary_start

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* pSeries does this early, though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOADADDR(r4, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		 */
	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1    = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)

	HMT_MEDIUM			/* Set thread priority to MEDIUM */

	ld	r2,PACATOC(r13)
	li	r6,0
	stb	r6,PACAPROCENABLED(r13)

#ifndef CONFIG_PPC_ISERIES
	/* Initialize the page table pointer register. */
	LOADADDR(r6,_SDR1)
	ld	r6,0(r6)		/* get the value of _SDR1	 */
	mtspr	SDR1,r6			/* set the htab location	 */
#endif
	/* Initialize the first segment table (or SLB) entry		 */
	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
	bl	.stab_initialize

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
	ori	r4,r3,1			/* turn on valid bit		 */

#ifdef CONFIG_PPC_ISERIES
	li	r0,-1			/* hypervisor call */
	li	r3,1
	sldi	r3,r3,63		/* 0x8000000000000000 */
	ori	r3,r3,4			/* 0x8000000000000004 */
	sc				/* HvCall_setASR */
#else
	/* set the ASR */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
	cmpldi	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:	/* !(rpa hypervisor) || !(star) */
	mtasr	r4			/* set the stab location */
99:
#endif
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)	/* Zero the stack frame pointer	*/
	bl	.start_secondary
#endif

/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
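
/*
 * Equivalent C (a sketch): set both the 64-bit execution mode and
 * 64-bit interrupt mode bits in the MSR:
 *
 *	mtmsrd(mfmsr() | (1UL << MSR_SF_LG) | (1UL << MSR_ISF_LG));
 *	isync();
 */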

#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS.  It may have been done in prom_init already,
	 * but that's irrelevant since prom_init will soon be detached from
	 * the kernel completely.  Besides, we need to clear it now for
	 * kexec-style entry.
	 */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			 */
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3		 */
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	 */
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_HMT
	/* Start up the second thread on cpu 0 */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar   */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
	bl	.hmt_start_secondary
91:
#endif

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode.		      */

	LOADADDR(r3,init_thread_union)
	sub	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	sub	r2,r2,r26

	LOADADDR(r3,cpu_specs)
	sub	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	sub	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* Save some low level config HIDs of CPU0 to be copied to
	 * other CPUs later on, or used for suspend/resume
	 */
	bl	.__save_cpu_setup
	sync

	/* Setup a valid physical PACA pointer in SPRG3 for early_setup;
	 * note that boot_cpuid can always be 0 nowadays since there is
	 * nowhere it can be initialized differently before we reach this
	 * code
	 */
	LOADADDR(r27, boot_cpuid)
	sub	r27,r27,r26
	lwz	r27,0(r27)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	sub	r13,r13,r26		/* convert to physical addr	 */
	mtspr	SPRG3,r13		/* PPPBBB: Temp... -Peter */

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup

	/* set the ASR */
	ld	r3,PACASTABREAL(r13)
	ori	r4,r3,1			/* turn on valid bit		 */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
	cmpldi	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:	/* !(rpa hypervisor) || !(star) */
	mtasr	r4			/* set the stab location */
99:
	/* Set SDR1 (hash table pointer) */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags	 */
	/* Test if bit 0 is set (LPAR bit) */
	andi.	r3,r3,0x1
	bne	98f
	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR	  */
	sub	r6,r6,r26
	ld	r6,0(r6)		/* get the value of _SDR1 */
	mtspr	SDR1,r6			/* set the htab location  */
98:
	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU)
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOADADDR(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRG3,r13

	/* ptr to current */
	LOADADDR(r4,init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl	.start_kernel

_GLOBAL(__setup_cpu_power3)
	blr

_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f
90:	mfspr	r6,PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f

__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4
	li	r7,0
	mfspr	r6,PIR
	mfspr	r8,PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3f

103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b

104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
101:
#endif
	mr	r3,r24
	b	.pSeries_secondary_smp_init

#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	NIADORM, r4
	mfspr	r4, MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	TSC, r4
	li	r4,0x1f4
	mtspr	TST, r4
	mfspr	r4, HID0
	ori	r4, r4, 0x1
	mtspr	HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
_GLOBAL(smp_release_cpus)
	/* All secondary cpus are spinning on a common
	 * spinloop, release them all now so they can start
	 * to spin on their individual paca spinloops.
	 * For non-SMP kernels, the secondary cpus never
	 * get out of the common spinloop.
	 */
	li	r3,1
	LOADADDR(r5,__secondary_hold_spinloop)
	std	r3,0(r5)
	sync
	blr
#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */
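
/*
 * In C, the release above amounts to (a sketch):
 *
 *	__secondary_hold_spinloop = 1;	// the value the secondaries poll
 *	smp_wmb();			// the sync: publish before return
 */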


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE