Commit | Line | Data |
---|---|---|
14cf11af | 1 | /* |
14cf11af PM |
2 | * PowerPC version |
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | |
4 | * | |
5 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | |
6 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | |
7 | * Adapted for Power Macintosh by Paul Mackerras. | |
8 | * Low-level exception handlers and MMU support | |
9 | * rewritten by Paul Mackerras. | |
10 | * Copyright (C) 1996 Paul Mackerras. | |
11 | * | |
12 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and | |
13 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com | |
14 | * | |
15 | * This file contains the low-level support and setup for the | |
16 | * PowerPC-64 platform, including trap and interrupt dispatch. | |
17 | * | |
18 | * This program is free software; you can redistribute it and/or | |
19 | * modify it under the terms of the GNU General Public License | |
20 | * as published by the Free Software Foundation; either version | |
21 | * 2 of the License, or (at your option) any later version. | |
22 | */ | |
23 | ||
14cf11af | 24 | #include <linux/threads.h> |
b5bbeb23 | 25 | #include <asm/reg.h> |
14cf11af PM |
26 | #include <asm/page.h> |
27 | #include <asm/mmu.h> | |
14cf11af PM |
28 | #include <asm/ppc_asm.h> |
29 | #include <asm/asm-offsets.h> | |
30 | #include <asm/bug.h> | |
31 | #include <asm/cputable.h> | |
32 | #include <asm/setup.h> | |
33 | #include <asm/hvcall.h> | |
c43a55ff | 34 | #include <asm/iseries/lpar_map.h> |
6cb7bfeb | 35 | #include <asm/thread_info.h> |
3f639ee8 | 36 | #include <asm/firmware.h> |
16a15a30 | 37 | #include <asm/page_64.h> |
f9ff0f30 | 38 | #include <asm/exception.h> |
14cf11af | 39 | |
14cf11af | 40 | #define DO_SOFT_DISABLE |
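/*
 * DO_SOFT_DISABLE selects lazy interrupt disabling: disabling interrupts
 * normally just clears the paca->soft_enabled flag rather than MSR_EE.
 * If an external or decrementer interrupt arrives while soft-disabled,
 * the masked_interrupt code below clears EE in the return MSR and
 * clears paca->hard_enabled, so the interrupt can be taken for real
 * once interrupts are re-enabled.
 */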
14cf11af PM |
41 | |
42 | /* | |
43 | * We lay out physical memory as follows:
44 | * 0x0000 - 0x00ff : Secondary processor spin code | |
45 | * 0x0100 - 0x2fff : pSeries Interrupt prologs | |
46 | * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs | |
47 | * 0x6000 - 0x6fff : Initial (CPU0) segment table | |
48 | * 0x7000 - 0x7fff : FWNMI data area | |
49 | * 0x8000 - : Early init and support code | |
50 | */ | |
51 | ||
52 | /* | |
53 | * SPRG Usage | |
54 | * | |
55 | * Register Definition | |
56 | * | |
57 | * SPRG0 reserved for hypervisor | |
58 | * SPRG1 temp - used to save gpr | |
59 | * SPRG2 temp - used to save gpr | |
60 | * SPRG3 virt addr of paca | |
61 | */ | |
62 | ||
63 | /* | |
64 | * Entering into this code we make the following assumptions: | |
65 | * For pSeries: | |
66 | * 1. The MMU is off & open firmware is running in real mode. | |
67 | * 2. The kernel is entered at __start | |
68 | * | |
69 | * For iSeries: | |
70 | * 1. The MMU is on (as it always is for iSeries) | |
71 | * 2. The kernel is entered at system_reset_iSeries | |
72 | */ | |
73 | ||
74 | .text | |
75 | .globl _stext | |
76 | _stext: | |
14cf11af PM |
77 | _GLOBAL(__start) |
78 | /* NOP this out unconditionally */ | |
79 | BEGIN_FTR_SECTION | |
b85a046a | 80 | b .__start_initialization_multiplatform |
14cf11af | 81 | END_FTR_SECTION(0, 1) |
14cf11af PM |
82 | |
83 | /* Catch branch to 0 in real mode */ | |
84 | trap | |
85 | ||
14cf11af PM |
86 | /* Secondary processors spin on this value until it goes to 1. */ |
87 | .globl __secondary_hold_spinloop | |
88 | __secondary_hold_spinloop: | |
89 | .llong 0x0 | |
90 | ||
91 | /* Secondary processors write this value with their cpu # */ | |
92 | /* after they enter the spin loop immediately below. */ | |
93 | .globl __secondary_hold_acknowledge | |
94 | __secondary_hold_acknowledge: | |
95 | .llong 0x0 | |
96 | ||
1dce0e30 ME |
97 | #ifdef CONFIG_PPC_ISERIES |
98 | /* | |
99 | * At offset 0x20, there is a pointer to iSeries LPAR data. | |
100 | * This is required by the hypervisor | |
101 | */ | |
102 | . = 0x20 | |
103 | .llong hvReleaseData-KERNELBASE | |
104 | #endif /* CONFIG_PPC_ISERIES */ | |
105 | ||
14cf11af PM |
106 | . = 0x60 |
107 | /* | |
75423b7b GL |
108 | * The following code is used to hold secondary processors |
109 | * in a spin loop after they have entered the kernel, but | |
14cf11af PM |
110 | * before the bulk of the kernel has been relocated. This code |
111 | * is relocated to physical address 0x60 before prom_init is run. | |
112 | * All of it must fit below the first exception vector at 0x100. | |
113 | */ | |
114 | _GLOBAL(__secondary_hold) | |
115 | mfmsr r24 | |
116 | ori r24,r24,MSR_RI | |
117 | mtmsrd r24 /* RI on */ | |
118 | ||
f1870f77 | 119 | /* Grab our physical cpu number */ |
14cf11af PM |
120 | mr r24,r3 |
121 | ||
122 | /* Tell the master cpu we're here */ | |
123 | /* Relocation is off & we are located at an address less */ | |
124 | /* than 0x100, so only need to grab low order offset. */ | |
125 | std r24,__secondary_hold_acknowledge@l(0) | |
126 | sync | |
127 | ||
128 | /* All secondary cpus wait here until told to start. */ | |
129 | 100: ld r4,__secondary_hold_spinloop@l(0) | |
130 | cmpdi 0,r4,1 | |
131 | bne 100b | |
132 | ||
f1870f77 | 133 | #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) |
f39b7a55 | 134 | LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init) |
758438a7 | 135 | mtctr r4 |
14cf11af | 136 | mr r3,r24 |
758438a7 | 137 | bctr |
14cf11af PM |
138 | #else |
139 | BUG_OPCODE | |
140 | #endif | |
14cf11af PM |
141 | |
142 | /* This value is used to mark exception frames on the stack. */ | |
143 | .section ".toc","aw" | |
144 | exception_marker: | |
145 | .tc ID_72656773_68657265[TC],0x7265677368657265 | |
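/* Both the entry name and the value spell "regshere" in ASCII, so that
 * stack walkers can recognize exception frames on the stack. */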
146 | .text | |
147 | ||
14cf11af PM |
148 | /* |
149 | * This is the start of the interrupt handlers for pSeries | |
150 | * This code runs with relocation off. | |
14cf11af PM |
151 | */ |
152 | . = 0x100 | |
153 | .globl __start_interrupts | |
154 | __start_interrupts: | |
155 | ||
156 | STD_EXCEPTION_PSERIES(0x100, system_reset) | |
157 | ||
158 | . = 0x200 | |
159 | _machine_check_pSeries: | |
160 | HMT_MEDIUM | |
b5bbeb23 | 161 | mtspr SPRN_SPRG1,r13 /* save r13 */ |
14cf11af PM |
162 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) |
163 | ||
164 | . = 0x300 | |
165 | .globl data_access_pSeries | |
166 | data_access_pSeries: | |
167 | HMT_MEDIUM | |
b5bbeb23 | 168 | mtspr SPRN_SPRG1,r13 |
14cf11af | 169 | BEGIN_FTR_SECTION |
b5bbeb23 PM |
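/* On CPUs without an SLB (i.e. using the segment table), check for a
 * segment table miss (DSISR bit 0x00200000) on a kernel-region address
 * (DAR >> 60 == 0xc); the combined value 0x2c sends us to
 * do_stab_bolted_pSeries. */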
170 | mtspr SPRN_SPRG2,r12 |
171 | mfspr r13,SPRN_DAR | |
172 | mfspr r12,SPRN_DSISR | |
14cf11af PM |
173 | srdi r13,r13,60 |
174 | rlwimi r13,r12,16,0x20 | |
175 | mfcr r12 | |
176 | cmpwi r13,0x2c | |
3ccfc65c | 177 | beq do_stab_bolted_pSeries |
14cf11af | 178 | mtcrf 0x80,r12 |
b5bbeb23 | 179 | mfspr r12,SPRN_SPRG2 |
14cf11af PM |
180 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) |
181 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | |
182 | ||
183 | . = 0x380 | |
184 | .globl data_access_slb_pSeries | |
185 | data_access_slb_pSeries: | |
186 | HMT_MEDIUM | |
b5bbeb23 | 187 | mtspr SPRN_SPRG1,r13 |
b5bbeb23 | 188 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ |
3c726f8d BH |
189 | std r3,PACA_EXSLB+EX_R3(r13) |
190 | mfspr r3,SPRN_DAR | |
14cf11af | 191 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
3c726f8d BH |
192 | mfcr r9 |
193 | #ifdef __DISABLED__ | |
194 | /* Keep that around for when we re-implement dynamic VSIDs */ | |
195 | cmpdi r3,0 | |
196 | bge slb_miss_user_pseries | |
197 | #endif /* __DISABLED__ */ | |
14cf11af PM |
198 | std r10,PACA_EXSLB+EX_R10(r13) |
199 | std r11,PACA_EXSLB+EX_R11(r13) | |
200 | std r12,PACA_EXSLB+EX_R12(r13) | |
3c726f8d BH |
201 | mfspr r10,SPRN_SPRG1 |
202 | std r10,PACA_EXSLB+EX_R13(r13) | |
b5bbeb23 | 203 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
3c726f8d | 204 | b .slb_miss_realmode /* Rel. branch works in real mode */ |
14cf11af PM |
205 | |
206 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | |
207 | ||
208 | . = 0x480 | |
209 | .globl instruction_access_slb_pSeries | |
210 | instruction_access_slb_pSeries: | |
211 | HMT_MEDIUM | |
b5bbeb23 | 212 | mtspr SPRN_SPRG1,r13 |
b5bbeb23 | 213 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ |
3c726f8d BH |
214 | std r3,PACA_EXSLB+EX_R3(r13) |
215 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | |
14cf11af | 216 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
3c726f8d BH |
217 | mfcr r9 |
218 | #ifdef __DISABLED__ | |
219 | /* Keep that around for when we re-implement dynamic VSIDs */ | |
220 | cmpdi r3,0 | |
221 | bge slb_miss_user_pseries | |
222 | #endif /* __DISABLED__ */ | |
14cf11af PM |
223 | std r10,PACA_EXSLB+EX_R10(r13) |
224 | std r11,PACA_EXSLB+EX_R11(r13) | |
225 | std r12,PACA_EXSLB+EX_R12(r13) | |
3c726f8d BH |
226 | mfspr r10,SPRN_SPRG1 |
227 | std r10,PACA_EXSLB+EX_R13(r13) | |
b5bbeb23 | 228 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
3c726f8d | 229 | b .slb_miss_realmode /* Rel. branch works in real mode */ |
14cf11af | 230 | |
d04c56f7 | 231 | MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) |
14cf11af PM |
232 | STD_EXCEPTION_PSERIES(0x600, alignment) |
233 | STD_EXCEPTION_PSERIES(0x700, program_check) | |
234 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | |
d04c56f7 | 235 | MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) |
14cf11af PM |
236 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) |
237 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | |
238 | ||
239 | . = 0xc00 | |
240 | .globl system_call_pSeries | |
241 | system_call_pSeries: | |
242 | HMT_MEDIUM | |
14cf11af PM |
243 | mr r9,r13 |
244 | mfmsr r10 | |
b5bbeb23 PM |
245 | mfspr r13,SPRN_SPRG3 |
246 | mfspr r11,SPRN_SRR0 | |
14cf11af PM |
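/* r13 holds the paca virtual address, so its upper 32 bits supply the
 * kernel region bits; OR in the low 32 bits of system_call_common to
 * form the virtual address we will rfid to. */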
247 | clrrdi r12,r13,32 |
248 | oris r12,r12,system_call_common@h | |
249 | ori r12,r12,system_call_common@l | |
b5bbeb23 | 250 | mtspr SPRN_SRR0,r12 |
14cf11af | 251 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI |
b5bbeb23 PM |
252 | mfspr r12,SPRN_SRR1 |
253 | mtspr SPRN_SRR1,r10 | |
14cf11af PM |
254 | rfid |
255 | b . /* prevent speculative execution */ | |
256 | ||
257 | STD_EXCEPTION_PSERIES(0xd00, single_step) | |
258 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | |
259 | ||
260 | /* We need to deal with the Altivec unavailable exception | |
261 | * here, which is at 0xf20 and thus lands in the middle of the
262 | * prolog code of the performance monitor exception. A little
263 | * trickery is therefore necessary.
264 | */ | |
265 | . = 0xf00 | |
266 | b performance_monitor_pSeries | |
267 | ||
268 | STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) | |
269 | ||
acf7d768 BH |
270 | #ifdef CONFIG_CBE_RAS |
271 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) | |
272 | #endif /* CONFIG_CBE_RAS */ | |
14cf11af | 273 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) |
acf7d768 BH |
274 | #ifdef CONFIG_CBE_RAS |
275 | HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) | |
276 | #endif /* CONFIG_CBE_RAS */ | |
14cf11af | 277 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) |
acf7d768 BH |
278 | #ifdef CONFIG_CBE_RAS |
279 | HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) | |
280 | #endif /* CONFIG_CBE_RAS */ | |
14cf11af PM |
281 | |
282 | . = 0x3000 | |
283 | ||
284 | /*** pSeries interrupt support ***/ | |
285 | ||
286 | /* moved from 0xf00 */ | |
449d846d | 287 | STD_EXCEPTION_PSERIES(., performance_monitor) |
d04c56f7 PM |
288 | |
289 | /* | |
290 | * An interrupt came in while soft-disabled; clear EE in SRR1, | |
291 | * clear paca->hard_enabled and return. | |
292 | */ | |
293 | masked_interrupt: | |
294 | stb r10,PACAHARDIRQEN(r13) | |
295 | mtcrf 0x80,r9 | |
296 | ld r9,PACA_EXGEN+EX_R9(r13) | |
297 | mfspr r10,SPRN_SRR1 | |
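/* Rotate MSR_EE up to the top bit, clear it with the rldicl mask,
 * then rotate the value back into place. */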
298 | rldicl r10,r10,48,1 /* clear MSR_EE */ | |
299 | rotldi r10,r10,16 | |
300 | mtspr SPRN_SRR1,r10 | |
301 | ld r10,PACA_EXGEN+EX_R10(r13) | |
302 | mfspr r13,SPRN_SPRG1 | |
303 | rfid | |
304 | b . | |
14cf11af PM |
305 | |
306 | .align 7 | |
3ccfc65c | 307 | do_stab_bolted_pSeries: |
14cf11af | 308 | mtcrf 0x80,r12 |
b5bbeb23 | 309 | mfspr r12,SPRN_SPRG2 |
14cf11af PM |
310 | EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) |
311 | ||
3c726f8d BH |
312 | /* |
313 | * We have some room here, so we use it to put
314 | * the pSeries SLB miss user trampoline code reasonably far
315 | * away from slb_miss_user_common to avoid problems with rfid.
316 | *
317 | * This is used when the SLB miss handler has to go virtual,
318 | * which doesn't happen for now but will once we re-implement
319 | * dynamic VSIDs for shared page tables.
320 | */ | |
321 | #ifdef __DISABLED__ | |
322 | slb_miss_user_pseries: | |
323 | std r10,PACA_EXGEN+EX_R10(r13) | |
324 | std r11,PACA_EXGEN+EX_R11(r13) | |
325 | std r12,PACA_EXGEN+EX_R12(r13) | |
326 | mfspr r10,SPRG1 | |
327 | ld r11,PACA_EXSLB+EX_R9(r13) | |
328 | ld r12,PACA_EXSLB+EX_R3(r13) | |
329 | std r10,PACA_EXGEN+EX_R13(r13) | |
330 | std r11,PACA_EXGEN+EX_R9(r13) | |
331 | std r12,PACA_EXGEN+EX_R3(r13) | |
332 | clrrdi r12,r13,32 | |
333 | mfmsr r10 | |
334 | mfspr r11,SRR0 /* save SRR0 */ | |
335 | ori r12,r12,slb_miss_user_common@l /* virt addr of handler */ | |
336 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | |
337 | mtspr SRR0,r12 | |
338 | mfspr r12,SRR1 /* and SRR1 */ | |
339 | mtspr SRR1,r10 | |
340 | rfid | |
341 | b . /* prevent spec. execution */ | |
342 | #endif /* __DISABLED__ */ | |
343 | ||
9e4859ef | 344 | #ifdef CONFIG_PPC_PSERIES |
14cf11af PM |
345 | /* |
346 | * Vectors for the FWNMI option. Share common code. | |
347 | */ | |
b5bbeb23 | 348 | .globl system_reset_fwnmi |
8c4f1f29 | 349 | .align 7 |
14cf11af | 350 | system_reset_fwnmi: |
b5bbeb23 PM |
351 | HMT_MEDIUM |
352 | mtspr SPRN_SPRG1,r13 /* save r13 */ | |
9fc0a92c | 353 | EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common) |
14cf11af | 354 | |
b5bbeb23 | 355 | .globl machine_check_fwnmi |
8c4f1f29 | 356 | .align 7 |
14cf11af | 357 | machine_check_fwnmi: |
b5bbeb23 PM |
358 | HMT_MEDIUM |
359 | mtspr SPRN_SPRG1,r13 /* save r13 */ | |
9fc0a92c | 360 | EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common) |
14cf11af | 361 | |
9e4859ef SR |
362 | #endif /* CONFIG_PPC_PSERIES */ |
363 | ||
14cf11af PM |
364 | /*** Common interrupt handlers ***/ |
365 | ||
366 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | |
367 | ||
368 | /* | |
369 | * Machine check is different because we use a different | |
370 | * save area: PACA_EXMC instead of PACA_EXGEN. | |
371 | */ | |
372 | .align 7 | |
373 | .globl machine_check_common | |
374 | machine_check_common: | |
375 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) | |
f39224a8 | 376 | FINISH_NAP |
14cf11af PM |
377 | DISABLE_INTS |
378 | bl .save_nvgprs | |
379 | addi r3,r1,STACK_FRAME_OVERHEAD | |
380 | bl .machine_check_exception | |
381 | b .ret_from_except | |
382 | ||
383 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) | |
384 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) | |
385 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | |
386 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | |
387 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | |
f39224a8 | 388 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) |
14cf11af PM |
389 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) |
390 | #ifdef CONFIG_ALTIVEC | |
391 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | |
392 | #else | |
393 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | |
394 | #endif | |
acf7d768 BH |
395 | #ifdef CONFIG_CBE_RAS |
396 | STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) | |
397 | STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) | |
398 | STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) | |
399 | #endif /* CONFIG_CBE_RAS */ | |
14cf11af PM |
400 | |
401 | /* | |
402 | * Here we have detected that the kernel stack pointer is bad. | |
403 | * R9 contains the saved CR, r13 points to the paca, | |
404 | * r10 contains the (bad) kernel stack pointer, | |
405 | * r11 and r12 contain the saved SRR0 and SRR1. | |
406 | * We switch to using an emergency stack, save the registers there, | |
407 | * and call kernel_bad_stack(), which panics. | |
408 | */ | |
409 | bad_stack: | |
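/* Carve an exception frame (plus 64 bytes of slack) out of this
 * cpu's emergency stack. */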
410 | ld r1,PACAEMERGSP(r13) | |
411 | subi r1,r1,64+INT_FRAME_SIZE | |
412 | std r9,_CCR(r1) | |
413 | std r10,GPR1(r1) | |
414 | std r11,_NIP(r1) | |
415 | std r12,_MSR(r1) | |
b5bbeb23 PM |
416 | mfspr r11,SPRN_DAR |
417 | mfspr r12,SPRN_DSISR | |
14cf11af PM |
418 | std r11,_DAR(r1) |
419 | std r12,_DSISR(r1) | |
420 | mflr r10 | |
421 | mfctr r11 | |
422 | mfxer r12 | |
423 | std r10,_LINK(r1) | |
424 | std r11,_CTR(r1) | |
425 | std r12,_XER(r1) | |
426 | SAVE_GPR(0,r1) | |
427 | SAVE_GPR(2,r1) | |
428 | SAVE_4GPRS(3,r1) | |
429 | SAVE_2GPRS(7,r1) | |
430 | SAVE_10GPRS(12,r1) | |
431 | SAVE_10GPRS(22,r1) | |
68730401 OJ |
432 | lhz r12,PACA_TRAP_SAVE(r13) |
433 | std r12,_TRAP(r1) | |
14cf11af PM |
434 | addi r11,r1,INT_FRAME_SIZE |
435 | std r11,0(r1) | |
436 | li r12,0 | |
437 | std r12,0(r11) | |
438 | ld r2,PACATOC(r13) | |
439 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
440 | bl .kernel_bad_stack | |
441 | b 1b | |
442 | ||
443 | /* | |
444 | * Return from an exception with minimal checks. | |
445 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. | |
446 | * If interrupts have been enabled, or anything has been | |
447 | * done that might have changed the scheduling status of | |
448 | * any task or sent any task a signal, you should use | |
449 | * ret_from_except or ret_from_except_lite instead of this. | |
450 | */ | |
b0a779de PM |
451 | fast_exc_return_irq: /* restores irq state too */ |
452 | ld r3,SOFTE(r1) | |
453 | ld r12,_MSR(r1) | |
454 | stb r3,PACASOFTIRQEN(r13) /* restore paca->soft_enabled */ | |
455 | rldicl r4,r12,49,63 /* get MSR_EE to LSB */ | |
456 | stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ | |
457 | b 1f | |
458 | ||
40ef8cbc | 459 | .globl fast_exception_return |
14cf11af PM |
460 | fast_exception_return: |
461 | ld r12,_MSR(r1) | |
b0a779de | 462 | 1: ld r11,_NIP(r1) |
14cf11af PM |
463 | andi. r3,r12,MSR_RI /* check if RI is set */ |
464 | beq- unrecov_fer | |
c6622f63 PM |
465 | |
466 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
467 | andi. r3,r12,MSR_PR | |
468 | beq 2f | |
469 | ACCOUNT_CPU_USER_EXIT(r3, r4) | |
470 | 2: | |
471 | #endif | |
472 | ||
14cf11af PM |
473 | ld r3,_CCR(r1) |
474 | ld r4,_LINK(r1) | |
475 | ld r5,_CTR(r1) | |
476 | ld r6,_XER(r1) | |
477 | mtcr r3 | |
478 | mtlr r4 | |
479 | mtctr r5 | |
480 | mtxer r6 | |
481 | REST_GPR(0, r1) | |
482 | REST_8GPRS(2, r1) | |
483 | ||
484 | mfmsr r10 | |
d04c56f7 PM |
485 | rldicl r10,r10,48,1 /* clear EE */ |
486 | rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */ | |
14cf11af PM |
487 | mtmsrd r10,1 |
488 | ||
b5bbeb23 PM |
489 | mtspr SPRN_SRR1,r12 |
490 | mtspr SPRN_SRR0,r11 | |
14cf11af PM |
491 | REST_4GPRS(10, r1) |
492 | ld r1,GPR1(r1) | |
493 | rfid | |
494 | b . /* prevent speculative execution */ | |
495 | ||
496 | unrecov_fer: | |
497 | bl .save_nvgprs | |
498 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
499 | bl .unrecoverable_exception | |
500 | b 1b | |
501 | ||
502 | /* | |
503 | * Here r13 points to the paca, r9 contains the saved CR, | |
504 | * SRR0 and SRR1 are saved in r11 and r12, | |
505 | * r9 - r13 are saved in paca->exgen. | |
506 | */ | |
507 | .align 7 | |
508 | .globl data_access_common | |
509 | data_access_common: | |
b5bbeb23 | 510 | mfspr r10,SPRN_DAR |
14cf11af | 511 | std r10,PACA_EXGEN+EX_DAR(r13) |
b5bbeb23 | 512 | mfspr r10,SPRN_DSISR |
14cf11af PM |
513 | stw r10,PACA_EXGEN+EX_DSISR(r13) |
514 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) | |
515 | ld r3,PACA_EXGEN+EX_DAR(r13) | |
516 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | |
517 | li r5,0x300 | |
518 | b .do_hash_page /* Try to handle as hpte fault */ | |
519 | ||
520 | .align 7 | |
521 | .globl instruction_access_common | |
522 | instruction_access_common: | |
523 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) | |
524 | ld r3,_NIP(r1) | |
525 | andis. r4,r12,0x5820 | |
526 | li r5,0x400 | |
527 | b .do_hash_page /* Try to handle as hpte fault */ | |
528 | ||
3c726f8d BH |
529 | /* |
530 | * Here is the common SLB miss user handler that is used when going to
531 | * virtual mode for SLB misses; it is currently not used.
532 | */ | |
533 | #ifdef __DISABLED__ | |
534 | .align 7 | |
535 | .globl slb_miss_user_common | |
536 | slb_miss_user_common: | |
537 | mflr r10 | |
538 | std r3,PACA_EXGEN+EX_DAR(r13) | |
539 | stw r9,PACA_EXGEN+EX_CCR(r13) | |
540 | std r10,PACA_EXGEN+EX_LR(r13) | |
541 | std r11,PACA_EXGEN+EX_SRR0(r13) | |
542 | bl .slb_allocate_user | |
543 | ||
544 | ld r10,PACA_EXGEN+EX_LR(r13) | |
545 | ld r3,PACA_EXGEN+EX_R3(r13) | |
546 | lwz r9,PACA_EXGEN+EX_CCR(r13) | |
547 | ld r11,PACA_EXGEN+EX_SRR0(r13) | |
548 | mtlr r10 | |
549 | beq- slb_miss_fault | |
550 | ||
551 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | |
552 | beq- unrecov_user_slb | |
553 | mfmsr r10 | |
554 | ||
555 | .machine push | |
556 | .machine "power4" | |
557 | mtcrf 0x80,r9 | |
558 | .machine pop | |
559 | ||
560 | clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */ | |
561 | mtmsrd r10,1 | |
562 | ||
563 | mtspr SRR0,r11 | |
564 | mtspr SRR1,r12 | |
565 | ||
566 | ld r9,PACA_EXGEN+EX_R9(r13) | |
567 | ld r10,PACA_EXGEN+EX_R10(r13) | |
568 | ld r11,PACA_EXGEN+EX_R11(r13) | |
569 | ld r12,PACA_EXGEN+EX_R12(r13) | |
570 | ld r13,PACA_EXGEN+EX_R13(r13) | |
571 | rfid | |
572 | b . | |
573 | ||
574 | slb_miss_fault: | |
575 | EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN) | |
576 | ld r4,PACA_EXGEN+EX_DAR(r13) | |
577 | li r5,0 | |
578 | std r4,_DAR(r1) | |
579 | std r5,_DSISR(r1) | |
3ccfc65c | 580 | b handle_page_fault |
3c726f8d BH |
581 | |
582 | unrecov_user_slb: | |
583 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) | |
584 | DISABLE_INTS | |
585 | bl .save_nvgprs | |
586 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
587 | bl .unrecoverable_exception | |
588 | b 1b | |
589 | ||
590 | #endif /* __DISABLED__ */ | |
591 | ||
592 | ||
593 | /* | |
594 | * r13 points to the PACA, r9 contains the saved CR, | |
595 | * r12 contains the saved SRR1, SRR0 is still ready for return,
596 | * r3 has the faulting address | |
597 | * r9 - r13 are saved in paca->exslb. | |
598 | * r3 is saved in paca->slb_r3 | |
599 | * We assume we aren't going to take any exceptions during this procedure. | |
600 | */ | |
601 | _GLOBAL(slb_miss_realmode) | |
602 | mflr r10 | |
603 | ||
604 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | |
605 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | |
606 | ||
607 | bl .slb_allocate_realmode | |
608 | ||
609 | /* All done -- return from exception. */ | |
610 | ||
611 | ld r10,PACA_EXSLB+EX_LR(r13) | |
612 | ld r3,PACA_EXSLB+EX_R3(r13) | |
613 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | |
614 | #ifdef CONFIG_PPC_ISERIES | |
3f639ee8 | 615 | BEGIN_FW_FTR_SECTION |
3356bb9f DG |
616 | ld r11,PACALPPACAPTR(r13) |
617 | ld r11,LPPACASRR0(r11) /* get SRR0 value */ | |
3f639ee8 | 618 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
3c726f8d BH |
619 | #endif /* CONFIG_PPC_ISERIES */ |
620 | ||
621 | mtlr r10 | |
622 | ||
623 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | |
624 | beq- unrecov_slb | |
625 | ||
626 | .machine push | |
627 | .machine "power4" | |
628 | mtcrf 0x80,r9 | |
629 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | |
630 | .machine pop | |
631 | ||
632 | #ifdef CONFIG_PPC_ISERIES | |
3f639ee8 | 633 | BEGIN_FW_FTR_SECTION |
3c726f8d BH |
634 | mtspr SPRN_SRR0,r11 |
635 | mtspr SPRN_SRR1,r12 | |
3f639ee8 | 636 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
3c726f8d BH |
637 | #endif /* CONFIG_PPC_ISERIES */ |
638 | ld r9,PACA_EXSLB+EX_R9(r13) | |
639 | ld r10,PACA_EXSLB+EX_R10(r13) | |
640 | ld r11,PACA_EXSLB+EX_R11(r13) | |
641 | ld r12,PACA_EXSLB+EX_R12(r13) | |
642 | ld r13,PACA_EXSLB+EX_R13(r13) | |
643 | rfid | |
644 | b . /* prevent speculative execution */ | |
645 | ||
646 | unrecov_slb: | |
647 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | |
648 | DISABLE_INTS | |
649 | bl .save_nvgprs | |
650 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
651 | bl .unrecoverable_exception | |
652 | b 1b | |
653 | ||
14cf11af PM |
654 | .align 7 |
655 | .globl hardware_interrupt_common | |
656 | .globl hardware_interrupt_entry | |
657 | hardware_interrupt_common: | |
658 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | |
f39224a8 | 659 | FINISH_NAP |
14cf11af PM |
660 | hardware_interrupt_entry: |
661 | DISABLE_INTS | |
a416561b | 662 | BEGIN_FTR_SECTION |
cb2c9b27 | 663 | bl .ppc64_runlatch_on |
a416561b | 664 | END_FTR_SECTION_IFSET(CPU_FTR_CTRL) |
14cf11af PM |
665 | addi r3,r1,STACK_FRAME_OVERHEAD |
666 | bl .do_IRQ | |
667 | b .ret_from_except_lite | |
668 | ||
f39224a8 PM |
669 | #ifdef CONFIG_PPC_970_NAP |
670 | power4_fixup_nap: | |
671 | andc r9,r9,r10 | |
672 | std r9,TI_LOCAL_FLAGS(r11) | |
673 | ld r10,_LINK(r1) /* make idle task do the */ | |
674 | std r10,_NIP(r1) /* equivalent of a blr */ | |
675 | blr | |
676 | #endif | |
677 | ||
14cf11af PM |
678 | .align 7 |
679 | .globl alignment_common | |
680 | alignment_common: | |
b5bbeb23 | 681 | mfspr r10,SPRN_DAR |
14cf11af | 682 | std r10,PACA_EXGEN+EX_DAR(r13) |
b5bbeb23 | 683 | mfspr r10,SPRN_DSISR |
14cf11af PM |
684 | stw r10,PACA_EXGEN+EX_DSISR(r13) |
685 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) | |
686 | ld r3,PACA_EXGEN+EX_DAR(r13) | |
687 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | |
688 | std r3,_DAR(r1) | |
689 | std r4,_DSISR(r1) | |
690 | bl .save_nvgprs | |
691 | addi r3,r1,STACK_FRAME_OVERHEAD | |
692 | ENABLE_INTS | |
693 | bl .alignment_exception | |
694 | b .ret_from_except | |
695 | ||
696 | .align 7 | |
697 | .globl program_check_common | |
698 | program_check_common: | |
699 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | |
700 | bl .save_nvgprs | |
701 | addi r3,r1,STACK_FRAME_OVERHEAD | |
702 | ENABLE_INTS | |
703 | bl .program_check_exception | |
704 | b .ret_from_except | |
705 | ||
706 | .align 7 | |
707 | .globl fp_unavailable_common | |
708 | fp_unavailable_common: | |
709 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | |
3ccfc65c | 710 | bne 1f /* if from user, just load it up */ |
14cf11af PM |
711 | bl .save_nvgprs |
712 | addi r3,r1,STACK_FRAME_OVERHEAD | |
713 | ENABLE_INTS | |
714 | bl .kernel_fp_unavailable_exception | |
715 | BUG_OPCODE | |
3ccfc65c | 716 | 1: b .load_up_fpu |
14cf11af | 717 | |
14cf11af PM |
718 | .align 7 |
719 | .globl altivec_unavailable_common | |
720 | altivec_unavailable_common: | |
721 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | |
722 | #ifdef CONFIG_ALTIVEC | |
723 | BEGIN_FTR_SECTION | |
724 | bne .load_up_altivec /* if from user, just load it up */ | |
725 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |
726 | #endif | |
727 | bl .save_nvgprs | |
728 | addi r3,r1,STACK_FRAME_OVERHEAD | |
729 | ENABLE_INTS | |
730 | bl .altivec_unavailable_exception | |
731 | b .ret_from_except | |
732 | ||
733 | #ifdef CONFIG_ALTIVEC | |
734 | /* | |
735 | * load_up_altivec(unused, unused, tsk) | |
736 | * Disable VMX for the task which had it previously, | |
737 | * and save its vector registers in its thread_struct. | |
738 | * Enables the VMX for use in the kernel on return. | |
739 | * On SMP we know the VMX is free, since we give it up every | |
740 | * switch (ie, no lazy save of the vector registers). | |
741 | * On entry: r13 == 'current' && last_task_used_altivec != 'current' | |
742 | */ | |
743 | _STATIC(load_up_altivec) | |
744 | mfmsr r5 /* grab the current MSR */ | |
745 | oris r5,r5,MSR_VEC@h | |
746 | mtmsrd r5 /* enable use of VMX now */ | |
747 | isync | |
748 | ||
749 | /* | |
750 | * For SMP, we don't do lazy VMX switching because it just gets too | |
751 | * horrendously complex, especially when a task switches from one CPU | |
752 | * to another. Instead we call giveup_altivec in switch_to.
753 | * VRSAVE isn't dealt with here, that is done in the normal context | |
754 | * switch code. Note that we could rely on vrsave value to eventually | |
755 | * avoid saving all of the VREGs here... | |
756 | */ | |
757 | #ifndef CONFIG_SMP | |
758 | ld r3,last_task_used_altivec@got(r2) | |
759 | ld r4,0(r3) | |
760 | cmpdi 0,r4,0 | |
761 | beq 1f | |
762 | /* Save VMX state to last_task_used_altivec's THREAD struct */ | |
763 | addi r4,r4,THREAD | |
764 | SAVE_32VRS(0,r5,r4) | |
765 | mfvscr vr0 | |
766 | li r10,THREAD_VSCR | |
767 | stvx vr0,r10,r4 | |
768 | /* Disable VMX for last_task_used_altivec */ | |
769 | ld r5,PT_REGS(r4) | |
770 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
771 | lis r6,MSR_VEC@h | |
772 | andc r4,r4,r6 | |
773 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
774 | 1: | |
775 | #endif /* CONFIG_SMP */ | |
776 | /* Hack: if we get an altivec unavailable trap with VRSAVE | |
777 | * set to all zeros, we assume this is a broken application | |
778 | * that fails to set it properly, and thus we switch it to | |
779 | * all 1's | |
780 | */ | |
781 | mfspr r4,SPRN_VRSAVE | |
782 | cmpdi 0,r4,0 | |
783 | bne+ 1f | |
784 | li r4,-1 | |
785 | mtspr SPRN_VRSAVE,r4 | |
786 | 1: | |
787 | /* enable use of VMX after return */ | |
788 | ld r4,PACACURRENT(r13) | |
789 | addi r5,r4,THREAD /* Get THREAD */ | |
790 | oris r12,r12,MSR_VEC@h | |
791 | std r12,_MSR(r1) | |
792 | li r4,1 | |
793 | li r10,THREAD_VSCR | |
794 | stw r4,THREAD_USED_VR(r5) | |
795 | lvx vr0,r10,r5 | |
796 | mtvscr vr0 | |
797 | REST_32VRS(0,r4,r5) | |
798 | #ifndef CONFIG_SMP | |
799 | /* Update last_task_used_altivec to 'current' */
800 | subi r4,r5,THREAD /* Back to 'current' */ | |
801 | std r4,0(r3) | |
802 | #endif /* CONFIG_SMP */ | |
803 | /* restore registers and return */ | |
804 | b fast_exception_return | |
805 | #endif /* CONFIG_ALTIVEC */ | |
806 | ||
807 | /* | |
808 | * Hash table stuff | |
809 | */ | |
810 | .align 7 | |
811 | _GLOBAL(do_hash_page) | |
812 | std r3,_DAR(r1) | |
813 | std r4,_DSISR(r1) | |
814 | ||
815 | andis. r0,r4,0xa450 /* weird error? */ | |
3ccfc65c | 816 | bne- handle_page_fault /* if not, try to insert a HPTE */ |
14cf11af PM |
817 | BEGIN_FTR_SECTION |
818 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | |
3ccfc65c | 819 | bne- do_ste_alloc /* If so handle it */ |
14cf11af PM |
820 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) |
821 | ||
822 | /* | |
823 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | |
824 | * accessing a userspace segment (even from the kernel). We assume | |
825 | * kernel addresses always have the high bit set. | |
826 | */ | |
827 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ | |
828 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ | |
829 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ | |
830 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ | |
831 | ori r4,r4,1 /* add _PAGE_PRESENT */ | |
832 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ | |
833 | ||
834 | /* | |
835 | * On iSeries, we soft-disable interrupts here, then | |
836 | * hard-enable interrupts so that the hash_page code can spin on | |
837 | * the hash_table_lock without problems on a shared processor. | |
838 | */ | |
839 | DISABLE_INTS | |
840 | ||
841 | /* | |
842 | * r3 contains the faulting address | |
843 | * r4 contains the required access permissions | |
844 | * r5 contains the trap number | |
845 | * | |
846 | * at return r3 = 0 for success | |
847 | */ | |
848 | bl .hash_page /* build HPTE if possible */ | |
849 | cmpdi r3,0 /* see if hash_page succeeded */ | |
850 | ||
851 | #ifdef DO_SOFT_DISABLE | |
3f639ee8 | 852 | BEGIN_FW_FTR_SECTION |
14cf11af PM |
853 | /* |
854 | * If we had interrupts soft-enabled at the point where the | |
855 | * DSI/ISI occurred, and an interrupt came in during hash_page, | |
856 | * handle it now. | |
857 | * We jump to ret_from_except_lite rather than fast_exception_return | |
858 | * because ret_from_except_lite will check for and handle pending | |
859 | * interrupts if necessary. | |
860 | */ | |
3ccfc65c | 861 | beq 13f |
b0a779de PM |
862 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
863 | #endif | |
864 | BEGIN_FW_FTR_SECTION | |
865 | /* | |
866 | * Here we have interrupts hard-disabled, so it is sufficient | |
867 | * to restore paca->{soft,hard}_enabled and get out.
868 | */ | |
869 | beq fast_exc_return_irq /* Return from exception on success */ | |
870 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | |
871 | ||
14cf11af PM |
872 | /* For a hash failure, we don't bother re-enabling interrupts */ |
873 | ble- 12f | |
874 | ||
875 | /* | |
876 | * hash_page couldn't handle it, set soft interrupt enable back | |
877 | * to what it was before the trap. Note that .local_irq_restore | |
878 | * handles any interrupts pending at this point. | |
879 | */ | |
880 | ld r3,SOFTE(r1) | |
881 | bl .local_irq_restore | |
882 | b 11f | |
14cf11af PM |
883 | |
884 | /* Here we have a page fault that hash_page can't handle. */ | |
3ccfc65c | 885 | handle_page_fault: |
14cf11af PM |
886 | ENABLE_INTS |
887 | 11: ld r4,_DAR(r1) | |
888 | ld r5,_DSISR(r1) | |
889 | addi r3,r1,STACK_FRAME_OVERHEAD | |
890 | bl .do_page_fault | |
891 | cmpdi r3,0 | |
3ccfc65c | 892 | beq+ 13f |
14cf11af PM |
893 | bl .save_nvgprs |
894 | mr r5,r3 | |
895 | addi r3,r1,STACK_FRAME_OVERHEAD | |
896 | lwz r4,_DAR(r1) | |
897 | bl .bad_page_fault | |
898 | b .ret_from_except | |
899 | ||
79acbb3f PM |
900 | 13: b .ret_from_except_lite |
901 | ||
14cf11af PM |
902 | /* We have a page fault that hash_page could handle but HV refused |
903 | * the PTE insertion | |
904 | */ | |
905 | 12: bl .save_nvgprs | |
fa28237c | 906 | mr r5,r3 |
14cf11af | 907 | addi r3,r1,STACK_FRAME_OVERHEAD |
a792e75d | 908 | ld r4,_DAR(r1) |
14cf11af PM |
909 | bl .low_hash_fault |
910 | b .ret_from_except | |
911 | ||
912 | /* here we have a segment miss */ | |
3ccfc65c | 913 | do_ste_alloc: |
14cf11af PM |
914 | bl .ste_allocate /* try to insert stab entry */ |
915 | cmpdi r3,0 | |
3ccfc65c PM |
916 | bne- handle_page_fault |
917 | b fast_exception_return | |
14cf11af PM |
918 | |
919 | /* | |
920 | * r13 points to the PACA, r9 contains the saved CR, | |
921 | * r11 and r12 contain the saved SRR0 and SRR1. | |
922 | * r9 - r13 are saved in paca->exslb. | |
923 | * We assume we aren't going to take any exceptions during this procedure. | |
924 | * We assume (DAR >> 60) == 0xc. | |
925 | */ | |
926 | .align 7 | |
927 | _GLOBAL(do_stab_bolted) | |
928 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | |
929 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | |
930 | ||
931 | /* Hash to the primary group */ | |
932 | ld r10,PACASTABVIRT(r13) | |
b5bbeb23 | 933 | mfspr r11,SPRN_DAR |
14cf11af PM |
934 | srdi r11,r11,28 |
935 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | |
936 | ||
937 | /* Calculate VSID */ | |
938 | /* This is a kernel address, so protovsid = ESID */ | |
1189be65 | 939 | ASM_VSID_SCRAMBLE(r11, r9, 256M) |
14cf11af PM |
940 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ |
941 | ||
942 | /* Search the primary group for a free entry */ | |
943 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | |
944 | andi. r11,r11,0x80 | |
945 | beq 2f | |
946 | addi r10,r10,16 | |
947 | andi. r11,r10,0x70 | |
948 | bne 1b | |
949 | ||
950 | /* Stick to only searching the primary group for now. */
951 | /* At least for now, we use a very simple random castout scheme */
952 | /* Use the TB as a random number; OR in 1 to avoid entry 0 */
953 | mftb r11 | |
954 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ | |
955 | ori r11,r11,0x10 | |
956 | ||
957 | /* r10 currently points to an ste one past the group of interest */ | |
958 | /* make it point to the randomly selected entry */ | |
959 | subi r10,r10,128 | |
960 | or r10,r10,r11 /* r10 is the entry to invalidate */ | |
961 | ||
962 | isync /* mark the entry invalid */ | |
963 | ld r11,0(r10) | |
964 | rldicl r11,r11,56,1 /* clear the valid bit */ | |
965 | rotldi r11,r11,8 | |
966 | std r11,0(r10) | |
967 | sync | |
968 | ||
969 | clrrdi r11,r11,28 /* Get the esid part of the ste */ | |
970 | slbie r11 | |
971 | ||
972 | 2: std r9,8(r10) /* Store the vsid part of the ste */ | |
973 | eieio | |
974 | ||
b5bbeb23 | 975 | mfspr r11,SPRN_DAR /* Get the new esid */ |
14cf11af PM |
976 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ |
977 | ori r11,r11,0x90 /* Turn on valid and kp */ | |
978 | std r11,0(r10) /* Put new entry back into the stab */ | |
979 | ||
980 | sync | |
981 | ||
982 | /* All done -- return from exception. */ | |
983 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | |
984 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ | |
985 | ||
986 | andi. r10,r12,MSR_RI | |
987 | beq- unrecov_slb | |
988 | ||
989 | mtcrf 0x80,r9 /* restore CR */ | |
990 | ||
991 | mfmsr r10 | |
992 | clrrdi r10,r10,2 | |
993 | mtmsrd r10,1 | |
994 | ||
b5bbeb23 PM |
995 | mtspr SPRN_SRR0,r11 |
996 | mtspr SPRN_SRR1,r12 | |
14cf11af PM |
997 | ld r9,PACA_EXSLB+EX_R9(r13) |
998 | ld r10,PACA_EXSLB+EX_R10(r13) | |
999 | ld r11,PACA_EXSLB+EX_R11(r13) | |
1000 | ld r12,PACA_EXSLB+EX_R12(r13) | |
1001 | ld r13,PACA_EXSLB+EX_R13(r13) | |
1002 | rfid | |
1003 | b . /* prevent speculative execution */ | |
1004 | ||
14cf11af PM |
1005 | /* |
1006 | * Space for CPU0's segment table. | |
1007 | * | |
1008 | * On iSeries, the hypervisor must fill in at least one entry before | |
16a15a30 SR |
1009 | * we get control (with relocate on). The address is given to the hv |
1010 | * as a page number (see xLparMap below), so this must be at a | |
14cf11af PM |
1011 | * fixed address (the linker can't compute (u64)&initial_stab >> |
1012 | * PAGE_SHIFT). | |
1013 | */ | |
758438a7 | 1014 | . = STAB0_OFFSET /* 0x6000 */ |
14cf11af PM |
1015 | .globl initial_stab |
1016 | initial_stab: | |
1017 | .space 4096 | |
1018 | ||
9e4859ef | 1019 | #ifdef CONFIG_PPC_PSERIES |
14cf11af PM |
1020 | /* |
1021 | * Data area reserved for FWNMI option. | |
1022 | * This address (0x7000) is fixed by the RPA. | |
1023 | */ | |
1024 | . = 0x7000
1025 | .globl fwnmi_data_area | |
1026 | fwnmi_data_area: | |
9e4859ef | 1027 | #endif /* CONFIG_PPC_PSERIES */ |
14cf11af PM |
1028 | |
1029 | /* iSeries does not use the FWNMI stuff, so it is safe to put | |
1030 | * this here, even if we later allow kernels that will boot on | |
1031 | * both pSeries and iSeries */ | |
1032 | #ifdef CONFIG_PPC_ISERIES | |
1033 | . = LPARMAP_PHYS | |
16a15a30 SR |
1034 | .globl xLparMap |
1035 | xLparMap: | |
1036 | .quad HvEsidsToMap /* xNumberEsids */ | |
1037 | .quad HvRangesToMap /* xNumberRanges */ | |
1038 | .quad STAB0_PAGE /* xSegmentTableOffs */ | |
1039 | .zero 40 /* xRsvd */ | |
1040 | /* xEsids (HvEsidsToMap entries of 2 quads) */ | |
1041 | .quad PAGE_OFFSET_ESID /* xKernelEsid */ | |
1042 | .quad PAGE_OFFSET_VSID /* xKernelVsid */ | |
1043 | .quad VMALLOC_START_ESID /* xKernelEsid */ | |
1044 | .quad VMALLOC_START_VSID /* xKernelVsid */ | |
1045 | /* xRanges (HvRangesToMap entries of 3 quads) */ | |
1046 | .quad HvPagesToMap /* xPages */ | |
1047 | .quad 0 /* xOffset */ | |
1048 | .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */ | |
1049 | ||
14cf11af PM |
1050 | #endif /* CONFIG_PPC_ISERIES */ |
1051 | ||
9e4859ef | 1052 | #ifdef CONFIG_PPC_PSERIES |
14cf11af | 1053 | . = 0x8000 |
9e4859ef | 1054 | #endif /* CONFIG_PPC_PSERIES */ |
14cf11af PM |
1055 | |
1056 | /* | |
f39b7a55 OJ |
1057 | * On pSeries and most other platforms, secondary processors spin |
1058 | * in the following code. | |
14cf11af PM |
1059 | * At entry, r3 = this processor's number (physical cpu id) |
1060 | */ | |
f39b7a55 | 1061 | _GLOBAL(generic_secondary_smp_init) |
14cf11af PM |
1062 | mr r24,r3 |
1063 | ||
1064 | /* turn on 64-bit mode */ | |
1065 | bl .enable_64b_mode | |
14cf11af | 1066 | |
14cf11af PM |
1067 | /* Set up a paca value for this processor. Since we have the |
1068 | * physical cpu id in r24, we need to search the pacas to find | |
1069 | * which logical id maps to our physical one. | |
1070 | */ | |
e58c3495 | 1071 | LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */ |
14cf11af PM |
1072 | li r5,0 /* logical cpu id */ |
1073 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ | |
1074 | cmpw r6,r24 /* Compare to our id */ | |
1075 | beq 2f | |
1076 | addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ | |
1077 | addi r5,r5,1 | |
1078 | cmpwi r5,NR_CPUS | |
1079 | blt 1b | |
1080 | ||
1081 | mr r3,r24 /* not found, copy phys to r3 */ | |
1082 | b .kexec_wait /* next kernel might do better */ | |
1083 | ||
b5bbeb23 | 1084 | 2: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ |
14cf11af PM |
1085 | /* From now on, r24 is expected to be logical cpuid */ |
1086 | mr r24,r5 | |
1087 | 3: HMT_LOW | |
1088 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | |
1089 | /* start. */ | |
1090 | sync | |
1091 | ||
f39b7a55 OJ |
1092 | #ifndef CONFIG_SMP |
1093 | b 3b /* Never go on non-SMP */ | |
1094 | #else | |
1095 | cmpwi 0,r23,0 | |
1096 | beq 3b /* Loop until told to go */ | |
1097 | ||
1098 | /* See if we need to call a cpu state restore handler */ | |
1099 | LOAD_REG_IMMEDIATE(r23, cur_cpu_spec) | |
1100 | ld r23,0(r23) | |
1101 | ld r23,CPU_SPEC_RESTORE(r23) | |
1102 | cmpdi 0,r23,0 | |
1103 | beq 4f | |
1104 | ld r23,0(r23) | |
1105 | mtctr r23 | |
1106 | bctrl | |
1107 | ||
1108 | 4: /* Create a temp kernel stack for use before relocation is on. */ | |
14cf11af PM |
1109 | ld r1,PACAEMERGSP(r13) |
1110 | subi r1,r1,STACK_FRAME_OVERHEAD | |
1111 | ||
c705677e | 1112 | b __secondary_start |
14cf11af | 1113 | #endif |
14cf11af | 1114 | |
14cf11af PM |
1115 | _STATIC(__mmu_off) |
1116 | mfmsr r3 | |
1117 | andi. r0,r3,MSR_IR|MSR_DR | |
1118 | beqlr | |
1119 | andc r3,r3,r0 | |
1120 | mtspr SPRN_SRR0,r4 | |
1121 | mtspr SPRN_SRR1,r3 | |
1122 | sync | |
1123 | rfid | |
1124 | b . /* prevent speculative execution */ | |
1125 | ||
1126 | ||
1127 | /* | |
1128 | * Here is our main kernel entry point. We support currently 2 kind of entries | |
1129 | * depending on the value of r5. | |
1130 | * | |
1131 | * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content | |
1132 | * in r3...r7 | |
1133 | * | |
1134 | * r5 == NULL -> kexec style entry. r3 is a physical pointer to the | |
1135 | * DT block, r4 is a physical pointer to the kernel itself | |
1136 | * | |
1137 | */ | |
1138 | _GLOBAL(__start_initialization_multiplatform) | |
1139 | /* | |
1140 | * Are we booted from a PROM Of-type client-interface ? | |
1141 | */ | |
1142 | cmpldi cr0,r5,0 | |
939e60f6 SR |
1143 | beq 1f |
1144 | b .__boot_from_prom /* yes -> prom */ | |
1145 | 1: | |
14cf11af PM |
1146 | /* Save parameters */ |
1147 | mr r31,r3 | |
1148 | mr r30,r4 | |
1149 | ||
1150 | /* Make sure we are running in 64-bit mode */
1151 | bl .enable_64b_mode | |
1152 | ||
1153 | /* Setup some critical 970 SPRs before switching MMU off */ | |
f39b7a55 OJ |
1154 | mfspr r0,SPRN_PVR |
1155 | srwi r0,r0,16 | |
1156 | cmpwi r0,0x39 /* 970 */ | |
1157 | beq 1f | |
1158 | cmpwi r0,0x3c /* 970FX */ | |
1159 | beq 1f | |
1160 | cmpwi r0,0x44 /* 970MP */ | |
190a24f5 OJ |
1161 | beq 1f |
1162 | cmpwi r0,0x45 /* 970GX */ | |
f39b7a55 OJ |
1163 | bne 2f |
1164 | 1: bl .__cpu_preinit_ppc970 | |
1165 | 2: | |
14cf11af | 1166 | |
14cf11af | 1167 | /* Switch off MMU if not already */ |
e58c3495 | 1168 | LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE) |
14cf11af PM |
1169 | add r4,r4,r30 |
1170 | bl .__mmu_off | |
1171 | b .__after_prom_start | |
1172 | ||
939e60f6 | 1173 | _INIT_STATIC(__boot_from_prom) |
14cf11af PM |
1174 | /* Save parameters */ |
1175 | mr r31,r3 | |
1176 | mr r30,r4 | |
1177 | mr r29,r5 | |
1178 | mr r28,r6 | |
1179 | mr r27,r7 | |
1180 | ||
6088857b OH |
1181 | /* |
1182 | * Align the stack to a 16-byte boundary.
1183 | * Depending on the size and layout of the ELF sections in the initial
1184 | * boot binary, the stack pointer will be unaligned on PowerMac.
1185 | */ | |
c05b4770 LT |
1186 | rldicr r1,r1,0,59 |
1187 | ||
14cf11af PM |
1188 | /* Make sure we are running in 64-bit mode */
1189 | bl .enable_64b_mode | |
1190 | ||
1191 | /* put a relocation offset into r3 */ | |
1192 | bl .reloc_offset | |
1193 | ||
e58c3495 | 1194 | LOAD_REG_IMMEDIATE(r2,__toc_start) |
14cf11af PM |
1195 | addi r2,r2,0x4000 |
1196 | addi r2,r2,0x4000 | |
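/* The TOC pointer is biased 0x8000 bytes into the TOC so that 16-bit
 * signed offsets can address the whole 64kB; addi only takes a signed
 * 16-bit immediate, hence the two 0x4000 adds. */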
1197 | ||
1198 | /* Relocate the TOC from a virt addr to a real addr */ | |
5a408329 | 1199 | add r2,r2,r3 |
14cf11af PM |
1200 | |
1201 | /* Restore parameters */ | |
1202 | mr r3,r31 | |
1203 | mr r4,r30 | |
1204 | mr r5,r29 | |
1205 | mr r6,r28 | |
1206 | mr r7,r27 | |
1207 | ||
1208 | /* Do all of the interaction with OF client interface */ | |
1209 | bl .prom_init | |
1210 | /* We never return */ | |
1211 | trap | |
1212 | ||
14cf11af PM |
1213 | _STATIC(__after_prom_start) |
1214 | ||
1215 | /* | |
758438a7 | 1216 | * We need to run with __start at physical address PHYSICAL_START. |
14cf11af PM |
1217 | * This will leave some code in the first 256B of |
1218 | * real memory, which are reserved for software use. | |
1219 | * The remainder of the first page is loaded with the fixed | |
1220 | * interrupt vectors. The next two pages are filled with | |
1221 | * unknown exception placeholders. | |
1222 | * | |
1223 | * Note: This process overwrites the OF exception vectors. | |
1224 | * r26 == relocation offset | |
1225 | * r27 == KERNELBASE | |
1226 | */ | |
1227 | bl .reloc_offset | |
1228 | mr r26,r3 | |
e58c3495 | 1229 | LOAD_REG_IMMEDIATE(r27, KERNELBASE) |
14cf11af | 1230 | |
e58c3495 | 1231 | LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */ |
14cf11af PM |
1232 | |
1233 | // XXX FIXME: Use phys returned by OF (r30) | |
5a408329 | 1234 | add r4,r27,r26 /* source addr */ |
14cf11af PM |
1235 | /* current address of _start */ |
1236 | /* i.e. where we are running */ | |
1237 | /* the source addr */ | |
1238 | ||
d0b79c54 | 1239 | cmpdi r4,0 /* In some cases the loader may */ |
939e60f6 SR |
1240 | bne 1f |
1241 | b .start_here_multiplatform /* have already put us at zero */ | |
d0b79c54 | 1242 | /* so we can skip the copy. */ |
939e60f6 | 1243 | 1: LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */ |
14cf11af PM |
1244 | sub r5,r5,r27 |
1245 | ||
1246 | li r6,0x100 /* Start offset, the first 0x100 */ | |
1247 | /* bytes were copied earlier. */ | |
1248 | ||
1249 | bl .copy_and_flush /* copy the first n bytes */ | |
1250 | /* this includes the code being */ | |
1251 | /* executed here. */ | |
1252 | ||
e58c3495 | 1253 | LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */ |
14cf11af PM |
1254 | mtctr r0 /* that we just made/relocated */ |
1255 | bctr | |
1256 | ||
e58c3495 | 1257 | 4: LOAD_REG_IMMEDIATE(r5,klimit) |
5a408329 | 1258 | add r5,r5,r26 |
14cf11af PM |
1259 | ld r5,0(r5) /* get the value of klimit */ |
1260 | sub r5,r5,r27 | |
1261 | bl .copy_and_flush /* copy the rest */ | |
1262 | b .start_here_multiplatform | |
1263 | ||
14cf11af PM |
1264 | /* |
1265 | * Copy routine used to copy the kernel to start at physical address 0 | |
1266 | * and flush and invalidate the caches as needed. | |
1267 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset | |
1268 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. | |
1269 | * | |
1270 | * Note: this routine *only* clobbers r0, r6 and lr | |
1271 | */ | |
1272 | _GLOBAL(copy_and_flush) | |
1273 | addi r5,r5,-8 | |
1274 | addi r6,r6,-8 | |
5a2fe38d | 1275 | 4: li r0,8 /* Use the smallest common */ |
14cf11af PM |
1276 | /* denominator cache line */ |
1277 | /* size. This results in */ | |
1278 | /* extra cache line flushes */ | |
1279 | /* but operation is correct. */ | |
1280 | /* Can't get cache line size */ | |
1281 | /* from NACA as it is being */ | |
1282 | /* moved too. */ | |
1283 | ||
1284 | mtctr r0 /* put # words/line in ctr */ | |
1285 | 3: addi r6,r6,8 /* copy a cache line */ | |
1286 | ldx r0,r6,r4 | |
1287 | stdx r0,r6,r3 | |
1288 | bdnz 3b | |
1289 | dcbst r6,r3 /* write it to memory */ | |
1290 | sync | |
1291 | icbi r6,r3 /* flush the icache line */ | |
1292 | cmpld 0,r6,r5 | |
1293 | blt 4b | |
1294 | sync | |
1295 | addi r5,r5,8 | |
1296 | addi r6,r6,8 | |
1297 | blr | |
1298 | ||
1299 | .align 8 | |
1300 | copy_to_here: | |
1301 | ||
1302 | #ifdef CONFIG_SMP | |
1303 | #ifdef CONFIG_PPC_PMAC | |
1304 | /* | |
1305 | * On PowerMac, secondary processors start from the reset vector, which
1306 | * is temporarily turned into a call to one of the functions below. | |
1307 | */ | |
1308 | .section ".text"; | |
1309 | .align 2 ; | |
1310 | ||
35499c01 PM |
1311 | .globl __secondary_start_pmac_0 |
1312 | __secondary_start_pmac_0: | |
1313 | /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ | |
1314 | li r24,0 | |
1315 | b 1f | |
1316 | li r24,1 | |
1317 | b 1f | |
1318 | li r24,2 | |
1319 | b 1f | |
1320 | li r24,3 | |
1321 | 1: | |
14cf11af PM |
1322 | |
1323 | _GLOBAL(pmac_secondary_start) | |
1324 | /* turn on 64-bit mode */ | |
1325 | bl .enable_64b_mode | |
14cf11af PM |
1326 | |
1327 | /* Copy some CPU settings from CPU 0 */ | |
f39b7a55 | 1328 | bl .__restore_cpu_ppc970 |
14cf11af PM |
1329 | |
1330 | /* pSeries does this early, though I don't think we really need it */
1331 | mfmsr r3 | |
1332 | ori r3,r3,MSR_RI | |
1333 | mtmsrd r3 /* RI on */ | |
1334 | ||
1335 | /* Set up a paca value for this processor. */ | |
e58c3495 | 1336 | LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */ |
14cf11af PM |
1337 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ |
1338 | add r13,r13,r4 /* for this processor. */ | |
b5bbeb23 | 1339 | mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ |
14cf11af PM |
1340 | |
1341 | /* Create a temp kernel stack for use before relocation is on. */ | |
1342 | ld r1,PACAEMERGSP(r13) | |
1343 | subi r1,r1,STACK_FRAME_OVERHEAD | |
1344 | ||
c705677e | 1345 | b __secondary_start |
14cf11af PM |
1346 | |
1347 | #endif /* CONFIG_PPC_PMAC */ | |
1348 | ||
1349 | /* | |
1350 | * This function is called after the master CPU has released the | |
1351 | * secondary processors. The execution environment is relocation off. | |
1352 | * The paca for this processor has the following fields initialized at | |
1353 | * this point: | |
1354 | * 1. Processor number | |
1355 | * 2. Segment table pointer (virtual address) | |
1356 | * On entry the following are set: | |
1357 | * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries | |
1358 | * r24 = cpu# (in Linux terms) | |
1359 | * r13 = paca virtual address | |
1360 | * SPRG3 = paca virtual address | |
1361 | */ | |
fc68e869 | 1362 | .globl __secondary_start |
c705677e | 1363 | __secondary_start: |
799d6046 PM |
1364 | /* Set thread priority to MEDIUM */ |
1365 | HMT_MEDIUM | |
14cf11af | 1366 | |
799d6046 | 1367 | /* Load TOC */ |
14cf11af | 1368 | ld r2,PACATOC(r13) |
799d6046 PM |
1369 | |
1370 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ | |
1371 | bl .early_setup_secondary | |
14cf11af PM |
1372 | |
1373 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | |
e58c3495 | 1374 | LOAD_REG_ADDR(r3, current_set) |
14cf11af PM |
1375 | sldi r28,r24,3 /* get current_set[cpu#] */ |
1376 | ldx r1,r3,r28 | |
1377 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | |
1378 | std r1,PACAKSAVE(r13) | |
1379 | ||
799d6046 | 1380 | /* Clear backchain so we get nice backtraces */ |
14cf11af PM |
1381 | li r7,0 |
1382 | mtlr r7 | |
1383 | ||
1384 | /* enable MMU and jump to start_secondary */ | |
e58c3495 DG |
1385 | LOAD_REG_ADDR(r3, .start_secondary_prolog) |
1386 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) | |
d04c56f7 | 1387 | #ifdef CONFIG_PPC_ISERIES |
3f639ee8 | 1388 | BEGIN_FW_FTR_SECTION |
14cf11af | 1389 | ori r4,r4,MSR_EE |
ff3da2e0 BH |
1390 | li r8,1 |
1391 | stb r8,PACAHARDIRQEN(r13) | |
3f639ee8 | 1392 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
14cf11af | 1393 | #endif |
d04c56f7 | 1394 | BEGIN_FW_FTR_SECTION |
d04c56f7 PM |
1395 | stb r7,PACAHARDIRQEN(r13) |
1396 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | |
ff3da2e0 | 1397 | stb r7,PACASOFTIRQEN(r13) |
d04c56f7 | 1398 | |
b5bbeb23 PM |
1399 | mtspr SPRN_SRR0,r3 |
1400 | mtspr SPRN_SRR1,r4 | |
14cf11af PM |
1401 | rfid |
1402 | b . /* prevent speculative execution */ | |
1403 | ||
1404 | /* | |
1405 | * Running with relocation on at this point. All we want to do is | |
1406 | * zero the stack back-chain pointer before going into C code. | |
1407 | */ | |
1408 | _GLOBAL(start_secondary_prolog) | |
1409 | li r3,0 | |
1410 | std r3,0(r1) /* Zero the stack frame pointer */ | |
1411 | bl .start_secondary | |
799d6046 | 1412 | b . |
14cf11af PM |
1413 | #endif |
1414 | ||
1415 | /* | |
1416 | * This subroutine clobbers r11 and r12 | |
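 * It sets MSR_SF (64-bit mode) and MSR_ISF (64-bit interrupt mode).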
1417 | */ | |
1418 | _GLOBAL(enable_64b_mode) | |
1419 | mfmsr r11 /* grab the current MSR */ | |
1420 | li r12,1 | |
1421 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) | |
1422 | or r11,r11,r12 | |
1423 | li r12,1 | |
1424 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) | |
1425 | or r11,r11,r12 | |
1426 | mtmsrd r11 | |
1427 | isync | |
1428 | blr | |
1429 | ||
14cf11af PM |
1430 | /* |
1431 | * This is where the main kernel code starts. | |
1432 | */ | |
939e60f6 | 1433 | _INIT_STATIC(start_here_multiplatform) |
14cf11af PM |
1434 | /* get a new offset, now that the kernel has moved. */ |
1435 | bl .reloc_offset | |
1436 | mr r26,r3 | |
1437 | ||
1438 | /* Clear out the BSS. It may have been done in prom_init
1439 | * already, but that's irrelevant since prom_init will soon
1440 | * be detached from the kernel completely. Besides, we need | |
1441 | * to clear it now for kexec-style entry. | |
1442 | */ | |
e58c3495 DG |
1443 | LOAD_REG_IMMEDIATE(r11,__bss_stop) |
1444 | LOAD_REG_IMMEDIATE(r8,__bss_start) | |
14cf11af PM |
1445 | sub r11,r11,r8 /* bss size */ |
1446 | addi r11,r11,7 /* round up to an even double word */ | |
1447 | rldicl. r11,r11,61,3 /* shift right by 3 */ | |
1448 | beq 4f | |
1449 | addi r8,r8,-8 | |
1450 | li r0,0 | |
1451 | mtctr r11 /* zero this many doublewords */ | |
1452 | 3: stdu r0,8(r8) | |
1453 | bdnz 3b | |
1454 | 4: | |
1455 | ||
1456 | mfmsr r6 | |
1457 | ori r6,r6,MSR_RI | |
1458 | mtmsrd r6 /* RI on */ | |
1459 | ||
14cf11af PM |
1460 | /* The following gets the stack and TOC set up with the regs */ |
1461 | /* pointing to the real addr of the kernel stack. This is */ | |
1462 | /* all done to support the C function call below which sets */ | |
1463 | /* up the htab. This is done because we have relocated the */ | |
1464 | /* kernel but are still running in real mode. */ | |
1465 | ||
e58c3495 | 1466 | LOAD_REG_IMMEDIATE(r3,init_thread_union) |
5a408329 | 1467 | add r3,r3,r26 |
14cf11af PM |
1468 | |
1469 | /* set up a stack pointer (physical address) */ | |
1470 | addi r1,r3,THREAD_SIZE | |
1471 | li r0,0 | |
1472 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | |
1473 | ||
1474 | /* set up the TOC (physical address) */ | |
e58c3495 | 1475 | LOAD_REG_IMMEDIATE(r2,__toc_start) |
14cf11af PM |
1476 | addi r2,r2,0x4000 |
1477 | addi r2,r2,0x4000 | |
5a408329 | 1478 | add r2,r2,r26 |
14cf11af | 1479 | |
14cf11af PM |
1480 | /* Do very early kernel initializations, including initial hash table, |
1481 | * stab and slb setup before we turn on relocation. */ | |
1482 | ||
1483 | /* Restore parameters passed from prom_init/kexec */ | |
1484 | mr r3,r31 | |
1485 | bl .early_setup | |
1486 | ||
e58c3495 DG |
1487 | LOAD_REG_IMMEDIATE(r3, .start_here_common) |
1488 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) | |
b5bbeb23 PM |
1489 | mtspr SPRN_SRR0,r3 |
1490 | mtspr SPRN_SRR1,r4 | |
14cf11af PM |
1491 | rfid |
1492 | b . /* prevent speculative execution */ | |
14cf11af PM |
1493 | |
1494 | /* This is where all platforms converge execution */ | |
fc68e869 | 1495 | _INIT_GLOBAL(start_here_common) |
14cf11af PM |
1496 | /* relocation is on at this point */ |
1497 | ||
1498 | /* The following code sets up the SP and TOC now that we are */ | |
1499 | /* running with translation enabled. */ | |
1500 | ||
e58c3495 | 1501 | LOAD_REG_IMMEDIATE(r3,init_thread_union) |
14cf11af PM |
1502 | |
1503 | /* set up the stack */ | |
1504 | addi r1,r3,THREAD_SIZE | |
1505 | li r0,0 | |
1506 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | |
1507 | ||
14cf11af | 1508 | /* ptr to current */ |
e58c3495 | 1509 | LOAD_REG_IMMEDIATE(r4, init_task) |
14cf11af PM |
1510 | std r4,PACACURRENT(r13) |
1511 | ||
1512 | /* Load the TOC */ | |
1513 | ld r2,PACATOC(r13) | |
1514 | std r1,PACAKSAVE(r13) | |
1515 | ||
1516 | bl .setup_system | |
1517 | ||
1518 | /* Load up the kernel context */ | |
1519 | 5: | |
14cf11af | 1520 | li r5,0 |
d04c56f7 PM |
1521 | stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */ |
1522 | #ifdef CONFIG_PPC_ISERIES | |
1523 | BEGIN_FW_FTR_SECTION | |
14cf11af | 1524 | mfmsr r5 |
ff3da2e0 | 1525 | ori r5,r5,MSR_EE /* Hard Enabled on iSeries*/ |
14cf11af | 1526 | mtmsrd r5 |
ff3da2e0 | 1527 | li r5,1 |
3f639ee8 | 1528 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
14cf11af | 1529 | #endif |
ff3da2e0 | 1530 | stb r5,PACAHARDIRQEN(r13) /* Hard Disabled on others */ |
14cf11af | 1531 | |
ff3da2e0 | 1532 | bl .start_kernel |
14cf11af | 1533 | |
f1870f77 AB |
1534 | /* Not reached */ |
1535 | BUG_OPCODE | |
14cf11af | 1536 | |
14cf11af PM |
1537 | /* |
1538 | * We put a few things here that have to be page-aligned. | |
1539 | * This stuff goes at the beginning of the bss, which is page-aligned. | |
1540 | */ | |
1541 | .section ".bss" | |
1542 | ||
1543 | .align PAGE_SHIFT | |
1544 | ||
1545 | .globl empty_zero_page | |
1546 | empty_zero_page: | |
1547 | .space PAGE_SIZE | |
1548 | ||
1549 | .globl swapper_pg_dir | |
1550 | swapper_pg_dir: | |
ee7a76da | 1551 | .space PGD_TABLE_SIZE |