/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>

	.text

#ifdef CONFIG_IRQSTACKS
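/*
 * Both helpers below run their callee on the stack passed in: the stwu
 * saves the old r1 at the top of the new stack and leaves that address
 * in the base register, which mr then copies into r1.  The back chain
 * it stored is what lets "lwz r1,0(r1)" switch back after the call;
 * the return address sits at 4(r1) in the caller's frame, per the
 * 32-bit ABI.
 */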
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr

_GLOBAL(call_handle_irq)
	mflr	r0
	stw	r0,4(r1)
	mtctr	r6
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
	mr	r1,r5
	bctrl
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr
#endif /* CONFIG_IRQSTACKS */

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
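/*
 * Roughly equivalent C, for illustration only (the halves of each
 * operand arrive in register pairs per the 32-bit ABI):
 *
 *	u64 mulhdu(u64 a, u64 b)
 *	{
 *		return (u64)(((u128)a * b) >> 64);	// u128 is notional here
 *	}
 */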
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
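/*
 * Illustrative sketch of the trick below: a bl/mflr pair reads the
 * address label 1 is actually executing at, and subtracting the
 * link-time address of that label yields reloc_offset(), i.e.
 *
 *	return x - (runtime_addr_of_1 - linktime_addr_of_1);
 */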
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr
101 | ||
102 | /* | |
103 | * reloc_got2 runs through the .got2 section adding an offset | |
104 | * to each entry. | |
105 | */ | |
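/*
 * A minimal C sketch of the loop below, with r3 as "offset" and the
 * bounds taken from the linker-provided __got2_start/__got2_end:
 *
 *	for (u32 *p = __got2_start; p < __got2_end; p++)
 *		*p += offset;
 *
 * As in sub_reloc_offset, a bl/mflr pair first relocates the section
 * bounds themselves.
 */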
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
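/*
 * In effect (illustrative C only; every pointer is fixed up by the
 * data offset in r3 because this can run before relocation):
 *
 *	if (cur_cpu_spec->cpu_setup)
 *		cur_cpu_spec->cpu_setup(offset, cur_cpu_spec);
 */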
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from value read */
				/* (could I have used rlwimi for these two?) */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,(31-THREAD_SHIFT)
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * Clear the MSR bits given in the mask, then OR on the bits given in
 * the second argument:
 * _nmask_and_or_msr(nmask, value_to_or)
 */
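/*
 * Equivalent one-liner, for reference:
 *
 *	msr = (msr & ~nmask) | value_to_or;
 */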
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
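/*
 * Illustrative reading: "return *(volatile u8 *)addr" executed with
 * MSR[DR] cleared, so the pointer in r3 is treated as a physical
 * address.
 */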
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	lbz	r3,0(r3)
	sync
	mtmsr	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	stb	r3,0(r4)
	sync
	mtmsr	r7
	sync
	isync
	blr

#endif /* CONFIG_40x */

/*
 * Flush MMU TLB
 */
#ifndef CONFIG_FSL_BOOKE
_GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid)
#endif
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,r3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,r3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr

/*
 * Flush MMU TLB for a particular address
 */
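/*
 * r3 = virtual address to invalidate, r4 = PID/TID of the context it
 * belongs to (the PID is used by the 40x/44x/FSL variants below).
 */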
#ifndef CONFIG_FSL_BOOKE
_GLOBAL(_tlbil_va)
#endif
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	/* We run the search with interrupts disabled because we have to change
	 * the PID and we don't want to preempt when that happens.
	 */
	mfmsr	r5
	mfspr	r6,SPRN_PID
	wrteei	0
	mtspr	SPRN_PID,r4
	tlbsx.	r3, 0, r3
	mtspr	SPRN_PID,r6
	wrtee	r5
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:

#elif defined(CONFIG_44x)
	mfspr	r5,SPRN_MMUCR
	rlwimi	r5,r4,0,24,31		/* Set TID */

	/* We have to run the search with interrupts disabled, even critical
	 * and debug interrupts (in fact the only critical exceptions we have
	 * are debug and machine check).  Otherwise an interrupt which causes
	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
	mfmsr	r4
	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
	andc	r6,r4,r6
	mtmsr	r6
	mtspr	SPRN_MMUCR,r5
	tlbsx.	r3, 0, r3
	mtmsr	r4
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19	/* Get page base address */
	ori	r5, r4, 0x08		/* TLBSEL = 1 */
	tlbivax	0, r4
	tlbivax	0, r5
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr

#if defined(CONFIG_FSL_BOOKE)
/*
 * Flush MMU TLB, but only on the local processor (no broadcast)
 */
_GLOBAL(_tlbil_all)
#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
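/*
 * In C terms (mtspr/mfspr as in asm/reg.h): set the flash-invalidate
 * bits, then spin until the hardware clears them again:
 *
 *	mtspr(SPRN_MMUCSR0, MMUCSR0_TLBFI);
 *	while (mfspr(SPRN_MMUCSR0) & MMUCSR0_TLBFI)
 *		;
 */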
	li	r3,(MMUCSR0_TLBFI)@l
	mtspr	SPRN_MMUCSR0, r3
1:
	mfspr	r3,SPRN_MMUCSR0
	andi.	r3,r3,MMUCSR0_TLBFI@l
	bne	1b
	blr

/*
 * Flush MMU TLB for a particular process id, but only on the local processor
 * (no broadcast)
 */
_GLOBAL(_tlbil_pid)
/* we currently do an invalidate all since we don't have per pid invalidate */
	li	r3,(MMUCSR0_TLBFI)@l
	mtspr	SPRN_MMUCSR0, r3
1:
	mfspr	r3,SPRN_MMUCSR0
	andi.	r3,r3,MMUCSR0_TLBFI@l
	bne	1b
	msync
	isync
	blr

/*
 * Flush MMU TLB for a particular address, but only on the local processor
 * (no broadcast)
 */
_GLOBAL(_tlbil_va)
	mfmsr	r10
	wrteei	0
	slwi	r4,r4,16
	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
	tlbsx	0,r3
	mfspr	r4,SPRN_MAS1		/* check valid */
	andis.	r3,r4,MAS1_VALID@h
	beq	1f
	rlwinm	r4,r4,0,1,31
	mtspr	SPRN_MAS1,r4
	tlbwe
	msync
	isync
1:	wrtee	r10
	blr
#endif /* CONFIG_FSL_BOOKE */

/*
 * Nobody implements this yet
 */
_GLOBAL(_tlbivax_bcast)
1:	trap
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
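/*
 * Sketch of the sequence below (dcbst/icbi standing in for the cache
 * instructions of the same name):
 *
 *	for (p = start & ~(L1_CACHE_BYTES-1); p < stop; p += L1_CACHE_BYTES)
 *		dcbst(p);	// push dirty data lines to memory
 *	sync();			// wait for the stores to land
 *	for (p = start & ~(L1_CACHE_BYTES-1); p < stop; p += L1_CACHE_BYTES)
 *		icbi(p);	// invalidate stale icache lines
 *	sync();
 *	isync();		// toss already-prefetched instructions
 */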
_KPROBE(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to take effect */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifndef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space.
	 */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
#endif /* CONFIG_44x */
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10			/* restore DR */
	isync
	blr

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 *	void clear_pages(void *page, int order);
 */
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)
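/*
 * Note: COPY_16_BYTES moves four words per expansion, and the lwzu/stwu
 * update forms advance r4 and r3 by 16 as a side effect, so the copy
 * loops below need no separate pointer arithmetic.
 */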

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else /* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif /* CONFIG_8xx */

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
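/*
 * Each is an atomic read-modify-write built on lwarx/stwcx. -- in C
 * terms "*addr &= ~mask" and "*addr |= mask" respectively, retried
 * until the store-conditional succeeds.
 */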
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 */
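/*
 * For reference, the libgcc helpers these implement, with the 64-bit
 * value split across the r3/r4 pair:
 *
 *	long long __ashrdi3(long long v, int count);	// v >> count, arithmetic
 *	long long __ashldi3(long long v, int count);	// v << count
 *	long long __lshrdi3(long long v, int count);	// v >> count, logical
 */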
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
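/*
 * i.e., with the high words in r3/r5 and the low words in r4/r6:
 *
 *	return a < b ? 0 : (a == b ? 1 : 2);
 */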
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr

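/*
 * Branchless absolute value: r4 = r3 >> 31 is 0 or ~0; the xor flips
 * the bits of a negative value and the final subtract adds 1 back.
 */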
_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

/*
 * Create a kernel thread
 *	kernel_thread(fn, arg, flags)
 */
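/*
 * Illustrative C equivalent:
 *
 *	pid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *	if (pid == 0) {		// child
 *		fn(arg);
 *		sys_exit(0);	// in case fn returns
 *	}
 *	return pid;		// parent; negative errno on failure
 */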
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	bns+	1f		/* did system call indicate error? */
	neg	r3,r3		/* if so, make return code negative */
1:	cmpwi	0,r3,0		/* parent or child? */
	bne	2f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
2:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list   */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address      */

	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

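	/*
	 * The copy loop below walks the kexec indirection list: each
	 * word is a page address with flag bits in its low bits.
	 * Rough C sketch (copy_page_checksummed stands in for the
	 * dcbst/icbi copy loop at 9: below):
	 *
	 *	for (entry = page_list; ; entry++) {
	 *		addr = *entry & PAGE_MASK;
	 *		if (*entry & IND_DESTINATION)
	 *			dest = addr;
	 *		else if (*entry & IND_INDIRECTION)
	 *			entry = (u32 *)addr - 1;
	 *		else if (*entry & IND_DONE)
	 *			break;
	 *		else if (*entry & IND_SOURCE) {
	 *			copy_page_checksummed(dest, addr);
	 *			dest += PAGE_SIZE;
	 *		}
	 *	}
	 */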
	/* Do the copies */
	li	r6, 0		/* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31	/* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19	/* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30	/* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19	/* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29	/* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28	/* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19	/* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4
9:
	lwzu	r0, 4(r9)	/* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif