/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_8xx
#define ISYNC_8xx isync
#else
#define ISYNC_8xx
#endif
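
/*
 * ISYNC_8xx is used in the I/O string operations below: on the 8xx an
 * isync after the access makes a failed bus cycle fault synchronously,
 * so the exception-table fixups there can catch it.  It expands to
 * nothing on other CPUs.
 */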
	.text

	.align	5
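/*
 * __delay(loops) -- spin for "loops" decrements of the count register;
 * returns immediately if loops is 0.
 */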
_GLOBAL(__delay)
	cmpwi	0,r3,0
	mtctr	r3
	beqlr
1:	bdnz	1b
	blr

/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
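/*
 * The "bl 1f; 1: mflr" pair below is the usual PC-discovery idiom:
 * bl leaves the run-time address of label 1 in LR, while lis/addi
 * materialize its link-time address, so the difference is the
 * current relocation offset.
 */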
_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r3,r4,r3
	mtlr	r0
	blr

/*
 * add_reloc_offset(x) returns x + reloc_offset().
 */
_GLOBAL(add_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	add	r3,r3,r5
	mtlr	r0
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
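/*
 * Called with r3 = the offset to add; each 32-bit entry between
 * __got2_start and __got2_end is adjusted in place.
 */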
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * identify_cpu
 * called with r3 = data offset and r4 = CPU number
 * doesn't change r3
 */
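/*
 * Walks cpu_specs[] until (PVR & pvr_mask) == pvr_value, then stores
 * the matching entry's link-time address (hence the subtraction of
 * the data offset) in cur_cpu_spec.  Assumes the table always
 * contains a matching entry.
 */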
_GLOBAL(identify_cpu)
	addis	r8,r3,cpu_specs@ha
	addi	r8,r8,cpu_specs@l
	mfpvr	r7
1:
	lwz	r5,CPU_SPEC_PVR_MASK(r8)
	and	r5,r5,r7
	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
	cmplw	0,r6,r5
	beq	1f
	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
	b	1b
1:
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	sub	r8,r8,r3
	stw	r8,0(r6)
	blr

/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nops over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 */
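/*
 * Each __ftr_fixup entry is four words: a feature mask, the expected
 * value, and the start/end (link-time) addresses of the code to patch
 * out when (features & mask) != value.
 */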
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	lwz	r4,0(r6)
	add	r4,r4,r3
	lwz	r4,CPU_SPEC_FEATURES(r4)

	/* Get the fixup table */
	addis	r6,r3,__start___ftr_fixup@ha
	addi	r6,r6,__start___ftr_fixup@l
	addis	r7,r3,__stop___ftr_fixup@ha
	addi	r7,r7,__stop___ftr_fixup@l

	/* Do the fixup */
1:	cmplw	0,r6,r7
	bgelr
	addi	r6,r6,16
	lwz	r8,-16(r6)	/* mask */
	and	r8,r8,r4
	lwz	r9,-12(r6)	/* value */
	cmplw	0,r8,r9
	beq	1b
	lwz	r8,-8(r6)	/* section begin */
	lwz	r9,-4(r6)	/* section end */
	subf.	r9,r8,r9
	beq	1b
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2
	mtctr	r9
	add	r8,r8,r3
	lis	r0,0x60000000@h	/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f
	dcbst	0,r8		/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync			/* additional sync needed on g4 */
	isync
	b	1b

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

/*
 * complement mask on the msr then "or" some values on.
 * _nmask_and_or_msr(nmask, value_to_or)
 */
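/*
 * Roughly: msr = (msr & ~nmask) | value_to_or, with the mtmsr
 * bracketed by sync/isync to keep surrounding accesses ordered.
 */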
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */


/*
 * Flush MMU TLB
 */
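/*
 * Picks the invalidation sequence appropriate to the core family; on
 * classic hash-MMU SMP parts it must first take mmu_hash_lock with
 * interrupts and data translation disabled.
 */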
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr

/*
 * Flush MMU TLB for a particular address
 */
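/* r3 = effective address whose translation is to be invalidated. */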
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID		/* Get PID */
	rlwimi	r4,r5,0,24,31		/* Set TID */
	mtspr	SPRN_MMUCR,r4

	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * __flush_icache_range(unsigned long start, unsigned long stop)
 */
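/*
 * Roughly, in C:
 *	for (p = start & ~(L1_CACHE_BYTES - 1); p < stop; p += L1_CACHE_BYTES)
 *		dcbst(p);		push dirty data to memory
 *	sync();				wait for the stores to land
 *	for (same range)
 *		icbi(p);		toss stale icache lines
 *	sync(); isync();		resynchronize instruction fetch
 */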
_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to complete */
	blr

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16

_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
	mtctr	r4
	lis	r5, KERNELBASE@h
1:	lwz	r3, 0(r5)		/* Load one word from every line */
	addi	r5, r5, L1_CACHE_BYTES
	bdnz	1b
	blr
#endif /* CONFIG_NOT_COHERENT_CACHE */

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 * void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 * void __flush_dcache_icache_phys(unsigned long physaddr)
 */
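/*
 * Note that only MSR[DR] is cleared below; instruction translation
 * stays on, so the code itself keeps executing from its mapped
 * addresses while the data accesses use physical ones.
 */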
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10			/* restore DR */
	isync
	blr

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
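/*
 * dcbz zeroes a whole cache line without first reading it from
 * memory; the 8xx variant below uses plain word stores instead,
 * one 16-byte line (the 8xx line size) per iteration.
 */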
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
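/*
 * The non-8xx path below streams dcbt prefetches MAX_COPY_PREFETCH
 * lines ahead of the copy.  The first pass stops that many lines
 * early; cr0.eq then flips and a second pass copies the already
 * prefetched tail.
 */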
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif /* CONFIG_8xx */

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr)
 */
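/*
 * lwarx/stwcx. form a load-reserved/store-conditional pair: the store
 * fails, and the loop retries, if the word was modified in between.
 * PPC405_ERR77 expands to a workaround for a 405 erratum affecting
 * stwcx. (and to nothing on other CPUs).
 */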
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 */
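/*
 * Each access below carries __ex_table entries so that a fault taken
 * on a bad port access branches to the fixup blr instead of oopsing;
 * the eieio keeps the I/O accesses ordered on the bus.
 */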
_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbz	r5,0(r3)
01:	eieio
02:	stbu	r5,1(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbzu	r5,1(r4)
01:	stb	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhz	r5,0(r3)
01:	eieio
02:	sthu	r5,2(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
01:	sth	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwz	r5,0(r3)
01:	eieio
02:	stwu	r5,4(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
01:	stw	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 */
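/*
 * Worked example for __lshrdi3 with count = 40: srw/slw treat shift
 * amounts >= 32 as producing 0, so LSW >> 40 and MSW << (32-40) both
 * vanish, and the result comes entirely from t2 = MSW >> (40-32),
 * with MSW itself becoming 0 -- exactly a 64-bit right shift by 40.
 */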
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr

/*
 * Create a kernel thread
 * kernel_thread(fn, arg, flags)
 */
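/*
 * Clones the current task with CLONE_VM | CLONE_UNTRACED | flags.  In
 * the child (clone returns 0) we build an empty stack frame, call
 * fn(arg), and invoke the exit syscall if fn ever returns; the parent
 * just returns clone's result.
 */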
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr

_GLOBAL(kernel_execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr