| 1 | /* |
| 2 | * This file contains miscellaneous low-level functions. |
| 3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
| 4 | * |
| 5 | * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) |
| 6 | * and Paul Mackerras. |
| 7 | * |
| 8 | * kexec bits: |
| 9 | * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> |
| 10 | * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz |
| 11 | * |
| 12 | * This program is free software; you can redistribute it and/or |
| 13 | * modify it under the terms of the GNU General Public License |
| 14 | * as published by the Free Software Foundation; either version |
| 15 | * 2 of the License, or (at your option) any later version. |
| 16 | * |
| 17 | */ |
| 18 | |
| 19 | #include <linux/sys.h> |
| 20 | #include <asm/unistd.h> |
| 21 | #include <asm/errno.h> |
| 22 | #include <asm/reg.h> |
| 23 | #include <asm/page.h> |
| 24 | #include <asm/cache.h> |
| 25 | #include <asm/cputable.h> |
| 26 | #include <asm/mmu.h> |
| 27 | #include <asm/ppc_asm.h> |
| 28 | #include <asm/thread_info.h> |
| 29 | #include <asm/asm-offsets.h> |
| 30 | #include <asm/processor.h> |
| 31 | #include <asm/kexec.h> |
| 32 | |
| 33 | .text |
| 34 | |
| 35 | /* |
| 36 | * This returns the high 64 bits of the product of two 64-bit numbers. |
| 37 | */ |
| 38 | _GLOBAL(mulhdu) |
| 39 | cmpwi r6,0 |
| 40 | cmpwi cr1,r3,0 |
| 41 | mr r10,r4 |
| 42 | mulhwu r4,r4,r5 |
| 43 | beq 1f |
| 44 | mulhwu r0,r10,r6 |
| 45 | mullw r7,r10,r5 |
| 46 | addc r7,r0,r7 |
| 47 | addze r4,r4 |
| 48 | 1: beqlr cr1 /* all done if high part of A is 0 */ |
| 49 | mr r10,r3 |
| 50 | mullw r9,r3,r5 |
| 51 | mulhwu r3,r3,r5 |
| 52 | beq 2f |
| 53 | mullw r0,r10,r6 |
| 54 | mulhwu r8,r10,r6 |
| 55 | addc r7,r0,r7 |
| 56 | adde r4,r4,r8 |
| 57 | addze r3,r3 |
| 58 | 2: addc r4,r4,r9 |
| 59 | addze r3,r3 |
| 60 | blr |
| 61 | |
| 62 | /* |
| 63 | * sub_reloc_offset(x) returns x - reloc_offset(). |
| 64 | */ |
| 65 | _GLOBAL(sub_reloc_offset) |
| 66 | mflr r0 |
| 67 | bl 1f |
| 68 | 1: mflr r5 |
| 69 | lis r4,1b@ha |
| 70 | addi r4,r4,1b@l |
| 71 | subf r5,r4,r5 |
| 72 | subf r3,r5,r3 |
| 73 | mtlr r0 |
| 74 | blr |
| 75 | |
| 76 | /* |
| 77 | * reloc_got2 runs through the .got2 section adding an offset |
| 78 | * to each entry. |
| 79 | */ |
| 80 | _GLOBAL(reloc_got2) |
| 81 | mflr r11 |
| 82 | lis r7,__got2_start@ha |
| 83 | addi r7,r7,__got2_start@l |
| 84 | lis r8,__got2_end@ha |
| 85 | addi r8,r8,__got2_end@l |
| 86 | subf r8,r7,r8 |
| 87 | srwi. r8,r8,2 |
| 88 | beqlr |
| 89 | mtctr r8 |
| 90 | bl 1f |
| 91 | 1: mflr r0 |
| 92 | lis r4,1b@ha |
| 93 | addi r4,r4,1b@l |
| 94 | subf r0,r4,r0 |
| 95 | add r7,r0,r7 |
| 96 | 2: lwz r0,0(r7) |
| 97 | add r0,r0,r3 |
| 98 | stw r0,0(r7) |
| 99 | addi r7,r7,4 |
| 100 | bdnz 2b |
| 101 | mtlr r11 |
| 102 | blr |
| 103 | |
| 104 | /* |
| 105 | * call_setup_cpu - call the setup_cpu function for this cpu |
| 106 | * r3 = data offset, r24 = cpu number |
| 107 | * |
| 108 | * Setup function is called with: |
| 109 | * r3 = data offset |
| 110 | * r4 = ptr to CPU spec (relocated) |
| 111 | */ |
| 112 | _GLOBAL(call_setup_cpu) |
| 113 | addis r4,r3,cur_cpu_spec@ha |
| 114 | addi r4,r4,cur_cpu_spec@l |
| 115 | lwz r4,0(r4) |
| 116 | add r4,r4,r3 |
| 117 | lwz r5,CPU_SPEC_SETUP(r4) |
| 118 | cmpwi 0,r5,0 |
| 119 | add r5,r5,r3 |
| 120 | beqlr |
| 121 | mtctr r5 |
| 122 | bctr |
| 123 | |
| 124 | #if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx) |
| 125 | |
/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU.  This function should really be moved to
 * some other place (as should most of the cpufreq code in
 * via-pmu.c).  r3 selects the PLL: 0 for PLL0, non-zero for
 * PLL1 (the HID1:PS bit).
 */
| 130 | _GLOBAL(low_choose_750fx_pll) |
| 131 | /* Clear MSR:EE */ |
| 132 | mfmsr r7 |
| 133 | rlwinm r0,r7,0,17,15 |
| 134 | mtmsr r0 |
| 135 | |
| 136 | /* If switching to PLL1, disable HID0:BTIC */ |
| 137 | cmplwi cr0,r3,0 |
| 138 | beq 1f |
| 139 | mfspr r5,SPRN_HID0 |
| 140 | rlwinm r5,r5,0,27,25 |
| 141 | sync |
| 142 | mtspr SPRN_HID0,r5 |
| 143 | isync |
| 144 | sync |
| 145 | |
| 146 | 1: |
| 147 | /* Calc new HID1 value */ |
	mfspr	r4,SPRN_HID1
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from value read */
	or	r4,r4,r5	/* Could I have used rlwimi here? */
| 152 | mtspr SPRN_HID1,r4 |
| 153 | |
| 154 | /* Store new HID1 image */ |
| 155 | rlwinm r6,r1,0,0,18 |
| 156 | lwz r6,TI_CPU(r6) |
| 157 | slwi r6,r6,2 |
| 158 | addis r6,r6,nap_save_hid1@ha |
| 159 | stw r4,nap_save_hid1@l(r6) |
| 160 | |
| 161 | /* If switching to PLL0, enable HID0:BTIC */ |
| 162 | cmplwi cr0,r3,0 |
| 163 | bne 1f |
| 164 | mfspr r5,SPRN_HID0 |
| 165 | ori r5,r5,HID0_BTIC |
| 166 | sync |
| 167 | mtspr SPRN_HID0,r5 |
| 168 | isync |
| 169 | sync |
| 170 | |
| 171 | 1: |
| 172 | /* Return */ |
| 173 | mtmsr r7 |
| 174 | blr |
| 175 | |
| 176 | _GLOBAL(low_choose_7447a_dfs) |
| 177 | /* Clear MSR:EE */ |
| 178 | mfmsr r7 |
| 179 | rlwinm r0,r7,0,17,15 |
| 180 | mtmsr r0 |
| 181 | |
| 182 | /* Calc new HID1 value */ |
| 183 | mfspr r4,SPRN_HID1 |
| 184 | insrwi r4,r3,1,9 /* insert parameter into bit 9 */ |
| 185 | sync |
| 186 | mtspr SPRN_HID1,r4 |
| 187 | sync |
| 188 | isync |
| 189 | |
| 190 | /* Return */ |
| 191 | mtmsr r7 |
| 192 | blr |
| 193 | |
| 194 | #endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */ |
| 195 | |
| 196 | /* |
| 197 | * complement mask on the msr then "or" some values on. |
| 198 | * _nmask_and_or_msr(nmask, value_to_or) |
| 199 | */ |
| 200 | _GLOBAL(_nmask_and_or_msr) |
| 201 | mfmsr r0 /* Get current msr */ |
| 202 | andc r0,r0,r3 /* And off the bits set in r3 (first parm) */ |
| 203 | or r0,r0,r4 /* Or on the bits in r4 (second parm) */ |
| 204 | SYNC /* Some chip revs have problems here... */ |
| 205 | mtmsr r0 /* Update machine state */ |
| 206 | isync |
| 207 | blr /* Done */ |
| 208 | |
| 209 | |
| 210 | /* |
| 211 | * Flush MMU TLB |
| 212 | */ |
| 213 | _GLOBAL(_tlbia) |
| 214 | #if defined(CONFIG_40x) |
| 215 | sync /* Flush to memory before changing mapping */ |
| 216 | tlbia |
| 217 | isync /* Flush shadow TLB */ |
| 218 | #elif defined(CONFIG_44x) |
| 219 | li r3,0 |
| 220 | sync |
| 221 | |
| 222 | /* Load high watermark */ |
| 223 | lis r4,tlb_44x_hwater@ha |
| 224 | lwz r5,tlb_44x_hwater@l(r4) |
| 225 | |
| 226 | 1: tlbwe r3,r3,PPC44x_TLB_PAGEID |
| 227 | addi r3,r3,1 |
| 228 | cmpw 0,r3,r5 |
| 229 | ble 1b |
| 230 | |
| 231 | isync |
| 232 | #elif defined(CONFIG_FSL_BOOKE) |
| 233 | /* Invalidate all entries in TLB0 */ |
| 234 | li r3, 0x04 |
| 235 | tlbivax 0,3 |
| 236 | /* Invalidate all entries in TLB1 */ |
| 237 | li r3, 0x0c |
| 238 | tlbivax 0,3 |
| 239 | /* Invalidate all entries in TLB2 */ |
| 240 | li r3, 0x14 |
| 241 | tlbivax 0,3 |
| 242 | /* Invalidate all entries in TLB3 */ |
| 243 | li r3, 0x1c |
| 244 | tlbivax 0,3 |
| 245 | msync |
| 246 | #ifdef CONFIG_SMP |
| 247 | tlbsync |
| 248 | #endif /* CONFIG_SMP */ |
| 249 | #else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */ |
| 250 | #if defined(CONFIG_SMP) |
| 251 | rlwinm r8,r1,0,0,18 |
| 252 | lwz r8,TI_CPU(r8) |
| 253 | oris r8,r8,10 |
| 254 | mfmsr r10 |
| 255 | SYNC |
| 256 | rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ |
| 257 | rlwinm r0,r0,0,28,26 /* clear DR */ |
| 258 | mtmsr r0 |
| 259 | SYNC_601 |
| 260 | isync |
| 261 | lis r9,mmu_hash_lock@h |
| 262 | ori r9,r9,mmu_hash_lock@l |
| 263 | tophys(r9,r9) |
| 264 | 10: lwarx r7,0,r9 |
| 265 | cmpwi 0,r7,0 |
| 266 | bne- 10b |
| 267 | stwcx. r8,0,r9 |
| 268 | bne- 10b |
| 269 | sync |
| 270 | tlbia |
| 271 | sync |
| 272 | TLBSYNC |
| 273 | li r0,0 |
| 274 | stw r0,0(r9) /* clear mmu_hash_lock */ |
| 275 | mtmsr r10 |
| 276 | SYNC_601 |
| 277 | isync |
| 278 | #else /* CONFIG_SMP */ |
| 279 | sync |
| 280 | tlbia |
| 281 | sync |
| 282 | #endif /* CONFIG_SMP */ |
| 283 | #endif /* ! defined(CONFIG_40x) */ |
| 284 | blr |
| 285 | |
| 286 | /* |
| 287 | * Flush MMU TLB for a particular address |
| 288 | */ |
| 289 | _GLOBAL(_tlbie) |
| 290 | #if defined(CONFIG_40x) |
| 291 | tlbsx. r3, 0, r3 |
| 292 | bne 10f |
| 293 | sync |
| 294 | /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear. |
| 295 | * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate |
| 296 | * the TLB entry. */ |
| 297 | tlbwe r3, r3, TLB_TAG |
| 298 | isync |
| 299 | 10: |
| 300 | #elif defined(CONFIG_44x) |
| 301 | mfspr r4,SPRN_MMUCR |
| 302 | mfspr r5,SPRN_PID /* Get PID */ |
| 303 | rlwimi r4,r5,0,24,31 /* Set TID */ |
| 304 | mtspr SPRN_MMUCR,r4 |
| 305 | |
| 306 | tlbsx. r3, 0, r3 |
| 307 | bne 10f |
| 308 | sync |
| 309 | /* There are only 64 TLB entries, so r3 < 64, |
 * which means bit 22 is clear.  Since 22 is
| 311 | * the V bit in the TLB_PAGEID, loading this |
| 312 | * value will invalidate the TLB entry. |
| 313 | */ |
| 314 | tlbwe r3, r3, PPC44x_TLB_PAGEID |
| 315 | isync |
| 316 | 10: |
| 317 | #elif defined(CONFIG_FSL_BOOKE) |
| 318 | rlwinm r4, r3, 0, 0, 19 |
| 319 | ori r5, r4, 0x08 /* TLBSEL = 1 */ |
| 320 | ori r6, r4, 0x10 /* TLBSEL = 2 */ |
| 321 | ori r7, r4, 0x18 /* TLBSEL = 3 */ |
| 322 | tlbivax 0, r4 |
| 323 | tlbivax 0, r5 |
| 324 | tlbivax 0, r6 |
| 325 | tlbivax 0, r7 |
| 326 | msync |
| 327 | #if defined(CONFIG_SMP) |
| 328 | tlbsync |
| 329 | #endif /* CONFIG_SMP */ |
| 330 | #else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */ |
| 331 | #if defined(CONFIG_SMP) |
| 332 | rlwinm r8,r1,0,0,18 |
| 333 | lwz r8,TI_CPU(r8) |
| 334 | oris r8,r8,11 |
| 335 | mfmsr r10 |
| 336 | SYNC |
| 337 | rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ |
| 338 | rlwinm r0,r0,0,28,26 /* clear DR */ |
| 339 | mtmsr r0 |
| 340 | SYNC_601 |
| 341 | isync |
| 342 | lis r9,mmu_hash_lock@h |
| 343 | ori r9,r9,mmu_hash_lock@l |
| 344 | tophys(r9,r9) |
| 345 | 10: lwarx r7,0,r9 |
| 346 | cmpwi 0,r7,0 |
| 347 | bne- 10b |
| 348 | stwcx. r8,0,r9 |
| 349 | bne- 10b |
| 350 | eieio |
| 351 | tlbie r3 |
| 352 | sync |
| 353 | TLBSYNC |
| 354 | li r0,0 |
| 355 | stw r0,0(r9) /* clear mmu_hash_lock */ |
| 356 | mtmsr r10 |
| 357 | SYNC_601 |
| 358 | isync |
| 359 | #else /* CONFIG_SMP */ |
| 360 | tlbie r3 |
| 361 | sync |
| 362 | #endif /* CONFIG_SMP */ |
| 363 | #endif /* ! CONFIG_40x */ |
| 364 | blr |
| 365 | |
| 366 | /* |
| 367 | * Flush instruction cache. |
| 368 | * This is a no-op on the 601. |
| 369 | */ |
| 370 | _GLOBAL(flush_instruction_cache) |
| 371 | #if defined(CONFIG_8xx) |
| 372 | isync |
| 373 | lis r5, IDC_INVALL@h |
| 374 | mtspr SPRN_IC_CST, r5 |
| 375 | #elif defined(CONFIG_4xx) |
| 376 | #ifdef CONFIG_403GCX |
| 377 | li r3, 512 |
| 378 | mtctr r3 |
| 379 | lis r4, KERNELBASE@h |
| 380 | 1: iccci 0, r4 |
| 381 | addi r4, r4, 16 |
| 382 | bdnz 1b |
| 383 | #else |
| 384 | lis r3, KERNELBASE@h |
| 385 | iccci 0,r3 |
| 386 | #endif |
#elif defined(CONFIG_FSL_BOOKE)
| 388 | BEGIN_FTR_SECTION |
| 389 | mfspr r3,SPRN_L1CSR0 |
| 390 | ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC |
| 391 | /* msync; isync recommended here */ |
| 392 | mtspr SPRN_L1CSR0,r3 |
| 393 | isync |
| 394 | blr |
| 395 | END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE) |
| 396 | mfspr r3,SPRN_L1CSR1 |
| 397 | ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR |
| 398 | mtspr SPRN_L1CSR1,r3 |
| 399 | #else |
| 400 | mfspr r3,SPRN_PVR |
| 401 | rlwinm r3,r3,16,16,31 |
| 402 | cmpwi 0,r3,1 |
| 403 | beqlr /* for 601, do nothing */ |
| 404 | /* 603/604 processor - use invalidate-all bit in HID0 */ |
| 405 | mfspr r3,SPRN_HID0 |
| 406 | ori r3,r3,HID0_ICFI |
| 407 | mtspr SPRN_HID0,r3 |
| 408 | #endif /* CONFIG_8xx/4xx */ |
| 409 | isync |
| 410 | blr |
| 411 | |
| 412 | /* |
| 413 | * Write any modified data cache blocks out to memory |
| 414 | * and invalidate the corresponding instruction cache blocks. |
| 415 | * This is a no-op on the 601. |
| 416 | * |
 * __flush_icache_range(unsigned long start, unsigned long stop)
| 418 | */ |
| 419 | _GLOBAL(__flush_icache_range) |
| 420 | BEGIN_FTR_SECTION |
| 421 | blr /* for 601, do nothing */ |
| 422 | END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) |
| 423 | li r5,L1_CACHE_BYTES-1 |
| 424 | andc r3,r3,r5 |
| 425 | subf r4,r3,r4 |
| 426 | add r4,r4,r5 |
| 427 | srwi. r4,r4,L1_CACHE_SHIFT |
| 428 | beqlr |
| 429 | mtctr r4 |
| 430 | mr r6,r3 |
| 431 | 1: dcbst 0,r3 |
| 432 | addi r3,r3,L1_CACHE_BYTES |
| 433 | bdnz 1b |
| 434 | sync /* wait for dcbst's to get to ram */ |
| 435 | mtctr r4 |
| 436 | 2: icbi 0,r6 |
| 437 | addi r6,r6,L1_CACHE_BYTES |
| 438 | bdnz 2b |
| 439 | sync /* additional sync needed on g4 */ |
| 440 | isync |
| 441 | blr |
| 442 | /* |
| 443 | * Write any modified data cache blocks out to memory. |
| 444 | * Does not invalidate the corresponding cache lines (especially for |
| 445 | * any corresponding instruction cache). |
| 446 | * |
| 447 | * clean_dcache_range(unsigned long start, unsigned long stop) |
| 448 | */ |
| 449 | _GLOBAL(clean_dcache_range) |
| 450 | li r5,L1_CACHE_BYTES-1 |
| 451 | andc r3,r3,r5 |
| 452 | subf r4,r3,r4 |
| 453 | add r4,r4,r5 |
| 454 | srwi. r4,r4,L1_CACHE_SHIFT |
| 455 | beqlr |
| 456 | mtctr r4 |
| 457 | |
| 458 | 1: dcbst 0,r3 |
| 459 | addi r3,r3,L1_CACHE_BYTES |
| 460 | bdnz 1b |
| 461 | sync /* wait for dcbst's to get to ram */ |
| 462 | blr |
| 463 | |
| 464 | /* |
| 465 | * Write any modified data cache blocks out to memory and invalidate them. |
| 466 | * Does not invalidate the corresponding instruction cache blocks. |
| 467 | * |
| 468 | * flush_dcache_range(unsigned long start, unsigned long stop) |
| 469 | */ |
| 470 | _GLOBAL(flush_dcache_range) |
| 471 | li r5,L1_CACHE_BYTES-1 |
| 472 | andc r3,r3,r5 |
| 473 | subf r4,r3,r4 |
| 474 | add r4,r4,r5 |
| 475 | srwi. r4,r4,L1_CACHE_SHIFT |
| 476 | beqlr |
| 477 | mtctr r4 |
| 478 | |
| 479 | 1: dcbf 0,r3 |
| 480 | addi r3,r3,L1_CACHE_BYTES |
| 481 | bdnz 1b |
	sync				/* wait for dcbf's to get to ram */
| 483 | blr |
| 484 | |
| 485 | /* |
| 486 | * Like above, but invalidate the D-cache. This is used by the 8xx |
| 487 | * to invalidate the cache so the PPC core doesn't get stale data |
| 488 | * from the CPM (no cache snooping here :-). |
| 489 | * |
| 490 | * invalidate_dcache_range(unsigned long start, unsigned long stop) |
| 491 | */ |
| 492 | _GLOBAL(invalidate_dcache_range) |
| 493 | li r5,L1_CACHE_BYTES-1 |
| 494 | andc r3,r3,r5 |
| 495 | subf r4,r3,r4 |
| 496 | add r4,r4,r5 |
| 497 | srwi. r4,r4,L1_CACHE_SHIFT |
| 498 | beqlr |
| 499 | mtctr r4 |
| 500 | |
| 501 | 1: dcbi 0,r3 |
| 502 | addi r3,r3,L1_CACHE_BYTES |
| 503 | bdnz 1b |
| 504 | sync /* wait for dcbi's to get to ram */ |
| 505 | blr |
| 506 | |
| 507 | /* |
| 508 | * Flush a particular page from the data cache to RAM. |
| 509 | * Note: this is necessary because the instruction cache does *not* |
| 510 | * snoop from the data cache. |
| 511 | * This is a no-op on the 601 which has a unified cache. |
| 512 | * |
| 513 | * void __flush_dcache_icache(void *page) |
| 514 | */ |
| 515 | _GLOBAL(__flush_dcache_icache) |
| 516 | BEGIN_FTR_SECTION |
| 517 | blr |
| 518 | END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) |
| 519 | rlwinm r3,r3,0,0,19 /* Get page base address */ |
| 520 | li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ |
| 521 | mtctr r4 |
| 522 | mr r6,r3 |
| 523 | 0: dcbst 0,r3 /* Write line to ram */ |
| 524 | addi r3,r3,L1_CACHE_BYTES |
| 525 | bdnz 0b |
| 526 | sync |
| 527 | mtctr r4 |
| 528 | 1: icbi 0,r6 |
| 529 | addi r6,r6,L1_CACHE_BYTES |
| 530 | bdnz 1b |
| 531 | sync |
| 532 | isync |
| 533 | blr |
| 534 | |
| 535 | /* |
| 536 | * Flush a particular page from the data cache to RAM, identified |
| 537 | * by its physical address. We turn off the MMU so we can just use |
| 538 | * the physical address (this may be a highmem page without a kernel |
| 539 | * mapping). |
| 540 | * |
| 541 | * void __flush_dcache_icache_phys(unsigned long physaddr) |
| 542 | */ |
| 543 | _GLOBAL(__flush_dcache_icache_phys) |
| 544 | BEGIN_FTR_SECTION |
| 545 | blr /* for 601, do nothing */ |
| 546 | END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) |
| 547 | mfmsr r10 |
| 548 | rlwinm r0,r10,0,28,26 /* clear DR */ |
| 549 | mtmsr r0 |
| 550 | isync |
| 551 | rlwinm r3,r3,0,0,19 /* Get page base address */ |
| 552 | li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ |
| 553 | mtctr r4 |
| 554 | mr r6,r3 |
| 555 | 0: dcbst 0,r3 /* Write line to ram */ |
| 556 | addi r3,r3,L1_CACHE_BYTES |
| 557 | bdnz 0b |
| 558 | sync |
| 559 | mtctr r4 |
| 560 | 1: icbi 0,r6 |
| 561 | addi r6,r6,L1_CACHE_BYTES |
| 562 | bdnz 1b |
| 563 | sync |
| 564 | mtmsr r10 /* restore DR */ |
| 565 | isync |
| 566 | blr |
| 567 | |
| 568 | /* |
| 569 | * Clear pages using the dcbz instruction, which doesn't cause any |
| 570 | * memory traffic (except to write out any cache lines which get |
| 571 | * displaced). This only works on cacheable memory. |
| 572 | * |
 * void clear_pages(void *page, int order)
| 574 | */ |
| 575 | _GLOBAL(clear_pages) |
| 576 | li r0,4096/L1_CACHE_BYTES |
| 577 | slw r0,r0,r4 |
| 578 | mtctr r0 |
| 579 | #ifdef CONFIG_8xx |
| 580 | li r4, 0 |
| 581 | 1: stw r4, 0(r3) |
| 582 | stw r4, 4(r3) |
| 583 | stw r4, 8(r3) |
| 584 | stw r4, 12(r3) |
| 585 | #else |
| 586 | 1: dcbz 0,r3 |
| 587 | #endif |
| 588 | addi r3,r3,L1_CACHE_BYTES |
| 589 | bdnz 1b |
| 590 | blr |
| 591 | |
| 592 | /* |
| 593 | * Copy a whole page. We use the dcbz instruction on the destination |
| 594 | * to reduce memory traffic (it eliminates the unnecessary reads of |
| 595 | * the destination into cache). This requires that the destination |
| 596 | * is cacheable. |
| 597 | */ |
| 598 | #define COPY_16_BYTES \ |
| 599 | lwz r6,4(r4); \ |
| 600 | lwz r7,8(r4); \ |
| 601 | lwz r8,12(r4); \ |
| 602 | lwzu r9,16(r4); \ |
| 603 | stw r6,4(r3); \ |
| 604 | stw r7,8(r3); \ |
| 605 | stw r8,12(r3); \ |
| 606 | stwu r9,16(r3) |
| 607 | |
| 608 | _GLOBAL(copy_page) |
| 609 | addi r3,r3,-4 |
| 610 | addi r4,r4,-4 |
| 611 | |
| 612 | #ifdef CONFIG_8xx |
| 613 | /* don't use prefetch on 8xx */ |
| 614 | li r0,4096/L1_CACHE_BYTES |
| 615 | mtctr r0 |
| 616 | 1: COPY_16_BYTES |
| 617 | bdnz 1b |
| 618 | blr |
| 619 | |
| 620 | #else /* not 8xx, we can prefetch */ |
| 621 | li r5,4 |
| 622 | |
| 623 | #if MAX_COPY_PREFETCH > 1 |
| 624 | li r0,MAX_COPY_PREFETCH |
| 625 | li r11,4 |
| 626 | mtctr r0 |
| 627 | 11: dcbt r11,r4 |
| 628 | addi r11,r11,L1_CACHE_BYTES |
| 629 | bdnz 11b |
| 630 | #else /* MAX_COPY_PREFETCH == 1 */ |
| 631 | dcbt r5,r4 |
| 632 | li r11,L1_CACHE_BYTES+4 |
| 633 | #endif /* MAX_COPY_PREFETCH */ |
| 634 | li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH |
| 635 | crclr 4*cr0+eq |
| 636 | 2: |
| 637 | mtctr r0 |
| 638 | 1: |
| 639 | dcbt r11,r4 |
| 640 | dcbz r5,r3 |
| 641 | COPY_16_BYTES |
| 642 | #if L1_CACHE_BYTES >= 32 |
| 643 | COPY_16_BYTES |
| 644 | #if L1_CACHE_BYTES >= 64 |
| 645 | COPY_16_BYTES |
| 646 | COPY_16_BYTES |
| 647 | #if L1_CACHE_BYTES >= 128 |
| 648 | COPY_16_BYTES |
| 649 | COPY_16_BYTES |
| 650 | COPY_16_BYTES |
| 651 | COPY_16_BYTES |
| 652 | #endif |
| 653 | #endif |
| 654 | #endif |
| 655 | bdnz 1b |
| 656 | beqlr |
| 657 | crnot 4*cr0+eq,4*cr0+eq |
| 658 | li r0,MAX_COPY_PREFETCH |
| 659 | li r11,4 |
| 660 | b 2b |
| 661 | #endif /* CONFIG_8xx */ |
| 662 | |
| 663 | /* |
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr)
| 666 | */ |
| 667 | _GLOBAL(atomic_clear_mask) |
| 668 | 10: lwarx r5,0,r4 |
| 669 | andc r5,r5,r3 |
| 670 | PPC405_ERR77(0,r4) |
| 671 | stwcx. r5,0,r4 |
| 672 | bne- 10b |
| 673 | blr |
| 674 | _GLOBAL(atomic_set_mask) |
| 675 | 10: lwarx r5,0,r4 |
| 676 | or r5,r5,r3 |
| 677 | PPC405_ERR77(0,r4) |
| 678 | stwcx. r5,0,r4 |
| 679 | bne- 10b |
| 680 | blr |
| 681 | |
| 682 | /* |
| 683 | * Extended precision shifts. |
| 684 | * |
| 685 | * Updated to be valid for shift counts from 0 to 63 inclusive. |
| 686 | * -- Gabriel |
| 687 | * |
| 688 | * R3/R4 has 64 bit value |
| 689 | * R5 has shift count |
| 690 | * result in R3/R4 |
| 691 | * |
| 692 | * ashrdi3: arithmetic right shift (sign propagation) |
| 693 | * lshrdi3: logical right shift |
| 694 | * ashldi3: left shift |
| 695 | */ |
| 696 | _GLOBAL(__ashrdi3) |
| 697 | subfic r6,r5,32 |
| 698 | srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count |
| 699 | addi r7,r5,32 # could be xori, or addi with -32 |
| 700 | slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) |
| 701 | rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0 |
| 702 | sraw r7,r3,r7 # t2 = MSW >> (count-32) |
| 703 | or r4,r4,r6 # LSW |= t1 |
| 704 | slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2 |
| 705 | sraw r3,r3,r5 # MSW = MSW >> count |
| 706 | or r4,r4,r7 # LSW |= t2 |
| 707 | blr |
| 708 | |
| 709 | _GLOBAL(__ashldi3) |
| 710 | subfic r6,r5,32 |
| 711 | slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count |
| 712 | addi r7,r5,32 # could be xori, or addi with -32 |
| 713 | srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count) |
| 714 | slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32) |
| 715 | or r3,r3,r6 # MSW |= t1 |
| 716 | slw r4,r4,r5 # LSW = LSW << count |
| 717 | or r3,r3,r7 # MSW |= t2 |
| 718 | blr |
| 719 | |
| 720 | _GLOBAL(__lshrdi3) |
| 721 | subfic r6,r5,32 |
| 722 | srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count |
| 723 | addi r7,r5,32 # could be xori, or addi with -32 |
| 724 | slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) |
| 725 | srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32) |
| 726 | or r4,r4,r6 # LSW |= t1 |
| 727 | srw r3,r3,r5 # MSW = MSW >> count |
| 728 | or r4,r4,r7 # LSW |= t2 |
| 729 | blr |
| 730 | |
| 731 | _GLOBAL(abs) |
| 732 | srawi r4,r3,31 |
| 733 | xor r3,r3,r4 |
| 734 | sub r3,r3,r4 |
| 735 | blr |
| 736 | |
| 737 | /* |
| 738 | * Create a kernel thread |
| 739 | * kernel_thread(fn, arg, flags) |
| 740 | */ |
| 741 | _GLOBAL(kernel_thread) |
| 742 | stwu r1,-16(r1) |
| 743 | stw r30,8(r1) |
| 744 | stw r31,12(r1) |
| 745 | mr r30,r3 /* function */ |
| 746 | mr r31,r4 /* argument */ |
| 747 | ori r3,r5,CLONE_VM /* flags */ |
| 748 | oris r3,r3,CLONE_UNTRACED>>16 |
| 749 | li r4,0 /* new sp (unused) */ |
| 750 | li r0,__NR_clone |
| 751 | sc |
| 752 | cmpwi 0,r3,0 /* parent or child? */ |
| 753 | bne 1f /* return if parent */ |
| 754 | li r0,0 /* make top-level stack frame */ |
| 755 | stwu r0,-16(r1) |
| 756 | mtlr r30 /* fn addr in lr */ |
| 757 | mr r3,r31 /* load arg and call fn */ |
| 758 | PPC440EP_ERR42 |
| 759 | blrl |
| 760 | li r0,__NR_exit /* exit if function returns */ |
| 761 | li r3,0 |
| 762 | sc |
| 763 | 1: lwz r30,8(r1) |
| 764 | lwz r31,12(r1) |
| 765 | addi r1,r1,16 |
| 766 | blr |
| 767 | |
| 768 | _GLOBAL(kernel_execve) |
| 769 | li r0,__NR_execve |
| 770 | sc |
| 771 | bnslr |
| 772 | neg r3,r3 |
| 773 | blr |
| 774 | |
| 775 | /* |
| 776 | * This routine is just here to keep GCC happy - sigh... |
| 777 | */ |
| 778 | _GLOBAL(__main) |
| 779 | blr |
| 780 | |
| 781 | #ifdef CONFIG_KEXEC |
| 782 | /* |
| 783 | * Must be relocatable PIC code callable as a C function. |
| 784 | */ |
| 785 | .globl relocate_new_kernel |
| 786 | relocate_new_kernel: |
| 787 | /* r3 = page_list */ |
| 788 | /* r4 = reboot_code_buffer */ |
| 789 | /* r5 = start_address */ |
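	/*
	 * page_list is the head of the kexec indirection list: each
	 * word is a physical address tagged in its low bits with
	 * IND_DESTINATION (1<<0) to set the copy target,
	 * IND_INDIRECTION (1<<1) to chain to another list page,
	 * IND_DONE (1<<2) to terminate, or IND_SOURCE (1<<3) to name
	 * a page to copy to the current destination; the loop below
	 * walks that list.
	 */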
| 790 | |
| 791 | li r0, 0 |
| 792 | |
| 793 | /* |
| 794 | * Set Machine Status Register to a known status, |
| 795 | * switch the MMU off and jump to 1: in a single step. |
| 796 | */ |
| 797 | |
| 798 | mr r8, r0 |
| 799 | ori r8, r8, MSR_RI|MSR_ME |
| 800 | mtspr SPRN_SRR1, r8 |
| 801 | addi r8, r4, 1f - relocate_new_kernel |
| 802 | mtspr SPRN_SRR0, r8 |
| 803 | sync |
| 804 | rfi |
| 805 | |
| 806 | 1: |
| 807 | /* from this point address translation is turned off */ |
| 808 | /* and interrupts are disabled */ |
| 809 | |
| 810 | /* set a new stack at the bottom of our page... */ |
| 811 | /* (not really needed now) */ |
| 812 | addi r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */ |
| 813 | stw r0, 0(r1) |
| 814 | |
| 815 | /* Do the copies */ |
| 816 | li r6, 0 /* checksum */ |
| 817 | mr r0, r3 |
| 818 | b 1f |
| 819 | |
| 820 | 0: /* top, read another word for the indirection page */ |
| 821 | lwzu r0, 4(r3) |
| 822 | |
| 823 | 1: |
| 824 | /* is it a destination page? (r8) */ |
| 825 | rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */ |
| 826 | beq 2f |
| 827 | |
| 828 | rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */ |
| 829 | b 0b |
| 830 | |
| 831 | 2: /* is it an indirection page? (r3) */ |
| 832 | rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */ |
| 833 | beq 2f |
| 834 | |
| 835 | rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */ |
| 836 | subi r3, r3, 4 |
| 837 | b 0b |
| 838 | |
| 839 | 2: /* are we done? */ |
| 840 | rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */ |
| 841 | beq 2f |
| 842 | b 3f |
| 843 | |
| 844 | 2: /* is it a source page? (r9) */ |
| 845 | rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */ |
| 846 | beq 0b |
| 847 | |
| 848 | rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */ |
| 849 | |
| 850 | li r7, PAGE_SIZE / 4 |
| 851 | mtctr r7 |
| 852 | subi r9, r9, 4 |
| 853 | subi r8, r8, 4 |
| 854 | 9: |
| 855 | lwzu r0, 4(r9) /* do the copy */ |
| 856 | xor r6, r6, r0 |
| 857 | stwu r0, 4(r8) |
| 858 | dcbst 0, r8 |
| 859 | sync |
| 860 | icbi 0, r8 |
| 861 | bdnz 9b |
| 862 | |
| 863 | addi r9, r9, 4 |
| 864 | addi r8, r8, 4 |
| 865 | b 0b |
| 866 | |
| 867 | 3: |
| 868 | |
| 869 | /* To be certain of avoiding problems with self-modifying code |
| 870 | * execute a serializing instruction here. |
| 871 | */ |
| 872 | isync |
| 873 | sync |
| 874 | |
| 875 | /* jump to the entry point, usually the setup routine */ |
| 876 | mtlr r5 |
| 877 | blrl |
| 878 | |
| 879 | 1: b 1b |
| 880 | |
| 881 | relocate_new_kernel_end: |
| 882 | |
| 883 | .globl relocate_new_kernel_size |
| 884 | relocate_new_kernel_size: |
| 885 | .long relocate_new_kernel_end - relocate_new_kernel |
| 886 | #endif |