/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 *
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>

#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)

_GLOBAL(call_handle_irq)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
#endif /* CONFIG_IRQSTACKS */

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
1:	beqlr	cr1		/* all done if high part of A is 0 */
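/*
 * Roughly equivalent C, for illustration only (not part of the build;
 * u32/u64 as in <linux/types.h>; this sketches the 32x32->64 partial
 * product decomposition, not the exact register-level sequence above):
 *
 *	u64 mulhdu(u64 a, u64 b)
 *	{
 *		u32 ah = a >> 32, al = a, bh = b >> 32, bl = b;
 *		u64 mid  = (u64)ah * bl + (((u64)al * bl) >> 32);
 *		u64 mid2 = (u64)al * bh + (u32)mid;
 *
 *		return (u64)ah * bh + (mid >> 32) + (mid2 >> 32);
 *	}
 */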
/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	addi	r8,r8,__got2_end@l
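/*
 * In effect (illustrative sketch only; the real routine also computes
 * its own load offset first, since it runs before relocation):
 *
 *	extern unsigned long __got2_start[], __got2_end[];
 *
 *	void reloc_got2(unsigned long offset)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __got2_start; p < __got2_end; p++)
 *			*p += offset;
 *	}
 */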
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r5,CPU_SPEC_SETUP(r4)
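/*
 * Conceptually (sketch only; PTRRELOC and the cpu_setup field exist
 * elsewhere in the port, but this is not the literal code path):
 *
 *	void call_setup_cpu(unsigned long offset)
 *	{
 *		struct cpu_spec *s = PTRRELOC(*PTRRELOC(&cur_cpu_spec));
 *
 *		if (s->cpu_setup)
 *			s->cpu_setup(offset, s);	// r3 = offset, r4 = spec
 *	}
 */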
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* If switching to PLL1, disable HID0:BTIC */

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read current HID1 */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear HID1:PS from value read
				   (could rlwimi have merged these two steps?) */

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,(31-THREAD_SHIFT)
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */

_GLOBAL(low_choose_7447a_dfs)
	/* Calc new HID1 value */
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
/*
 * Clear the MSR bits set in nmask, then "or" on the bits in value_to_or:
 *	_nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
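/*
 * In effect (illustration only; mfmsr()/mtmsr() as provided by the
 * powerpc register headers):
 *
 *	void _nmask_and_or_msr(unsigned long nmask, unsigned long value_to_or)
 *	{
 *		mtmsr((mfmsr() & ~nmask) | value_to_or);
 *	}
 */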
/*
 * Do an IO access in real mode
 */

/*
 * Do an IO access in real mode
 */
#endif /* CONFIG_40x */

#ifndef CONFIG_FSL_BOOKE
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	/* Invalidate all entries in TLB1 */
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	stw	r0,0(r9)	/* clear mmu_hash_lock */
#else /* CONFIG_SMP */
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
/*
 * Flush MMU TLB for a particular address
 */
#ifndef CONFIG_FSL_BOOKE
#if defined(CONFIG_40x)
	/* We run the search with interrupts disabled because we have to change
	 * the PID and we must not be preempted while that happens.
	 */

	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry.
	 */
	tlbwe	r3, r3, TLB_TAG
#elif defined(CONFIG_44x)
	rlwimi	r5,r4,0,24,31	/* Set TID */

	/* We have to run the search with interrupts disabled, even critical
	 * and debug interrupts (in fact the only critical exceptions we have
	 * are debug and machine check). Otherwise an interrupt which causes
	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l

	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear. Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
#if defined(CONFIG_SMP)
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	stw	r0,0(r9)	/* clear mmu_hash_lock */
#else /* CONFIG_SMP */
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
#if defined(CONFIG_FSL_BOOKE)
/*
 * Flush MMU TLB, but only on the local processor (no broadcast)
 */
#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
	li	r3,(MMUCSR0_TLBFI)@l
	mtspr	SPRN_MMUCSR0, r3
	mfspr	r3,SPRN_MMUCSR0
	andi.	r3,r3,MMUCSR0_TLBFI@l

/*
 * Flush MMU TLB for a particular process id, but only on the local processor
 * (no broadcast)
 */
	/* We currently invalidate everything, since we don't have a per-PID invalidate */
	li	r3,(MMUCSR0_TLBFI)@l
	mtspr	SPRN_MMUCSR0, r3
	mfspr	r3,SPRN_MMUCSR0
	andi.	r3,r3,MMUCSR0_TLBFI@l

/*
 * Flush MMU TLB for a particular address, but only on the local processor
 * (no broadcast)
 */
	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
	mfspr	r4,SPRN_MAS1		/* check valid */
	andis.	r3,r4,MAS1_VALID@h
#endif /* CONFIG_FSL_BOOKE */

/*
 * Nobody implements this yet
 */
_GLOBAL(_tlbivax_bcast)
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#elif defined(CONFIG_FSL_BOOKE)
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	rlwinm	r3,r3,16,16,31
	beqlr				/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
#endif /* CONFIG_8xx/4xx */
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_KPROBE(__flush_icache_range)
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */
	addi	r6,r6,L1_CACHE_BYTES
	sync				/* additional sync needed on g4 */
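/*
 * The pattern above, in rough C with inline asm (illustration only;
 * the real routine also rounds the length up and loops via the ctr):
 *
 *	void __flush_icache_range(unsigned long start, unsigned long stop)
 *	{
 *		unsigned long p;
 *
 *		start &= ~(L1_CACHE_BYTES - 1);
 *		for (p = start; p < stop; p += L1_CACHE_BYTES)
 *			asm volatile("dcbst 0,%0" : : "r" (p));	// push dirty lines
 *		asm volatile("sync");				// wait for the stores
 *		for (p = start; p < stop; p += L1_CACHE_BYTES)
 *			asm volatile("icbi 0,%0" : : "r" (p));	// drop stale insns
 *		asm volatile("sync; isync");			// resync fetch stream
 *	}
 */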
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbf's to get to ram */
/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbi's to complete */
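/*
 * The three d-cache range routines above share one loop shape (align the
 * start, compute the line count, one cache op per line, then sync); only
 * the operation differs: dcbst writes a dirty line back and keeps it
 * valid, dcbf writes it back and then invalidates it, and dcbi
 * invalidates without writeback, so modified data still in the cache
 * is discarded.
 */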
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES

	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space.
	 */
	addi	r6,r6,L1_CACHE_BYTES
#endif /* CONFIG_44x */
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address. We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r0,r10,0,28,26		/* clear DR */
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	addi	r6,r6,L1_CACHE_BYTES
	mtmsr	r10			/* restore DR */
/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced). This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
	li	r0,4096/L1_CACHE_BYTES
	addi	r3,r3,L1_CACHE_BYTES
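/*
 * Sketch in C (illustration only; dcbz zero-fills a whole cache line
 * without fetching its old contents from memory first):
 *
 *	void clear_pages(void *page, int order)
 *	{
 *		unsigned long p = (unsigned long)page;
 *		int lines = (4096 << order) / L1_CACHE_BYTES;
 *
 *		while (lines--) {
 *			asm volatile("dcbz 0,%0" : : "r" (p) : "memory");
 *			p += L1_CACHE_BYTES;
 *		}
 *	}
 */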
/*
 * Copy a whole page. We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache). This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES \

	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES

#else /* not 8xx, we can prefetch */
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	addi	r11,r11,L1_CACHE_BYTES
#else /* MAX_COPY_PREFETCH == 1 */
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
#if L1_CACHE_BYTES >= 32
#if L1_CACHE_BYTES >= 64
#if L1_CACHE_BYTES >= 128
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
#endif /* CONFIG_8xx */
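/*
 * The idea in C (simplified: no prefetch, one line at a time; a sketch,
 * not the unrolled COPY_16_BYTES sequence used above):
 *
 *	void copy_page(void *to, void *from)
 *	{
 *		unsigned long *d = to, *s = from;
 *		int lines = 4096 / L1_CACHE_BYTES, i;
 *
 *		while (lines--) {
 *			// Allocate the destination line zero-filled so the
 *			// stores below never cause a read of its old contents.
 *			asm volatile("dcbz 0,%0" : : "r" (d) : "memory");
 *			for (i = 0; i < L1_CACHE_BYTES / 4; i++)
 *				*d++ = *s++;
 *		}
 *	}
 */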
/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
_GLOBAL(atomic_clear_mask)

_GLOBAL(atomic_set_mask)
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 *
 * R3/R4 holds the 64-bit value
 * R5 holds the shift count
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 */
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2

	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2

	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
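/*
 * What these routines compute, on 32-bit halves in C (sketch only; the
 * branch-free asm above relies on slw/srw yielding 0 for shift amounts
 * of 32-63, which plain C shifts don't guarantee, hence the branches):
 *
 *	u64 __lshrdi3(u64 v, int count)
 *	{
 *		u32 msw = v >> 32, lsw = v;
 *
 *		if (count == 0)
 *			return v;
 *		if (count < 32) {
 *			lsw = (lsw >> count) | (msw << (32 - count));
 *			msw >>= count;
 *		} else {
 *			lsw = msw >> (count - 32);
 *			msw = 0;
 *		}
 *		return ((u64)msw << 32) | lsw;
 *	}
 *
 * __ashrdi3 is the same with arithmetic shifts of the MSW (sign fill);
 * __ashldi3 is the mirror image for left shifts.
 */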
/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
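/*
 * Equivalent C (sketch):
 *
 *	int __ucmpdi2(u64 a, u64 b)
 *	{
 *		return (a < b) ? 0 : (a == b) ? 1 : 2;
 *	}
 */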
/*
 * Create a kernel thread
 *	kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	bns+	1f		/* did system call indicate error? */
	neg	r3,r3		/* if so, make return code negative */
1:	cmpwi	0,r3,0		/* parent or child? */
	bne	2f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	li	r0,__NR_exit	/* exit if function returns */
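/*
 * In effect (sketch; clone()/_exit() here stand in for the raw sc-based
 * syscalls issued above, not the libc wrappers):
 *
 *	int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *	{
 *		long pid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *
 *		if (pid != 0)
 *			return pid;	// parent: child pid, or negative errno
 *		fn(arg);		// child: run the payload...
 *		_exit(0);		// ...and exit if it ever returns
 *	}
 */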
/*
 * This routine is just here to keep GCC happy - sigh...
 */

/*
 * Must be relocatable PIC code callable as a C function.
 */
	.globl relocate_new_kernel
	/* r4 = reboot_code_buffer */
	/* r5 = start_address */

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */
	ori	r8, r8, MSR_RI|MSR_ME
	addi	r8, r4, 1f - relocate_new_kernel

	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */

	li	r6, 0 /* checksum */

0:	/* top, read another word for the indirection page */

	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31	/* IND_DESTINATION (1<<0) */
	rlwinm	r8, r0, 0, 0, 19	/* clear kexec flags, page align */

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30	/* IND_INDIRECTION (1<<1) */
	rlwinm	r3, r0, 0, 0, 19	/* clear kexec flags, page align */

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29	/* IND_DONE (1<<2) */

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28	/* IND_SOURCE (1<<3) */
	rlwinm	r9, r0, 0, 0, 19	/* clear kexec flags, page align */
	li	r7, PAGE_SIZE / 4
	lwzu	r0, 4(r9)	/* do the copy */
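/*
 * The indirection-page walk above, in C (IND_* flag names are the kexec
 * ones; sketch only, the copy is done by hand in the asm):
 *
 *	unsigned long *dest = NULL, *entry;
 *
 *	for (entry = ind_page; ; entry++) {
 *		unsigned long e = *entry;
 *		unsigned long *page = (unsigned long *)(e & PAGE_MASK);
 *
 *		if (e & IND_DESTINATION) {
 *			dest = page;			// copies land here
 *		} else if (e & IND_INDIRECTION) {
 *			entry = page - 1;		// switch list pages
 *		} else if (e & IND_DONE) {
 *			break;
 *		} else if (e & IND_SOURCE) {
 *			memcpy(dest, page, PAGE_SIZE);
 *			dest += PAGE_SIZE / sizeof(*dest);
 *		}
 *	}
 */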
	/* To be certain of avoiding problems with self-modifying code,
	 * execute a serializing instruction here.
	 */

	/* jump to the entry point, usually the setup routine */

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel