/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *	should only use index and base registers that are not shadowed,
 *	so that the fast path emulation in the non-access miss handler
 *	can handle them.
 */
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <linux/linkage.h>
ENTRY(flush_tlb_all_local)
	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb.  Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */

	/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
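	/*
	 * To run with translation off, the sequence below saves and
	 * clears the PSW I-bit, turns off the PSW Q-bit, loads the
	 * interruption address queues (IIASQ/IIAOQ) with the real-mode
	 * continuation address, and builds REAL_MODE_PSW in %r1 so that
	 * the rfi into real mode resumes at 1: below.
	 */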
	rsm		PSW_SM_I, %r19		/* save I-bit state */

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
1:	load32		PA(cache_info), %r1

	/* Flush Instruction Tlb */

	LDREG		ITLB_SID_BASE(%r1), %r20
	LDREG		ITLB_SID_STRIDE(%r1), %r21
	LDREG		ITLB_SID_COUNT(%r1), %r22
	LDREG		ITLB_OFF_BASE(%r1), %arg0
	LDREG		ITLB_OFF_STRIDE(%r1), %arg1
	LDREG		ITLB_OFF_COUNT(%r1), %arg2
	LDREG		ITLB_LOOP(%r1), %arg3
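	/*
	 * cache_info describes the TLB geometry: the outer loop below
	 * steps %r20 through ITLB_SID_COUNT space ids by ITLB_SID_STRIDE,
	 * while the middle loop steps %r28 through ITLB_OFF_COUNT offsets
	 * of ITLB_OFF_STRIDE each, purging one entry per pitlbe.  The
	 * ITLB_LOOP value selects between the fitmanyloop and fitoneloop
	 * variants.
	 */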
	ADDIB=		-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fitmanyloop:					/* Loop if LOOP >= 2 */
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fitmanymiddle:					/* Loop if LOOP >= 2 */
	ADDIB>		-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	ADDIB>		-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fitmanyloop /* Re-init base addr */
	ADDIB<=,n	-1, %r22, fitdone	/* Outer loop count decr */

fitoneloop:					/* Loop if LOOP = 1 */
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fitonemiddle:					/* Loop if LOOP = 1 */
	ADDIB>		-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	ADDIB>		-1, %r22, fitoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */
	LDREG		DTLB_SID_BASE(%r1), %r20
	LDREG		DTLB_SID_STRIDE(%r1), %r21
	LDREG		DTLB_SID_COUNT(%r1), %r22
	LDREG		DTLB_OFF_BASE(%r1), %arg0
	LDREG		DTLB_OFF_STRIDE(%r1), %arg1
	LDREG		DTLB_OFF_COUNT(%r1), %arg2
	LDREG		DTLB_LOOP(%r1), %arg3
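	/*
	 * The data TLB is flushed with the same three-level loop as the
	 * instruction TLB above, using pdtlbe instead of pitlbe.
	 */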
	ADDIB=		-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fdtmanyloop:					/* Loop if LOOP >= 2 */
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:					/* Loop if LOOP >= 2 */
	ADDIB>		-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	ADDIB>		-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fdtmanyloop /* Re-init base addr */
	ADDIB<=,n	-1, %r22, fdtdone	/* Outer loop count decr */

fdtoneloop:					/* Loop if LOOP = 1 */
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fdtonemiddle:					/* Loop if LOOP = 1 */
	ADDIB>		-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	ADDIB>		-1, %r22, fdtoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */
	/*
	 * Switch back to virtual mode
	 */

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	or		%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl		%r1, %ipsw		/* restore I-bit (entire PSW) */

ENDPROC(flush_tlb_all_local)

	.import cache_info, data
ENTRY(flush_instruction_cache_local)
	load32		cache_info, %r1

	/* Flush Instruction Cache */

	LDREG		ICACHE_BASE(%r1), %arg0
	LDREG		ICACHE_STRIDE(%r1), %arg1
	LDREG		ICACHE_COUNT(%r1), %arg2
	LDREG		ICACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */
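	/*
	 * %arg3 (LOOP) selects between two flush loops: fimanyloop issues
	 * LOOP fice operations per stride step, with only the last one
	 * (fice,m) advancing %arg0, while fioneloop handles the common
	 * LOOP == 1 case with a single fice,m per step.
	 */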
	ADDIB=		-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:					/* Loop if LOOP >= 2 */
	ADDIB>		-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice		%r0(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr		%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	ADDIB<=,n	-1, %arg2, fisync	/* Outer loop decr */

fioneloop:					/* Loop if LOOP = 1 */
	ADDIB>		-1, %arg2, fioneloop	/* Outer loop count decr */
	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */

	mtsm		%r22			/* restore I-bit */

ENDPROC(flush_instruction_cache_local)
	.import cache_info, data
ENTRY(flush_data_cache_local)
	load32		cache_info, %r1

	/* Flush Data Cache */

	LDREG		DCACHE_BASE(%r1), %arg0
	LDREG		DCACHE_STRIDE(%r1), %arg1
	LDREG		DCACHE_COUNT(%r1), %arg2
	LDREG		DCACHE_LOOP(%r1), %arg3
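	/*
	 * Same structure as flush_instruction_cache_local above, with
	 * fdce stepping through the data cache.
	 */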
	ADDIB=		-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:					/* Loop if LOOP >= 2 */
	ADDIB>		-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce		%r0(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr		%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	ADDIB<=,n	-1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:					/* Loop if LOOP = 1 */
	ADDIB>		-1, %arg2, fdoneloop	/* Outer loop count decr */
	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */

	mtsm		%r22			/* restore I-bit */

ENDPROC(flush_data_cache_local)
ENTRY(copy_user_page_asm)
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */

	ldi		ASM_PAGE_SIZE_DIV128, %r1
	ldw		64(%r25), %r0		/* prefetch 1 cacheline ahead */
	ldw		128(%r25), %r0		/* prefetch 2 */

	ldw		192(%r25), %r0		/* prefetch 3 */
	ldw		256(%r25), %r0		/* prefetch 4 */
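	/*
	 * Loads that target %r0 change no register; they serve purely as
	 * prefetch hints that pull the next cachelines of the source page
	 * in ahead of the copy.
	 */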
	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch.  Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	ADDIB>,n	-1, %r1, 1b		/* bundle 10 */
	ldd		0(%r25), %r19		/* start next loads */
	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	ldi		ASM_PAGE_SIZE_DIV64, %r1

ENDPROC(copy_user_page_asm)
/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 *	maximum alias boundary being 4 Mb. We've been assured by the
 *	parisc chip designers that there will not ever be a parisc
 *	chip with a larger alias boundary (Never say never :-) ).
 *
 *	Subtle: the dtlb miss handlers support the temp alias region by
 *	"knowing" that if a dtlb miss happens within the temp alias
 *	region it must have occurred while in clear_user_page. Since
 *	this routine makes use of processor local translations, we
 *	don't want to insert them into the kernel page table. Instead,
 *	we load up some general registers (they need to be registers
 *	which aren't shadowed) with the physical page numbers (preshifted
 *	for tlb insertion) needed to insert the translations. When we
 *	miss on the translation, the dtlb miss handler inserts the
 *	translation into the tlb using these values:
 *
 *	%r26 physical page (shifted for tlb insert) of "to" translation
 *	%r23 physical page (shifted for tlb insert) of "from" translation
 *
 *	We can't do this since copy_user_page is used to bring in
 *	file data that might have instructions. Since the data would
 *	then need to be flushed out so the i-fetch can see it, it
 *	makes more sense to just copy through the kernel translation
 *	and flush it.
 *
 *	I'm still keeping this around because it may be possible to
 *	use it if more information is passed into copy_user_page().
 *	Have to do some measurements to see if it is worthwhile to
 *	lobby for such a change.
 */
ENTRY(copy_user_page_asm)
	ldil		L%(__PAGE_OFFSET), %r1
	sub		%r26, %r1, %r26		/* move physical addr into non shadowed reg */
	sub		%r25, %r1, %r23		/* move physical addr into non shadowed reg */

	ldil		L%(TMPALIAS_MAP_START), %r28
	/* FIXME for different page sizes != 4k */
#ifdef CONFIG_64BIT
	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	extrd,u		%r23, 56,32, %r23	/* convert phys addr to tlb insert format */
	depd		%r24, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,12, %r28		/* Clear any offset bits */
	copy		%r28, %r29
	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,12, %r28		/* Clear any offset bits */
	copy		%r28, %r29
	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif
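	/*
	 * The 'from' alias differs from the 'to' alias in a single
	 * address bit worth 0x400000, so the two mappings sit exactly
	 * one 4 MB alias boundary apart (see the NOTE above about the
	 * maximum alias boundary).
	 */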
	/* Purge any old translations */

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */

ENDPROC(copy_user_page_asm)
ENTRY(__clear_user_page_asm)
	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
	/* FIXME: page size dependent */
#endif
	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,12, %r28		/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,12, %r28		/* Clear any offset bits */
#endif
	/* Purge any old translation */

#ifdef CONFIG_64BIT
	ldi		ASM_PAGE_SIZE_DIV128, %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define	PREFETCHW_OP	ldd	256(%0), %r0 */

#else	/* ! CONFIG_64BIT */
	ldi		ASM_PAGE_SIZE_DIV64, %r1

#endif	/* CONFIG_64BIT */

ENDPROC(__clear_user_page_asm)
ENTRY(flush_kernel_dcache_page_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
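	/*
	 * depdi,z/depwi,z build the PAGE_SIZE constant (1 << PAGE_SHIFT)
	 * in %r25: PA-RISC bit positions count from the MSB, so position
	 * 63-PAGE_SHIFT (31-PAGE_SHIFT on 32 bit) is bit PAGE_SHIFT from
	 * the least significant end.
	 */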
ENDPROC(flush_kernel_dcache_page_asm)
ENTRY(flush_user_dcache_page)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
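	/*
	 * The flush loop is unrolled sixteen times: each fdc,m flushes
	 * one d-cache line of the user page through %sr3 and advances
	 * %r26 by the d-cache stride held in %r23.
	 */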
1:	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	CMPB<<		%r26, %r25, 1b
	fdc,m		%r23(%sr3, %r26)

ENDPROC(flush_user_dcache_page)
ENTRY(flush_user_icache_page)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
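	/*
	 * Same unrolled pattern as flush_user_dcache_page above, with
	 * fic,m flushing the user page from the instruction cache
	 * through %sr3.
	 */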
1:	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	CMPB<<		%r26, %r25, 1b
	fic,m		%r23(%sr3, %r26)

ENDPROC(flush_user_icache_page)
ENTRY(purge_kernel_dcache_page)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif

	CMPB<<		%r26, %r25, 1b

ENDPROC(purge_kernel_dcache_page)
/* Currently not used, but it still is a possible alternate
 * implementation.
 */
ENTRY(flush_alias_page)
	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,12, %r28		/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,12, %r28		/* Clear any offset bits */
#endif
	/* Purge any old translation */

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r29
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r29
#endif

	CMPB<<		%r28, %r29, 1b
	.export flush_user_dcache_range_asm

flush_user_dcache_range_asm:
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	CMPB<<,n	%r26, %r25, 1b
	fdc,m		%r23(%sr3, %r26)

ENDPROC(flush_alias_page)
ENTRY(flush_kernel_dcache_range_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	CMPB<<,n	%r26, %r25, 1b
	fdc,m		%r23(%r26)

ENDPROC(flush_kernel_dcache_range_asm)
ENTRY(flush_user_icache_range_asm)
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	CMPB<<,n	%r26, %r25, 1b
	fic,m		%r23(%sr3, %r26)

ENDPROC(flush_user_icache_range_asm)
ENTRY(flush_kernel_icache_page)
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
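	/*
	 * Unrolled like the user-page routines above, but the fic,m
	 * below go through %sr4 because this flushes the kernel mapping
	 * of the page rather than the user one.
	 */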
1:	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	CMPB<<		%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

ENDPROC(flush_kernel_icache_page)
ENTRY(flush_kernel_icache_range_asm)
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	CMPB<<,n	%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

ENDPROC(flush_kernel_icache_range_asm)
	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
ENTRY(disable_sr_hashing_asm)
	/*
	 * Switch to real mode
	 */

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
1:	cmpib,=,n	SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n	SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n	SRHASH_PA20, %r26, srdis_pa20

srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
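	/*
	 * mfdiag/mtdiag move data between general registers and the
	 * implementation-specific diagnose registers; the assembler has
	 * no mnemonics for them, so they are hand-encoded with .word.
	 */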
	.word		0x141c1a00		/* mfdiag %dr0, %r28 */
	.word		0x141c1a00		/* must issue twice */
	depwi		0, 18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi		0, 20,1, %r28		/* Clear IHE (icache hash enable) */
	.word		0x141c1600		/* mtdiag %r28, %dr0 */
	.word		0x141c1600		/* must issue twice */
srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word		0x141c0600		/* mfdiag %dr0, %r28 */
	depwi		0, 28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word		0x141c0240		/* mtdiag %r28, %dr0 */
srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word		0x144008bc		/* mfdiag %dr2, %r28 */
	depdi		0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word		0x145c1840		/* mtdiag %r28, %dr2 */
	/* Switch back to virtual mode */
	rsm		PSW_SM_I, %r0		/* prep to load iia queue */

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1

ENDPROC(disable_sr_hashing_asm)