/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *	should only use index and base registers that are not shadowed,
 *	so that the fast path emulation in the non-access miss handler
 *	can be used.
 */

#ifdef CONFIG_64BIT
#define ADDIB	addib,*
#define CMPB	cmpb,*
#define ANDCM	andcm,*

	.level	2.0w
#else
#define ADDIB	addib,
#define CMPB	cmpb,
#define ANDCM	andcm

	.level	2.0
#endif


#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>

	.text
	.align	128

	.export	flush_tlb_all_local,code

flush_tlb_all_local:
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */

	/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
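	/*
	 * Real-mode entry sequence: stop the instruction address queue
	 * (Q-bit), point IIAOQ head/tail at the physical address of 1f,
	 * then rfi with a real-mode PSW.  The nops relate to the
	 * pcxt_ssm_bug workaround referenced above.
	 */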
	rsm	PSW_SM_I, %r19		/* save I-bit state */
	load32	PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	REAL_MODE_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

1:	load32	PA(cache_info), %r1

	/* Flush Instruction Tlb */

	LDREG	ITLB_SID_BASE(%r1), %r20
	LDREG	ITLB_SID_STRIDE(%r1), %r21
	LDREG	ITLB_SID_COUNT(%r1), %r22
	LDREG	ITLB_OFF_BASE(%r1), %arg0
	LDREG	ITLB_OFF_STRIDE(%r1), %arg1
	LDREG	ITLB_OFF_COUNT(%r1), %arg2
	LDREG	ITLB_LOOP(%r1), %arg3

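	/*
	 * cache_info (filled in from PDC firmware) describes three nested
	 * loops: an outer walk over spaces (SID base/stride/count), a
	 * middle walk over offsets within each space, and an inner repeat
	 * count (LOOP).  LOOP == 1 takes the shorter fitoneloop path.
	 */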
	ADDIB=	-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n %arg3, %r31, fitdone	/* If loop < 0, skip */
	copy	%arg0, %r28		/* Init base addr */

fitmanyloop:				/* Loop if LOOP >= 2 */
	mtsp	%r20, %sr1
	add	%r21, %r20, %r20	/* increment space */
	copy	%arg2, %r29		/* Init middle loop count */

fitmanymiddle:				/* Loop if LOOP >= 2 */
	ADDIB>	-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe	0(%sr1, %r28)
	pitlbe,m %arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	ADDIB>	-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy	%arg3, %r31		/* Re-init inner loop count */

	movb,tr	%arg0, %r28, fitmanyloop /* Re-init base addr */
	ADDIB<=,n -1, %r22, fitdone	/* Outer loop count decr */

fitoneloop:				/* Loop if LOOP = 1 */
	mtsp	%r20, %sr1
	copy	%arg0, %r28		/* init base addr */
	copy	%arg2, %r29		/* init middle loop count */

fitonemiddle:				/* Loop if LOOP = 1 */
	ADDIB>	-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m %arg1(%sr1, %r28)	/* pitlbe for one loop */

	ADDIB>	-1, %r22, fitoneloop	/* Outer loop count decr */
	add	%r21, %r20, %r20	/* increment space */

fitdone:

	/* Flush Data Tlb */

	LDREG	DTLB_SID_BASE(%r1), %r20
	LDREG	DTLB_SID_STRIDE(%r1), %r21
	LDREG	DTLB_SID_COUNT(%r1), %r22
	LDREG	DTLB_OFF_BASE(%r1), %arg0
	LDREG	DTLB_OFF_STRIDE(%r1), %arg1
	LDREG	DTLB_OFF_COUNT(%r1), %arg2
	LDREG	DTLB_LOOP(%r1), %arg3

	ADDIB=	-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n %arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy	%arg0, %r28		/* Init base addr */

fdtmanyloop:				/* Loop if LOOP >= 2 */
	mtsp	%r20, %sr1
	add	%r21, %r20, %r20	/* increment space */
	copy	%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:				/* Loop if LOOP >= 2 */
	ADDIB>	-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe	0(%sr1, %r28)
	pdtlbe,m %arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	ADDIB>	-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy	%arg3, %r31		/* Re-init inner loop count */

	movb,tr	%arg0, %r28, fdtmanyloop /* Re-init base addr */
	ADDIB<=,n -1, %r22, fdtdone	/* Outer loop count decr */

fdtoneloop:				/* Loop if LOOP = 1 */
	mtsp	%r20, %sr1
	copy	%arg0, %r28		/* init base addr */
	copy	%arg2, %r29		/* init middle loop count */

fdtonemiddle:				/* Loop if LOOP = 1 */
	ADDIB>	-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m %arg1(%sr1, %r28)	/* pdtlbe for one loop */

	ADDIB>	-1, %r22, fdtoneloop	/* Outer loop count decr */
	add	%r21, %r20, %r20	/* increment space */


fdtdone:
	/*
	 * Switch back to virtual mode
	 */
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0
	load32	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	KERNEL_PSW, %r1
	or	%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl	%r1, %ipsw		/* restore I-bit (entire PSW) */
	rfi
	nop

2:	bv	%r0(%r2)
	nop

	.exit
	.procend

	.export	flush_instruction_cache_local,code
	.import	cache_info,data

flush_instruction_cache_local:
	.proc
	.callinfo NO_CALLS
	.entry

	mtsp	%r0, %sr1
	load32	cache_info, %r1

	/* Flush Instruction Cache */

	LDREG	ICACHE_BASE(%r1), %arg0
	LDREG	ICACHE_STRIDE(%r1), %arg1
	LDREG	ICACHE_COUNT(%r1), %arg2
	LDREG	ICACHE_LOOP(%r1), %arg3
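	/* Same base/stride/count plus LOOP scheme as the TLB flush above,
	 * with fice doing the per-entry work; LOOP == 1 takes fioneloop.
	 */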
	rsm	PSW_SM_I, %r22		/* No mmgt ops during loop */
	ADDIB=	-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n %arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:				/* Loop if LOOP >= 2 */
	ADDIB>	-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice	%r0(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr	%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	ADDIB<=,n -1, %arg2, fisync	/* Outer loop decr */

fioneloop:				/* Loop if LOOP = 1 */
	ADDIB>	-1, %arg2, fioneloop	/* Outer loop count decr */
	fice,m	%arg1(%sr1, %arg0)	/* Fice for one loop */

fisync:
	sync
	mtsm	%r22			/* restore I-bit */
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	flush_data_cache_local, code
	.import	cache_info, data

flush_data_cache_local:
	.proc
	.callinfo NO_CALLS
	.entry

	mtsp	%r0, %sr1
	load32	cache_info, %r1

	/* Flush Data Cache */

	LDREG	DCACHE_BASE(%r1), %arg0
	LDREG	DCACHE_STRIDE(%r1), %arg1
	LDREG	DCACHE_COUNT(%r1), %arg2
	LDREG	DCACHE_LOOP(%r1), %arg3
	rsm	PSW_SM_I, %r22
	ADDIB=	-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n %arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:				/* Loop if LOOP >= 2 */
	ADDIB>	-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce	%r0(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr	%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	ADDIB<=,n -1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:				/* Loop if LOOP = 1 */
	ADDIB>	-1, %arg2, fdoneloop	/* Outer loop count decr */
	fdce,m	%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm	%r22			/* restore I-bit */
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	copy_user_page_asm,code
	.align	16

copy_user_page_asm:
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */

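	/* Loads targeting %r0 discard their result; they serve purely as
	 * cacheline prefetches of the source page.
	 */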
	ldd	0(%r25), %r19
	ldi	ASM_PAGE_SIZE_DIV128, %r1

	ldw	64(%r25), %r0		/* prefetch 1 cacheline ahead */
	ldw	128(%r25), %r0		/* prefetch 2 */

1:	ldd	8(%r25), %r20
	ldw	192(%r25), %r0		/* prefetch 3 */
	ldw	256(%r25), %r0		/* prefetch 4 */

	ldd	16(%r25), %r21
	ldd	24(%r25), %r22
	std	%r19, 0(%r26)
	std	%r20, 8(%r26)

	ldd	32(%r25), %r19
	ldd	40(%r25), %r20
	std	%r21, 16(%r26)
	std	%r22, 24(%r26)

	ldd	48(%r25), %r21
	ldd	56(%r25), %r22
	std	%r19, 32(%r26)
	std	%r20, 40(%r26)

	ldd	64(%r25), %r19
	ldd	72(%r25), %r20
	std	%r21, 48(%r26)
	std	%r22, 56(%r26)

	ldd	80(%r25), %r21
	ldd	88(%r25), %r22
	std	%r19, 64(%r26)
	std	%r20, 72(%r26)

	ldd	96(%r25), %r19
	ldd	104(%r25), %r20
	std	%r21, 80(%r26)
	std	%r22, 88(%r26)

	ldd	112(%r25), %r21
	ldd	120(%r25), %r22
	std	%r19, 96(%r26)
	std	%r20, 104(%r26)

	ldo	128(%r25), %r25
	std	%r21, 112(%r26)
	std	%r22, 120(%r26)
	ldo	128(%r26), %r26

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch. Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	ADDIB>,n -1, %r1, 1b		/* bundle 10 */
	ldd	0(%r25), %r19		/* start next loads */

#else

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	ldw	0(%r25), %r19
	ldi	ASM_PAGE_SIZE_DIV64, %r1

1:
	ldw	4(%r25), %r20
	ldw	8(%r25), %r21
	ldw	12(%r25), %r22
	stw	%r19, 0(%r26)
	stw	%r20, 4(%r26)
	stw	%r21, 8(%r26)
	stw	%r22, 12(%r26)
	ldw	16(%r25), %r19
	ldw	20(%r25), %r20
	ldw	24(%r25), %r21
	ldw	28(%r25), %r22
	stw	%r19, 16(%r26)
	stw	%r20, 20(%r26)
	stw	%r21, 24(%r26)
	stw	%r22, 28(%r26)
	ldw	32(%r25), %r19
	ldw	36(%r25), %r20
	ldw	40(%r25), %r21
	ldw	44(%r25), %r22
	stw	%r19, 32(%r26)
	stw	%r20, 36(%r26)
	stw	%r21, 40(%r26)
	stw	%r22, 44(%r26)
	ldw	48(%r25), %r19
	ldw	52(%r25), %r20
	ldw	56(%r25), %r21
	ldw	60(%r25), %r22
	stw	%r19, 48(%r26)
	stw	%r20, 52(%r26)
	ldo	64(%r25), %r25
	stw	%r21, 56(%r26)
	stw	%r22, 60(%r26)
	ldo	64(%r26), %r26
	ADDIB>,n -1, %r1, 1b
	ldw	0(%r25), %r19
#endif
	bv	%r0(%r2)
	nop
	.exit

	.procend

/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 *	maximum alias boundary being 4 Mb. We've been assured by the
 *	parisc chip designers that there will not ever be a parisc
 *	chip with a larger alias boundary (Never say never :-) ).
 *
 *	Subtle: the dtlb miss handlers support the temp alias region by
 *	"knowing" that if a dtlb miss happens within the temp alias
 *	region it must have occurred while in clear_user_page. Since
 *	this routine makes use of processor local translations, we
 *	don't want to insert them into the kernel page table. Instead,
 *	we load up some general registers (they need to be registers
 *	which aren't shadowed) with the physical page numbers (preshifted
 *	for tlb insertion) needed to insert the translations. When we
 *	miss on the translation, the dtlb miss handler inserts the
 *	translation into the tlb using these values:
 *
 *	%r26 physical page (shifted for tlb insert) of "to" translation
 *	%r23 physical page (shifted for tlb insert) of "from" translation
 */

#if 0

	/*
	 * We can't do this since copy_user_page is used to bring in
	 * file data that might have instructions. Since the data would
	 * then need to be flushed out so the i-fetch can see it, it
	 * makes more sense to just copy through the kernel translation
	 * and flush it.
	 *
	 * I'm still keeping this around because it may be possible to
	 * use it if more information is passed into copy_user_page().
	 * Have to do some measurements to see if it is worthwhile to
	 * lobby for such a change.
	 */

	.export	copy_user_page_asm,code

copy_user_page_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%(__PAGE_OFFSET), %r1
	sub	%r26, %r1, %r26
	sub	%r25, %r1, %r23		/* move physical addr into non shadowed reg */

	ldil	L%(TMPALIAS_MAP_START), %r28
	/* FIXME for different page sizes != 4k */
#ifdef CONFIG_64BIT
	extrd,u	%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	extrd,u	%r23, 56,32, %r23	/* convert phys addr to tlb insert format */
	depd	%r24, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,12, %r28		/* Clear any offset bits */
	copy	%r28, %r29
	depdi	1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u	%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw	%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,12, %r28		/* Clear any offset bits */
	copy	%r28, %r29
	depwi	1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif

	/* Purge any old translations */

	pdtlb	0(%r28)
	pdtlb	0(%r29)

	ldi	64, %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */


1:
	ldw	0(%r29), %r19
	ldw	4(%r29), %r20
	ldw	8(%r29), %r21
	ldw	12(%r29), %r22
	stw	%r19, 0(%r28)
	stw	%r20, 4(%r28)
	stw	%r21, 8(%r28)
	stw	%r22, 12(%r28)
	ldw	16(%r29), %r19
	ldw	20(%r29), %r20
	ldw	24(%r29), %r21
	ldw	28(%r29), %r22
	stw	%r19, 16(%r28)
	stw	%r20, 20(%r28)
	stw	%r21, 24(%r28)
	stw	%r22, 28(%r28)
	ldw	32(%r29), %r19
	ldw	36(%r29), %r20
	ldw	40(%r29), %r21
	ldw	44(%r29), %r22
	stw	%r19, 32(%r28)
	stw	%r20, 36(%r28)
	stw	%r21, 40(%r28)
	stw	%r22, 44(%r28)
	ldw	48(%r29), %r19
	ldw	52(%r29), %r20
	ldw	56(%r29), %r21
	ldw	60(%r29), %r22
	stw	%r19, 48(%r28)
	stw	%r20, 52(%r28)
	stw	%r21, 56(%r28)
	stw	%r22, 60(%r28)
	ldo	64(%r28), %r28
	ADDIB>	-1, %r1, 1b
	ldo	64(%r29), %r29

	bv	%r0(%r2)
	nop
	.exit

	.procend
#endif

	.export	__clear_user_page_asm,code

__clear_user_page_asm:
	.proc
	.callinfo NO_CALLS
	.entry

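	/* Calling convention: %r26 = kernel virtual address of the page
	 * (made physical below), %r25 = the user virtual address; its
	 * offset within the 4 MB alias boundary picks the congruent
	 * tmpalias slot (see the NOTE above).
	 */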
	tophys_r1 %r26

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
	/* FIXME: page size dependent */
#endif
	extrd,u	%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,12, %r28		/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,12, %r28		/* Clear any offset bits */
#endif

	/* Purge any old translation */

	pdtlb	0(%r28)

#ifdef CONFIG_64BIT
	ldi	ASM_PAGE_SIZE_DIV128, %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define PREFETCHW_OP	ldd 256(%0), %r0 */

1:	std	%r0, 0(%r28)
	std	%r0, 8(%r28)
	std	%r0, 16(%r28)
	std	%r0, 24(%r28)
	std	%r0, 32(%r28)
	std	%r0, 40(%r28)
	std	%r0, 48(%r28)
	std	%r0, 56(%r28)
	std	%r0, 64(%r28)
	std	%r0, 72(%r28)
	std	%r0, 80(%r28)
	std	%r0, 88(%r28)
	std	%r0, 96(%r28)
	std	%r0, 104(%r28)
	std	%r0, 112(%r28)
	std	%r0, 120(%r28)
	ADDIB>	-1, %r1, 1b
	ldo	128(%r28), %r28

#else	/* ! CONFIG_64BIT */
	ldi	ASM_PAGE_SIZE_DIV64, %r1

1:
	stw	%r0, 0(%r28)
	stw	%r0, 4(%r28)
	stw	%r0, 8(%r28)
	stw	%r0, 12(%r28)
	stw	%r0, 16(%r28)
	stw	%r0, 20(%r28)
	stw	%r0, 24(%r28)
	stw	%r0, 28(%r28)
	stw	%r0, 32(%r28)
	stw	%r0, 36(%r28)
	stw	%r0, 40(%r28)
	stw	%r0, 44(%r28)
	stw	%r0, 48(%r28)
	stw	%r0, 52(%r28)
	stw	%r0, 56(%r28)
	stw	%r0, 60(%r28)
	ADDIB>	-1, %r1, 1b
	ldo	64(%r28), %r28
#endif	/* CONFIG_64BIT */

	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	flush_kernel_dcache_page_asm

flush_kernel_dcache_page_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25
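	/* %r25 now holds page base + PAGE_SIZE - stride, the address of
	 * the last cache line in the page; each pass flushes 16 lines,
	 * the final one from the CMPB delay slot.  The page-sized
	 * flush/purge routines below follow the same pattern.
	 */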

1:	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	CMPB<<	%r26, %r25, 1b
	fdc,m	%r23(%r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	flush_user_dcache_page

flush_user_dcache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

1:	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	CMPB<<	%r26, %r25, 1b
	fdc,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	flush_user_icache_page

flush_user_icache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

1:	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	CMPB<<	%r26, %r25, 1b
	fic,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	purge_kernel_dcache_page

purge_kernel_dcache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

1:	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	CMPB<<	%r26, %r25, 1b
	pdc,m	%r23(%r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

#if 0
	/* Currently not used, but it still is a possible alternate
	 * solution.
	 */

	.export	flush_alias_page

flush_alias_page:
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1 %r26

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
	extrd,u	%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,12, %r28		/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,12, %r28		/* Clear any offset bits */
#endif

	/* Purge any old translation */

	pdtlb	0(%r28)

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r29
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r29
#endif
	add	%r28, %r29, %r29
	sub	%r29, %r23, %r29

1:	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	CMPB<<	%r28, %r29, 1b
	fdc,m	%r23(%r28)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
#endif

	.export	flush_user_dcache_range_asm

flush_user_dcache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26
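	/* Round the start address down to a cache-line boundary: %r21 is
	 * stride - 1, used as a mask (this trick assumes the stride is a
	 * power of two).  The range routines below all do the same.
	 */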

1:	CMPB<<,n %r26, %r25, 1b
	fdc,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	flush_kernel_dcache_range_asm

flush_kernel_dcache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	CMPB<<,n %r26, %r25, 1b
	fdc,m	%r23(%r26)

	sync
	syncdma
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	flush_user_icache_range_asm

flush_user_icache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	CMPB<<,n %r26, %r25, 1b
	fic,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	flush_kernel_icache_page

flush_kernel_icache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

1:	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	CMPB<<	%r26, %r25, 1b
	fic,m	%r23(%sr4, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export	flush_kernel_icache_range_asm

flush_kernel_icache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	CMPB<<,n %r26, %r25, 1b
	fic,m	%r23(%sr4, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit
	.procend

	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
	.export	disable_sr_hashing_asm,code

disable_sr_hashing_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * Switch to real mode
	 */
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0
	load32	PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	REAL_MODE_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

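	/* %r26 holds the space-register hashing variant supplied by the
	 * caller: SRHASH_PCXST, SRHASH_PCXL or SRHASH_PA20.
	 */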
1:	cmpib,=,n SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n SRHASH_PA20, %r26, srdis_pa20
	b,n	srdis_done

srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

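	/* mfdiag/mtdiag are CPU-specific diagnose instructions the
	 * assembler does not accept, hence the raw .word encodings; on
	 * these CPUs the diagnose register access must be issued twice.
	 */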
	.word	0x141c1a00		/* mfdiag %dr0, %r28 */
	.word	0x141c1a00		/* must issue twice */
	depwi	0, 18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi	0, 20,1, %r28		/* Clear IHE (icache hash enable) */
	.word	0x141c1600		/* mtdiag %r28, %dr0 */
	.word	0x141c1600		/* must issue twice */
	b,n	srdis_done

srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word	0x141c0600		/* mfdiag %dr0, %r28 */
	depwi	0, 28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word	0x141c0240		/* mtdiag %r28, %dr0 */
	b,n	srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word	0x144008bc		/* mfdiag %dr2, %r28 */
	depdi	0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word	0x145c1840		/* mtdiag %r28, %dr2 */


srdis_done:
	/* Switch back to virtual mode */
	rsm	PSW_SM_I, %r0		/* prep to load iia queue */
	load32	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	KERNEL_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

2:	bv	%r0(%r2)
	nop
	.exit

	.procend

	.end