/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *	 should only use index and base registers that are not shadowed,
 *	 so that the fast path emulation in the non-access miss handler
 *	 can be used.
 */
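
/*
 * (PA-RISC keeps shadow copies of %r1, %r8, %r9, %r16, %r17, %r24 and
 * %r25 across interruptions, so those must be avoided here; the flush
 * loops below therefore index with registers such as %r23/%r26 or
 * %r28/%r29 instead.)
 */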

#ifdef CONFIG_64BIT
#define ADDIB	addib,*
#define CMPB	cmpb,*
#define ANDCM	andcm,*

	.level	2.0w
#else
#define ADDIB	addib,
#define CMPB	cmpb,
#define ANDCM	andcm

	.level	2.0
#endif
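
/* On wide kernels the ",*" completer selects the 64-bit (doubleword)
 * compare conditions for these branch-and-modify instructions; 32-bit
 * kernels use the plain word conditions.
 */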


#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <linux/linkage.h>

	.text
	.align	128

ENTRY(flush_tlb_all_local)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */
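
	/* A sketch of the idiom below (hedged): real mode is entered by
	 * loading the interruption address queues by hand (IIASQ via
	 * %cr17, IIAOQ via %cr18) and issuing an rfi to the physical
	 * address of 1f. The nops after load32 appear to provide the
	 * spacing the pcxt_ssm_bug workaround requires.
	 */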

	/* pcxt_ssm_bug	- relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm	PSW_SM_I, %r19		/* save I-bit state */
	load32	PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	REAL_MODE_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

1:	load32	PA(cache_info), %r1

	/* Flush Instruction Tlb */

	LDREG	ITLB_SID_BASE(%r1), %r20
	LDREG	ITLB_SID_STRIDE(%r1), %r21
	LDREG	ITLB_SID_COUNT(%r1), %r22
	LDREG	ITLB_OFF_BASE(%r1), %arg0
	LDREG	ITLB_OFF_STRIDE(%r1), %arg1
	LDREG	ITLB_OFF_COUNT(%r1), %arg2
	LDREG	ITLB_LOOP(%r1), %arg3

	ADDIB=	-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n %arg3, %r31, fitdone	/* If loop < 0, skip */
	copy	%arg0, %r28		/* Init base addr */

fitmanyloop:				/* Loop if LOOP >= 2 */
	mtsp	%r20, %sr1
	add	%r21, %r20, %r20	/* increment space */
	copy	%arg2, %r29		/* Init middle loop count */

fitmanymiddle:				/* Loop if LOOP >= 2 */
	ADDIB>	-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe	0(%sr1, %r28)
	pitlbe,m %arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	ADDIB>	-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy	%arg3, %r31		/* Re-init inner loop count */

	movb,tr	%arg0, %r28, fitmanyloop /* Re-init base addr */
	ADDIB<=,n -1, %r22, fitdone	/* Outer loop count decr */

fitoneloop:				/* Loop if LOOP = 1 */
	mtsp	%r20, %sr1
	copy	%arg0, %r28		/* init base addr */
	copy	%arg2, %r29		/* init middle loop count */

fitonemiddle:				/* Loop if LOOP = 1 */
	ADDIB>	-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m %arg1(%sr1, %r28)	/* pitlbe for one loop */

	ADDIB>	-1, %r22, fitoneloop	/* Outer loop count decr */
	add	%r21, %r20, %r20	/* increment space */

fitdone:

	/* Flush Data Tlb */

	LDREG	DTLB_SID_BASE(%r1), %r20
	LDREG	DTLB_SID_STRIDE(%r1), %r21
	LDREG	DTLB_SID_COUNT(%r1), %r22
	LDREG	DTLB_OFF_BASE(%r1), %arg0
	LDREG	DTLB_OFF_STRIDE(%r1), %arg1
	LDREG	DTLB_OFF_COUNT(%r1), %arg2
	LDREG	DTLB_LOOP(%r1), %arg3

	ADDIB=	-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n %arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy	%arg0, %r28		/* Init base addr */

fdtmanyloop:				/* Loop if LOOP >= 2 */
	mtsp	%r20, %sr1
	add	%r21, %r20, %r20	/* increment space */
	copy	%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:				/* Loop if LOOP >= 2 */
	ADDIB>	-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe	0(%sr1, %r28)
	pdtlbe,m %arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	ADDIB>	-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy	%arg3, %r31		/* Re-init inner loop count */

	movb,tr	%arg0, %r28, fdtmanyloop /* Re-init base addr */
	ADDIB<=,n -1, %r22, fdtdone	/* Outer loop count decr */

fdtoneloop:				/* Loop if LOOP = 1 */
	mtsp	%r20, %sr1
	copy	%arg0, %r28		/* init base addr */
	copy	%arg2, %r29		/* init middle loop count */

fdtonemiddle:				/* Loop if LOOP = 1 */
	ADDIB>	-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m %arg1(%sr1, %r28)	/* pdtlbe for one loop */

	ADDIB>	-1, %r22, fdtoneloop	/* Outer loop count decr */
	add	%r21, %r20, %r20	/* increment space */


fdtdone:
	/*
	 * Switch back to virtual mode
	 */
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0
	load32	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	KERNEL_PSW, %r1
	or	%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl	%r1, %ipsw		/* restore I-bit (entire PSW) */
	rfi
	nop

2:	bv	%r0(%r2)
	nop

	.exit
	.procend
ENDPROC(flush_tlb_all_local)
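
/*
 * A hedged C sketch of the TLB flush structure above (names mirror the
 * cache_info fields loaded via LDREG; the LOOP==1 fast path and branch
 * scheduling are omitted; pdtlbe replaces pitlbe on the data side):
 *
 *	space = sid_base;
 *	for (s = 0; s < sid_count; s++, space += sid_stride) {
 *		addr = off_base;
 *		for (o = 0; o < off_count; o++, addr += off_stride)
 *			for (l = 0; l < loop; l++)
 *				pitlbe(space, addr);
 *	}
 */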

	.import	cache_info, data

ENTRY(flush_instruction_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	mtsp	%r0, %sr1
	load32	cache_info, %r1

	/* Flush Instruction Cache */

	LDREG	ICACHE_BASE(%r1), %arg0
	LDREG	ICACHE_STRIDE(%r1), %arg1
	LDREG	ICACHE_COUNT(%r1), %arg2
	LDREG	ICACHE_LOOP(%r1), %arg3
	rsm	PSW_SM_I, %r22		/* No mmgt ops during loop */
	ADDIB=	-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n %arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:				/* Loop if LOOP >= 2 */
	ADDIB>	-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice	%r0(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr	%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	ADDIB<=,n -1, %arg2, fisync	/* Outer loop decr */

fioneloop:				/* Loop if LOOP = 1 */
	ADDIB>	-1, %arg2, fioneloop	/* Outer loop count decr */
	fice,m	%arg1(%sr1, %arg0)	/* Fice for one loop */

fisync:
	sync
	mtsm	%r22			/* restore I-bit */
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_instruction_cache_local)


	.import	cache_info, data
ENTRY(flush_data_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	mtsp	%r0, %sr1
	load32	cache_info, %r1

	/* Flush Data Cache */

	LDREG	DCACHE_BASE(%r1), %arg0
	LDREG	DCACHE_STRIDE(%r1), %arg1
	LDREG	DCACHE_COUNT(%r1), %arg2
	LDREG	DCACHE_LOOP(%r1), %arg3
	rsm	PSW_SM_I, %r22
	ADDIB=	-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n %arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:				/* Loop if LOOP >= 2 */
	ADDIB>	-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce	%r0(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr	%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	ADDIB<=,n -1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:				/* Loop if LOOP = 1 */
	ADDIB>	-1, %arg2, fdoneloop	/* Outer loop count decr */
	fdce,m	%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm	%r22			/* restore I-bit */
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_data_cache_local)

	.align	16

ENTRY(copy_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */
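
	/* Hedged C equivalent of the unrolled loop that follows (%r26 is
	 * 'to' and %r25 is 'from' per the PA calling convention): sixteen
	 * doubleword copies per iteration, with the ldw ...,%r0 loads
	 * acting as software prefetch of upcoming source cachelines:
	 *
	 *	for (i = 0; i < PAGE_SIZE / sizeof(u64); i++)
	 *		((u64 *)to)[i] = ((u64 *)from)[i];
	 */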

	ldd	0(%r25), %r19
	ldi	ASM_PAGE_SIZE_DIV128, %r1

	ldw	64(%r25), %r0		/* prefetch 1 cacheline ahead */
	ldw	128(%r25), %r0		/* prefetch 2 */

1:	ldd	8(%r25), %r20
	ldw	192(%r25), %r0		/* prefetch 3 */
	ldw	256(%r25), %r0		/* prefetch 4 */

	ldd	16(%r25), %r21
	ldd	24(%r25), %r22
	std	%r19, 0(%r26)
	std	%r20, 8(%r26)

	ldd	32(%r25), %r19
	ldd	40(%r25), %r20
	std	%r21, 16(%r26)
	std	%r22, 24(%r26)

	ldd	48(%r25), %r21
	ldd	56(%r25), %r22
	std	%r19, 32(%r26)
	std	%r20, 40(%r26)

	ldd	64(%r25), %r19
	ldd	72(%r25), %r20
	std	%r21, 48(%r26)
	std	%r22, 56(%r26)

	ldd	80(%r25), %r21
	ldd	88(%r25), %r22
	std	%r19, 64(%r26)
	std	%r20, 72(%r26)

	ldd	96(%r25), %r19
	ldd	104(%r25), %r20
	std	%r21, 80(%r26)
	std	%r22, 88(%r26)

	ldd	112(%r25), %r21
	ldd	120(%r25), %r22
	std	%r19, 96(%r26)
	std	%r20, 104(%r26)

	ldo	128(%r25), %r25
	std	%r21, 112(%r26)
	std	%r22, 120(%r26)
	ldo	128(%r26), %r26

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch. Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	ADDIB>,n -1, %r1, 1b		/* bundle 10 */
	ldd	0(%r25), %r19		/* start next loads */

#else

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use ldd/std on a 32-bit kernel.
	 */
	ldw	0(%r25), %r19
	ldi	ASM_PAGE_SIZE_DIV64, %r1

1:
	ldw	4(%r25), %r20
	ldw	8(%r25), %r21
	ldw	12(%r25), %r22
	stw	%r19, 0(%r26)
	stw	%r20, 4(%r26)
	stw	%r21, 8(%r26)
	stw	%r22, 12(%r26)
	ldw	16(%r25), %r19
	ldw	20(%r25), %r20
	ldw	24(%r25), %r21
	ldw	28(%r25), %r22
	stw	%r19, 16(%r26)
	stw	%r20, 20(%r26)
	stw	%r21, 24(%r26)
	stw	%r22, 28(%r26)
	ldw	32(%r25), %r19
	ldw	36(%r25), %r20
	ldw	40(%r25), %r21
	ldw	44(%r25), %r22
	stw	%r19, 32(%r26)
	stw	%r20, 36(%r26)
	stw	%r21, 40(%r26)
	stw	%r22, 44(%r26)
	ldw	48(%r25), %r19
	ldw	52(%r25), %r20
	ldw	56(%r25), %r21
	ldw	60(%r25), %r22
	stw	%r19, 48(%r26)
	stw	%r20, 52(%r26)
	ldo	64(%r25), %r25
	stw	%r21, 56(%r26)
	stw	%r22, 60(%r26)
	ldo	64(%r26), %r26
	ADDIB>,n -1, %r1, 1b
	ldw	0(%r25), %r19
#endif
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(copy_user_page_asm)

/*
 * NOTE: Code in clear_user_page has a hard-coded dependency on the
 *	 maximum alias boundary being 4 MB. We've been assured by the
 *	 parisc chip designers that there will not ever be a parisc
 *	 chip with a larger alias boundary (Never say never :-) ).
 *
 * Subtle: the dtlb miss handlers support the temp alias region by
 * "knowing" that if a dtlb miss happens within the temp alias
 * region it must have occurred while in clear_user_page. Since
 * this routine makes use of processor-local translations, we
 * don't want to insert them into the kernel page table. Instead,
 * we load up some general registers (they need to be registers
 * which aren't shadowed) with the physical page numbers (preshifted
 * for tlb insertion) needed to insert the translations. When we
 * miss on the translation, the dtlb miss handler inserts the
 * translation into the tlb using these values:
 *
 *	%r26 physical page (shifted for tlb insert) of "to" translation
 *	%r23 physical page (shifted for tlb insert) of "from" translation
 */
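
/*
 * Hedged sketch of the alias-address formation used in the routines
 * below (32-bit case, assuming 4k pages and the 4 MB boundary above):
 *
 *	to = TMPALIAS_MAP_START
 *	     | (user_vaddr & (0x00400000 - 1) & ~(PAGE_SIZE - 1));
 *
 * so the temporary mapping lands on the same cache colour as the
 * user's own mapping of the page.
 */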

#if 0

	/*
	 * We can't do this since copy_user_page is used to bring in
	 * file data that might have instructions. Since the data would
	 * then need to be flushed out so the i-fetch can see it, it
	 * makes more sense to just copy through the kernel translation
	 * and flush it.
	 *
	 * I'm still keeping this around because it may be possible to
	 * use it if more information is passed into copy_user_page().
	 * Have to do some measurements to see if it is worthwhile to
	 * lobby for such a change.
	 */

ENTRY(copy_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%(__PAGE_OFFSET), %r1
	sub	%r26, %r1, %r26
	sub	%r25, %r1, %r23		/* move physical addr into non-shadowed reg */

	ldil	L%(TMPALIAS_MAP_START), %r28
	/* FIXME for different page sizes != 4k */
#ifdef CONFIG_64BIT
	extrd,u	%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	extrd,u	%r23, 56,32, %r23	/* convert phys addr to tlb insert format */
	depd	%r24, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,12, %r28		/* Clear any offset bits */
	copy	%r28, %r29
	depdi	1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u	%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw	%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,12, %r28		/* Clear any offset bits */
	copy	%r28, %r29
	depwi	1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif

	/* Purge any old translations */

	pdtlb	0(%r28)
	pdtlb	0(%r29)

	ldi	64, %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64-bit register values on interrupt, we can't
	 * use ldd/std on a 32-bit kernel.
	 */


1:
	ldw	0(%r29), %r19
	ldw	4(%r29), %r20
	ldw	8(%r29), %r21
	ldw	12(%r29), %r22
	stw	%r19, 0(%r28)
	stw	%r20, 4(%r28)
	stw	%r21, 8(%r28)
	stw	%r22, 12(%r28)
	ldw	16(%r29), %r19
	ldw	20(%r29), %r20
	ldw	24(%r29), %r21
	ldw	28(%r29), %r22
	stw	%r19, 16(%r28)
	stw	%r20, 20(%r28)
	stw	%r21, 24(%r28)
	stw	%r22, 28(%r28)
	ldw	32(%r29), %r19
	ldw	36(%r29), %r20
	ldw	40(%r29), %r21
	ldw	44(%r29), %r22
	stw	%r19, 32(%r28)
	stw	%r20, 36(%r28)
	stw	%r21, 40(%r28)
	stw	%r22, 44(%r28)
	ldw	48(%r29), %r19
	ldw	52(%r29), %r20
	ldw	56(%r29), %r21
	ldw	60(%r29), %r22
	stw	%r19, 48(%r28)
	stw	%r20, 52(%r28)
	stw	%r21, 56(%r28)
	stw	%r22, 60(%r28)
	ldo	64(%r28), %r28
	ADDIB>	-1, %r1, 1b
	ldo	64(%r29), %r29

	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(copy_user_page_asm)
#endif

ENTRY(__clear_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1	%r26

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
	/* FIXME: page size dependent */
#endif
	extrd,u	%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,12, %r28		/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,12, %r28		/* Clear any offset bits */
#endif

	/* Purge any old translation */

	pdtlb	0(%r28)

#ifdef CONFIG_64BIT
	ldi	ASM_PAGE_SIZE_DIV128, %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define PREFETCHW_OP	ldd 256(%0), %r0 */

1:	std	%r0, 0(%r28)
	std	%r0, 8(%r28)
	std	%r0, 16(%r28)
	std	%r0, 24(%r28)
	std	%r0, 32(%r28)
	std	%r0, 40(%r28)
	std	%r0, 48(%r28)
	std	%r0, 56(%r28)
	std	%r0, 64(%r28)
	std	%r0, 72(%r28)
	std	%r0, 80(%r28)
	std	%r0, 88(%r28)
	std	%r0, 96(%r28)
	std	%r0, 104(%r28)
	std	%r0, 112(%r28)
	std	%r0, 120(%r28)
	ADDIB>	-1, %r1, 1b
	ldo	128(%r28), %r28

#else	/* ! CONFIG_64BIT */
	ldi	ASM_PAGE_SIZE_DIV64, %r1

1:
	stw	%r0, 0(%r28)
	stw	%r0, 4(%r28)
	stw	%r0, 8(%r28)
	stw	%r0, 12(%r28)
	stw	%r0, 16(%r28)
	stw	%r0, 20(%r28)
	stw	%r0, 24(%r28)
	stw	%r0, 28(%r28)
	stw	%r0, 32(%r28)
	stw	%r0, 36(%r28)
	stw	%r0, 40(%r28)
	stw	%r0, 44(%r28)
	stw	%r0, 48(%r28)
	stw	%r0, 52(%r28)
	stw	%r0, 56(%r28)
	stw	%r0, 60(%r28)
	ADDIB>	-1, %r1, 1b
	ldo	64(%r28), %r28
#endif	/* CONFIG_64BIT */

	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(__clear_user_page_asm)

ENTRY(flush_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25


1:	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	CMPB<<	%r26, %r25, 1b
	fdc,m	%r23(%r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_kernel_dcache_page_asm)
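
/*
 * Hedged C equivalent of the whole-page flush loops in this file
 * (assuming dcache_stride evenly divides PAGE_SIZE):
 *
 *	for (p = page; p < page + PAGE_SIZE; p += dcache_stride)
 *		fdc(p);
 *	sync();
 *
 * The end pointer is precomputed as page + PAGE_SIZE - stride so the
 * CMPB<< test can run ahead of the final fdc,m in its delay slot.
 */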

ENTRY(flush_user_dcache_page)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25


1:	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	fdc,m	%r23(%sr3, %r26)
	CMPB<<	%r26, %r25, 1b
	fdc,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_user_dcache_page)

ENTRY(flush_user_icache_page)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25


1:	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	fic,m	%r23(%sr3, %r26)
	CMPB<<	%r26, %r25, 1b
	fic,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_user_icache_page)


ENTRY(purge_kernel_dcache_page)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

1:	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	CMPB<<	%r26, %r25, 1b
	pdc,m	%r23(%r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(purge_kernel_dcache_page)

#if 0
	/* Currently not used, but it still is a possible alternate
	 * solution.
	 */

ENTRY(flush_alias_page)
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1	%r26

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
	extrd,u	%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,12, %r28		/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,12, %r28		/* Clear any offset bits */
#endif

	/* Purge any old translation */

	pdtlb	0(%r28)

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r29
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r29
#endif
	add	%r28, %r29, %r29
	sub	%r29, %r23, %r29

1:	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	fdc,m	%r23(%r28)
	CMPB<<	%r28, %r29, 1b
	fdc,m	%r23(%r28)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_alias_page)
#endif

ENTRY(flush_user_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	CMPB<<,n %r26, %r25, 1b
	fdc,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_user_dcache_range_asm)
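
/*
 * Hedged C equivalent of the range flushes here and below. The
 * ldo/ANDCM pair first aligns the start address down to a cacheline
 * boundary, then:
 *
 *	while (start < end) {
 *		fdc(start);
 *		start += dcache_stride;
 *	}
 *	sync();
 */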

ENTRY(flush_kernel_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	CMPB<<,n %r26, %r25, 1b
	fdc,m	%r23(%r26)

	sync
	syncdma
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_kernel_dcache_range_asm)

ENTRY(flush_user_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	CMPB<<,n %r26, %r25, 1b
	fic,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_user_icache_range_asm)

ENTRY(flush_kernel_icache_page)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25


1:	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	CMPB<<	%r26, %r25, 1b
	fic,m	%r23(%sr4, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(flush_kernel_icache_page)

ENTRY(flush_kernel_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	CMPB<<,n %r26, %r25, 1b
	fic,m	%r23(%sr4, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit
	.procend
ENDPROC(flush_kernel_icache_range_asm)

/* align should cover use of rfi in disable_sr_hashing_asm and
 * srdis_done.
 */
	.align	256
ENTRY(disable_sr_hashing_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * Switch to real mode
	 */
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0
	load32	PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	REAL_MODE_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

1:	cmpib,=,n SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n SRHASH_PA20, %r26, srdis_pa20
	b,n	srdis_done

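	/* The diagnose-register accesses below are hand-assembled .word
	 * encodings, presumably because the assembler lacked mfdiag and
	 * mtdiag mnemonics; each comment gives the intended instruction.
	 */
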
srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

	.word	0x141c1a00		/* mfdiag %dr0, %r28 */
	.word	0x141c1a00		/* must issue twice */
	depwi	0,18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi	0,20,1, %r28		/* Clear IHE (icache hash enable) */
	.word	0x141c1600		/* mtdiag %r28, %dr0 */
	.word	0x141c1600		/* must issue twice */
	b,n	srdis_done

srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word	0x141c0600		/* mfdiag %dr0, %r28 */
	depwi	0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word	0x141c0240		/* mtdiag %r28, %dr0 */
	b,n	srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word	0x144008bc		/* mfdiag %dr2, %r28 */
	depdi	0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word	0x145c1840		/* mtdiag %r28, %dr2 */


srdis_done:
	/* Switch back to virtual mode */
	rsm	PSW_SM_I, %r0		/* prep to load iia queue */
	load32	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	KERNEL_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

2:	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC(disable_sr_hashing_asm)

	.end