[SPARC64]: Fix new context version SMP handling.
[deliverable/linux.git] / arch / sparc64 / mm / ultra.S
CommitLineData
1da177e4
LT
1/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
2 * ultra.S: Don't expand these all over the place...
3 *
4 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <asm/asi.h>
9#include <asm/pgtable.h>
10#include <asm/page.h>
11#include <asm/spitfire.h>
12#include <asm/mmu_context.h>
2ef27778 13#include <asm/mmu.h>
1da177e4
LT
14#include <asm/pil.h>
15#include <asm/head.h>
16#include <asm/thread_info.h>
17#include <asm/cacheflush.h>
52bf082f 18#include <asm/hypervisor.h>
1da177e4
LT
19
20 /* Basically, most of the Spitfire vs. Cheetah madness
21 * has to do with the fact that Cheetah does not support
22 * IMMU flushes out of the secondary context. Someone needs
23 * to throw a south lake birthday party for the folks
24 * in Microelectronics who refused to fix this shit.
25 */
26
27 /* This file is meant to be read efficiently by the CPU, not humans.
28 * Staraj sie tego nikomu nie pierdolnac...
29 */
30 .text
31 .align 32
32 .globl __flush_tlb_mm
52bf082f
DM
33__flush_tlb_mm: /* 18 insns */
34 /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
1da177e4
LT
35 ldxa [%o1] ASI_DMMU, %g2
36 cmp %g2, %o0
37 bne,pn %icc, __spitfire_flush_tlb_mm_slow
38 mov 0x50, %g3
39 stxa %g0, [%g3] ASI_DMMU_DEMAP
40 stxa %g0, [%g3] ASI_IMMU_DEMAP
4da808c3
DM
41 sethi %hi(KERNBASE), %g3
42 flush %g3
1da177e4 43 retl
4da808c3 44 nop
1da177e4
LT
45 nop
46 nop
47 nop
48 nop
49 nop
50 nop
51 nop
2ef27778
DM
52 nop
53 nop
1da177e4
LT
54
55 .align 32
56 .globl __flush_tlb_pending
52bf082f 57__flush_tlb_pending: /* 26 insns */
1da177e4
LT
58 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
59 rdpr %pstate, %g7
60 sllx %o1, 3, %o1
61 andn %g7, PSTATE_IE, %g2
62 wrpr %g2, %pstate
63 mov SECONDARY_CONTEXT, %o4
64 ldxa [%o4] ASI_DMMU, %g2
65 stxa %o0, [%o4] ASI_DMMU
661: sub %o1, (1 << 3), %o1
67 ldx [%o2 + %o1], %o3
68 andcc %o3, 1, %g0
69 andn %o3, 1, %o3
70 be,pn %icc, 2f
71 or %o3, 0x10, %o3
72 stxa %g0, [%o3] ASI_IMMU_DEMAP
732: stxa %g0, [%o3] ASI_DMMU_DEMAP
74 membar #Sync
75 brnz,pt %o1, 1b
76 nop
77 stxa %g2, [%o4] ASI_DMMU
4da808c3
DM
78 sethi %hi(KERNBASE), %o4
79 flush %o4
1da177e4
LT
80 retl
81 wrpr %g7, 0x0, %pstate
fef43da4 82 nop
2ef27778
DM
83 nop
84 nop
85 nop
1da177e4
LT
86
87 .align 32
88 .globl __flush_tlb_kernel_range
1daef08a 89__flush_tlb_kernel_range: /* 16 insns */
52bf082f 90 /* %o0=start, %o1=end */
1da177e4
LT
91 cmp %o0, %o1
92 be,pn %xcc, 2f
93 sethi %hi(PAGE_SIZE), %o4
94 sub %o1, %o0, %o3
95 sub %o3, %o4, %o3
96 or %o0, 0x20, %o0 ! Nucleus
971: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
98 stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
99 membar #Sync
100 brnz,pt %o3, 1b
101 sub %o3, %o4, %o3
4da808c3
DM
1022: sethi %hi(KERNBASE), %o3
103 flush %o3
104 retl
105 nop
52bf082f 106 nop
1da177e4
LT
107
108__spitfire_flush_tlb_mm_slow:
109 rdpr %pstate, %g1
110 wrpr %g1, PSTATE_IE, %pstate
111 stxa %o0, [%o1] ASI_DMMU
112 stxa %g0, [%g3] ASI_DMMU_DEMAP
113 stxa %g0, [%g3] ASI_IMMU_DEMAP
114 flush %g6
115 stxa %g2, [%o1] ASI_DMMU
4da808c3
DM
116 sethi %hi(KERNBASE), %o1
117 flush %o1
1da177e4
LT
118 retl
119 wrpr %g1, 0, %pstate
120
/*
 * The following code flushes one page_size worth.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif
	.section .kprobes.text, "ax"
	.align	32

	/* Flush every I-cache line of one page, walking the page 32
	 * bytes at a time with the flush instruction.
	 * %o0 = physical page address.
	 */
	.globl	__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	membar	#StoreStore
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0		! page-align the address
	sethi	%hi(PAGE_SIZE), %g2
	sllx	%g1, 32, %g1
	add	%o0, %g1, %o0			! convert to kernel virtual
1:	subcc	%g2, 32, %g2
	bne,pt	%icc, 1b
	 flush	%o0 + %g2
	retl
	 nop
#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align	64

	/* Scan every D-cache tag and invalidate lines that belong to
	 * the given page; optionally chain to the I-cache flush.
	 * %o0 = kernel virtual address of page, %o1 = flush_icache flag.
	 */
	.globl	__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0			! physical address
	srlx	%o0, 11, %o0			! make D-cache TAG
	sethi	%hi(1 << 14), %o2		! D-cache size
	sub	%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa	[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc	%o3, DTAG_MASK, %g0		! Valid?
	be,pn	%xcc, 2f			! Nope, branch
	 andn	%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp	%o3, %o0			! TAG match?
	bne,pt	%xcc, 2f			! Nope, branch
	 nop
	stxa	%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar	#Sync
2:	brnz,pt	%o2, 1b
	 sub	%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page
	 sllx	%o0, 11, %o0			! reform physical page address
	retl
	 nop

#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous

2ef27778 192 /* Cheetah specific versions, patched at boot time. */
4da808c3 193__cheetah_flush_tlb_mm: /* 19 insns */
1da177e4
LT
194 rdpr %pstate, %g7
195 andn %g7, PSTATE_IE, %g2
196 wrpr %g2, 0x0, %pstate
197 wrpr %g0, 1, %tl
198 mov PRIMARY_CONTEXT, %o2
199 mov 0x40, %g3
200 ldxa [%o2] ASI_DMMU, %g2
2ef27778
DM
201 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1
202 sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1
203 or %o0, %o1, %o0 /* Preserve nucleus page size fields */
1da177e4
LT
204 stxa %o0, [%o2] ASI_DMMU
205 stxa %g0, [%g3] ASI_DMMU_DEMAP
206 stxa %g0, [%g3] ASI_IMMU_DEMAP
207 stxa %g2, [%o2] ASI_DMMU
4da808c3
DM
208 sethi %hi(KERNBASE), %o2
209 flush %o2
1da177e4
LT
210 wrpr %g0, 0, %tl
211 retl
212 wrpr %g7, 0x0, %pstate
213
4da808c3 214__cheetah_flush_tlb_pending: /* 27 insns */
1da177e4
LT
215 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
216 rdpr %pstate, %g7
217 sllx %o1, 3, %o1
218 andn %g7, PSTATE_IE, %g2
219 wrpr %g2, 0x0, %pstate
220 wrpr %g0, 1, %tl
221 mov PRIMARY_CONTEXT, %o4
222 ldxa [%o4] ASI_DMMU, %g2
2ef27778
DM
223 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
224 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
225 or %o0, %o3, %o0 /* Preserve nucleus page size fields */
1da177e4
LT
226 stxa %o0, [%o4] ASI_DMMU
2271: sub %o1, (1 << 3), %o1
228 ldx [%o2 + %o1], %o3
229 andcc %o3, 1, %g0
230 be,pn %icc, 2f
231 andn %o3, 1, %o3
232 stxa %g0, [%o3] ASI_IMMU_DEMAP
2332: stxa %g0, [%o3] ASI_DMMU_DEMAP
b445e26c 234 membar #Sync
1da177e4 235 brnz,pt %o1, 1b
b445e26c 236 nop
1da177e4 237 stxa %g2, [%o4] ASI_DMMU
4da808c3
DM
238 sethi %hi(KERNBASE), %o4
239 flush %o4
1da177e4
LT
240 wrpr %g0, 0, %tl
241 retl
242 wrpr %g7, 0x0, %pstate
243
#ifdef DCACHE_ALIASING_POSSIBLE
	/* Cheetah D-cache page flush: use the displacement-invalidate
	 * ASI line by line, no tag comparison needed.
	 * %o0 = kernel virtual address of the page.
	 */
__cheetah_flush_dcache_page:	/* 11 insns */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0			! physical address
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4		! one 32-byte line per pass
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retl	/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
52bf082f 259 /* Hypervisor specific versions, patched at boot time. */
2a3a5f5d
DM
260__hypervisor_tlb_tl0_error:
261 save %sp, -192, %sp
262 mov %i0, %o0
263 call hypervisor_tlbop_error
264 mov %i1, %o1
265 ret
266 restore
267
268__hypervisor_flush_tlb_mm: /* 10 insns */
52bf082f
DM
269 mov %o0, %o2 /* ARG2: mmu context */
270 mov 0, %o0 /* ARG0: CPU lists unimplemented */
271 mov 0, %o1 /* ARG1: CPU lists unimplemented */
272 mov HV_MMU_ALL, %o3 /* ARG3: flags */
273 mov HV_FAST_MMU_DEMAP_CTX, %o5
274 ta HV_FAST_TRAP
2a3a5f5d
DM
275 brnz,pn %o0, __hypervisor_tlb_tl0_error
276 mov HV_FAST_MMU_DEMAP_CTX, %o1
52bf082f
DM
277 retl
278 nop
279
2a3a5f5d 280__hypervisor_flush_tlb_pending: /* 16 insns */
52bf082f
DM
281 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
282 sllx %o1, 3, %g1
283 mov %o2, %g2
284 mov %o0, %g3
2851: sub %g1, (1 << 3), %g1
286 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
287 mov %g3, %o1 /* ARG1: mmu context */
2a3a5f5d
DM
288 mov HV_MMU_ALL, %o2 /* ARG2: flags */
289 srlx %o0, PAGE_SHIFT, %o0
290 sllx %o0, PAGE_SHIFT, %o0
52bf082f 291 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
292 brnz,pn %o0, __hypervisor_tlb_tl0_error
293 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
52bf082f
DM
294 brnz,pt %g1, 1b
295 nop
296 retl
297 nop
298
2a3a5f5d 299__hypervisor_flush_tlb_kernel_range: /* 16 insns */
52bf082f
DM
300 /* %o0=start, %o1=end */
301 cmp %o0, %o1
302 be,pn %xcc, 2f
303 sethi %hi(PAGE_SIZE), %g3
304 mov %o0, %g1
305 sub %o1, %g1, %g2
306 sub %g2, %g3, %g2
3071: add %g1, %g2, %o0 /* ARG0: virtual address */
308 mov 0, %o1 /* ARG1: mmu context */
309 mov HV_MMU_ALL, %o2 /* ARG2: flags */
310 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
311 brnz,pn %o0, __hypervisor_tlb_tl0_error
312 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
52bf082f
DM
313 brnz,pt %g2, 1b
314 sub %g2, %g3, %g2
3152: retl
316 nop
317
#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	 nop
#endif
328tlb_patch_one:
1da177e4
LT
3291: lduw [%o1], %g1
330 stw %g1, [%o0]
331 flush %o0
332 subcc %o2, 1, %o2
333 add %o1, 4, %o1
334 bne,pt %icc, 1b
335 add %o0, 4, %o0
336 retl
337 nop
338
339 .globl cheetah_patch_cachetlbops
340cheetah_patch_cachetlbops:
341 save %sp, -128, %sp
342
343 sethi %hi(__flush_tlb_mm), %o0
344 or %o0, %lo(__flush_tlb_mm), %o0
345 sethi %hi(__cheetah_flush_tlb_mm), %o1
346 or %o1, %lo(__cheetah_flush_tlb_mm), %o1
52bf082f 347 call tlb_patch_one
4da808c3 348 mov 19, %o2
1da177e4
LT
349
350 sethi %hi(__flush_tlb_pending), %o0
351 or %o0, %lo(__flush_tlb_pending), %o0
352 sethi %hi(__cheetah_flush_tlb_pending), %o1
353 or %o1, %lo(__cheetah_flush_tlb_pending), %o1
52bf082f 354 call tlb_patch_one
4da808c3 355 mov 27, %o2
1da177e4
LT
356
357#ifdef DCACHE_ALIASING_POSSIBLE
358 sethi %hi(__flush_dcache_page), %o0
359 or %o0, %lo(__flush_dcache_page), %o0
c5bd50a9
DM
360 sethi %hi(__cheetah_flush_dcache_page), %o1
361 or %o1, %lo(__cheetah_flush_dcache_page), %o1
52bf082f 362 call tlb_patch_one
1da177e4
LT
363 mov 11, %o2
364#endif /* DCACHE_ALIASING_POSSIBLE */
365
366 ret
367 restore
368
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 * %g5	mm->context	(all tlb flushes)
	 * %g1	address arg 1	(tlb page and range flushes)
	 * %g7	address arg 2	(tlb range flush only)
	 *
	 * %g6	scratch 1
	 * %g2	scratch 2
	 * %g3	scratch 3
	 * %g4	scratch 4
	 */
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
	mov	PRIMARY_CONTEXT, %g2
	ldxa	[%g2] ASI_DMMU, %g3		! save current primary ctx
	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa	%g5, [%g2] ASI_DMMU
	mov	0x40, %g4			! demap-context op, primary ctx
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU		! restore primary ctx
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
408 .globl xcall_flush_tlb_pending
2a3a5f5d 409xcall_flush_tlb_pending: /* 21 insns */
1da177e4
LT
410 /* %g5=context, %g1=nr, %g7=vaddrs[] */
411 sllx %g1, 3, %g1
412 mov PRIMARY_CONTEXT, %g4
413 ldxa [%g4] ASI_DMMU, %g2
2ef27778
DM
414 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
415 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
416 or %g5, %g4, %g5
417 mov PRIMARY_CONTEXT, %g4
1da177e4
LT
418 stxa %g5, [%g4] ASI_DMMU
4191: sub %g1, (1 << 3), %g1
420 ldx [%g7 + %g1], %g5
421 andcc %g5, 0x1, %g0
422 be,pn %icc, 2f
423
424 andn %g5, 0x1, %g5
425 stxa %g0, [%g5] ASI_IMMU_DEMAP
4262: stxa %g0, [%g5] ASI_DMMU_DEMAP
427 membar #Sync
428 brnz,pt %g1, 1b
429 nop
430 stxa %g2, [%g4] ASI_DMMU
431 retry
2a3a5f5d 432 nop
1da177e4
LT
433
434 .globl xcall_flush_tlb_kernel_range
2a3a5f5d 435xcall_flush_tlb_kernel_range: /* 25 insns */
1da177e4
LT
436 sethi %hi(PAGE_SIZE - 1), %g2
437 or %g2, %lo(PAGE_SIZE - 1), %g2
438 andn %g1, %g2, %g1
439 andn %g7, %g2, %g7
440 sub %g7, %g1, %g3
441 add %g2, 1, %g2
442 sub %g3, %g2, %g3
443 or %g1, 0x20, %g1 ! Nucleus
4441: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
445 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
446 membar #Sync
447 brnz,pt %g3, 1b
448 sub %g3, %g2, %g3
449 retry
450 nop
451 nop
52bf082f
DM
452 nop
453 nop
454 nop
455 nop
456 nop
457 nop
2a3a5f5d
DM
458 nop
459 nop
460 nop
1da177e4
LT
461
462 /* This runs in a very controlled environment, so we do
463 * not need to worry about BH races etc.
464 */
465 .globl xcall_sync_tick
466xcall_sync_tick:
45fec05f
DM
467
468661: rdpr %pstate, %g2
1da177e4 469 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
df7d6aec 470 .section .sun4v_2insn_patch, "ax"
45fec05f
DM
471 .word 661b
472 nop
473 nop
474 .previous
475
1da177e4
LT
476 rdpr %pil, %g2
477 wrpr %g0, 15, %pil
478 sethi %hi(109f), %g7
479 b,pt %xcc, etrap_irq
480109: or %g7, %lo(109b), %g7
481 call smp_synchronize_tick_client
482 nop
483 clr %l6
484 b rtrap_xcall
485 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
486
487 /* NOTE: This is SPECIAL!! We do etrap/rtrap however
488 * we choose to deal with the "BH's run with
489 * %pil==15" problem (described in asm/pil.h)
490 * by just invoking rtrap directly past where
491 * BH's are checked for.
492 *
493 * We do it like this because we do not want %pil==15
494 * lockups to prevent regs being reported.
495 */
496 .globl xcall_report_regs
497xcall_report_regs:
45fec05f
DM
498
499661: rdpr %pstate, %g2
1da177e4 500 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
df7d6aec 501 .section .sun4v_2insn_patch, "ax"
45fec05f
DM
502 .word 661b
503 nop
504 nop
505 .previous
506
1da177e4
LT
507 rdpr %pil, %g2
508 wrpr %g0, 15, %pil
509 sethi %hi(109f), %g7
510 b,pt %xcc, etrap_irq
511109: or %g7, %lo(109b), %g7
512 call __show_regs
513 add %sp, PTREGS_OFF, %o0
514 clr %l6
515 /* Has to be a non-v9 branch due to the large distance. */
516 b rtrap_xcall
517 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
518
#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	/* Cross-call slave: displacement-invalidate one page from the
	 * Cheetah D-cache.
	 */
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3		! one 32-byte line per pass
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
533 .globl xcall_flush_dcache_page_spitfire
534xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
535 %g7 == kernel page virtual address
536 %g5 == (page->mapping != NULL) */
537#ifdef DCACHE_ALIASING_POSSIBLE
538 srlx %g1, (13 - 2), %g1 ! Form tag comparitor
539 sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
540 sub %g3, (1 << 5), %g3 ! D$ linesize == 32
5411: ldxa [%g3] ASI_DCACHE_TAG, %g2
542 andcc %g2, 0x3, %g0
543 be,pn %xcc, 2f
544 andn %g2, 0x3, %g2
545 cmp %g2, %g1
546
547 bne,pt %xcc, 2f
548 nop
549 stxa %g0, [%g3] ASI_DCACHE_TAG
550 membar #Sync
5512: cmp %g3, 0
552 bne,pt %xcc, 1b
553 sub %g3, (1 << 5), %g3
554
555 brz,pn %g5, 2f
556#endif /* DCACHE_ALIASING_POSSIBLE */
557 sethi %hi(PAGE_SIZE), %g3
558
5591: flush %g7
560 subcc %g3, (1 << 5), %g3
561 bne,pt %icc, 1b
562 add %g7, (1 << 5), %g7
563
5642: retry
565 nop
566 nop
567
2a3a5f5d
DM
568 /* %g5: error
569 * %g6: tlb op
570 */
571__hypervisor_tlb_xcall_error:
572 mov %g5, %g4
573 mov %g6, %g5
574 ba,pt %xcc, etrap
575 rd %pc, %g7
576 mov %l4, %o0
577 call hypervisor_tlbop_error_xcall
578 mov %l5, %o1
579 ba,a,pt %xcc, rtrap_clr_l6
580
52bf082f 581 .globl __hypervisor_xcall_flush_tlb_mm
2a3a5f5d 582__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
52bf082f
DM
583 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
584 mov %o0, %g2
585 mov %o1, %g3
586 mov %o2, %g4
587 mov %o3, %g1
588 mov %o5, %g7
589 clr %o0 /* ARG0: CPU lists unimplemented */
590 clr %o1 /* ARG1: CPU lists unimplemented */
591 mov %g5, %o2 /* ARG2: mmu context */
592 mov HV_MMU_ALL, %o3 /* ARG3: flags */
593 mov HV_FAST_MMU_DEMAP_CTX, %o5
594 ta HV_FAST_TRAP
2a3a5f5d
DM
595 mov HV_FAST_MMU_DEMAP_CTX, %g6
596 brnz,pn %o0, __hypervisor_tlb_xcall_error
597 mov %o0, %g5
52bf082f
DM
598 mov %g2, %o0
599 mov %g3, %o1
600 mov %g4, %o2
601 mov %g1, %o3
602 mov %g7, %o5
603 membar #Sync
604 retry
605
606 .globl __hypervisor_xcall_flush_tlb_pending
2a3a5f5d
DM
607__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
608 /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
52bf082f
DM
609 sllx %g1, 3, %g1
610 mov %o0, %g2
611 mov %o1, %g3
612 mov %o2, %g4
6131: sub %g1, (1 << 3), %g1
614 ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
615 mov %g5, %o1 /* ARG1: mmu context */
2a3a5f5d
DM
616 mov HV_MMU_ALL, %o2 /* ARG2: flags */
617 srlx %o0, PAGE_SHIFT, %o0
618 sllx %o0, PAGE_SHIFT, %o0
52bf082f 619 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
620 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
621 brnz,a,pn %o0, __hypervisor_tlb_xcall_error
622 mov %o0, %g5
52bf082f
DM
623 brnz,pt %g1, 1b
624 nop
625 mov %g2, %o0
626 mov %g3, %o1
627 mov %g4, %o2
628 membar #Sync
629 retry
630
631 .globl __hypervisor_xcall_flush_tlb_kernel_range
2a3a5f5d
DM
632__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
633 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
52bf082f
DM
634 sethi %hi(PAGE_SIZE - 1), %g2
635 or %g2, %lo(PAGE_SIZE - 1), %g2
636 andn %g1, %g2, %g1
637 andn %g7, %g2, %g7
638 sub %g7, %g1, %g3
639 add %g2, 1, %g2
640 sub %g3, %g2, %g3
641 mov %o0, %g2
642 mov %o1, %g4
2a3a5f5d 643 mov %o2, %g7
52bf082f
DM
6441: add %g1, %g3, %o0 /* ARG0: virtual address */
645 mov 0, %o1 /* ARG1: mmu context */
646 mov HV_MMU_ALL, %o2 /* ARG2: flags */
647 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
648 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
649 brnz,pn %o0, __hypervisor_tlb_xcall_error
650 mov %o0, %g5
52bf082f
DM
651 sethi %hi(PAGE_SIZE), %o2
652 brnz,pt %g3, 1b
653 sub %g3, %o2, %g3
654 mov %g2, %o0
655 mov %g4, %o1
2a3a5f5d 656 mov %g7, %o2
52bf082f
DM
657 membar #Sync
658 retry
659
1da177e4
LT
660 /* These just get rescheduled to PIL vectors. */
661 .globl xcall_call_function
662xcall_call_function:
663 wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
664 retry
665
666 .globl xcall_receive_signal
667xcall_receive_signal:
668 wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
669 retry
670
671 .globl xcall_capture
672xcall_capture:
673 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
674 retry
675
ee29074d
DM
676 .globl xcall_new_mmu_context_version
677xcall_new_mmu_context_version:
678 wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
679 retry
680
1da177e4 681#endif /* CONFIG_SMP */
52bf082f
DM
682
683
684 .globl hypervisor_patch_cachetlbops
685hypervisor_patch_cachetlbops:
686 save %sp, -128, %sp
687
688 sethi %hi(__flush_tlb_mm), %o0
689 or %o0, %lo(__flush_tlb_mm), %o0
690 sethi %hi(__hypervisor_flush_tlb_mm), %o1
691 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
692 call tlb_patch_one
2a3a5f5d 693 mov 10, %o2
52bf082f
DM
694
695 sethi %hi(__flush_tlb_pending), %o0
696 or %o0, %lo(__flush_tlb_pending), %o0
697 sethi %hi(__hypervisor_flush_tlb_pending), %o1
698 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
699 call tlb_patch_one
2a3a5f5d 700 mov 16, %o2
52bf082f
DM
701
702 sethi %hi(__flush_tlb_kernel_range), %o0
703 or %o0, %lo(__flush_tlb_kernel_range), %o0
704 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
705 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
706 call tlb_patch_one
2a3a5f5d 707 mov 16, %o2
52bf082f
DM
708
709#ifdef DCACHE_ALIASING_POSSIBLE
710 sethi %hi(__flush_dcache_page), %o0
711 or %o0, %lo(__flush_dcache_page), %o0
712 sethi %hi(__hypervisor_flush_dcache_page), %o1
713 or %o1, %lo(__hypervisor_flush_dcache_page), %o1
714 call tlb_patch_one
715 mov 2, %o2
716#endif /* DCACHE_ALIASING_POSSIBLE */
717
718#ifdef CONFIG_SMP
719 sethi %hi(xcall_flush_tlb_mm), %o0
720 or %o0, %lo(xcall_flush_tlb_mm), %o0
721 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
722 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
723 call tlb_patch_one
2a3a5f5d 724 mov 21, %o2
52bf082f
DM
725
726 sethi %hi(xcall_flush_tlb_pending), %o0
727 or %o0, %lo(xcall_flush_tlb_pending), %o0
728 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
729 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
730 call tlb_patch_one
2a3a5f5d 731 mov 21, %o2
52bf082f
DM
732
733 sethi %hi(xcall_flush_tlb_kernel_range), %o0
734 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
735 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
736 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
737 call tlb_patch_one
2a3a5f5d 738 mov 25, %o2
52bf082f
DM
739#endif /* CONFIG_SMP */
740
741 ret
742 restore
This page took 0.221828 seconds and 5 git commands to generate.