/* arch/sparc64/mm/ultra.S
 * Imported with commit: "[SPARC64]: Add irqtrace/stacktrace/lockdep support."
 */
/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
19 /* Basically, most of the Spitfire vs. Cheetah madness
20 * has to do with the fact that Cheetah does not support
21 * IMMU flushes out of the secondary context. Someone needs
22 * to throw a south lake birthday party for the folks
23 * in Microelectronics who refused to fix this shit.
24 */
25
26 /* This file is meant to be read efficiently by the CPU, not humans.
27 * Staraj sie tego nikomu nie pierdolnac...
28 */
29 .text
30 .align 32
31 .globl __flush_tlb_mm
52bf082f
DM
32__flush_tlb_mm: /* 18 insns */
33 /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
1da177e4
LT
34 ldxa [%o1] ASI_DMMU, %g2
35 cmp %g2, %o0
36 bne,pn %icc, __spitfire_flush_tlb_mm_slow
37 mov 0x50, %g3
38 stxa %g0, [%g3] ASI_DMMU_DEMAP
39 stxa %g0, [%g3] ASI_IMMU_DEMAP
4da808c3
DM
40 sethi %hi(KERNBASE), %g3
41 flush %g3
1da177e4 42 retl
4da808c3 43 nop
1da177e4
LT
44 nop
45 nop
46 nop
47 nop
48 nop
49 nop
50 nop
2ef27778
DM
51 nop
52 nop
1da177e4
LT
53
54 .align 32
55 .globl __flush_tlb_pending
52bf082f 56__flush_tlb_pending: /* 26 insns */
1da177e4
LT
57 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
58 rdpr %pstate, %g7
59 sllx %o1, 3, %o1
60 andn %g7, PSTATE_IE, %g2
61 wrpr %g2, %pstate
62 mov SECONDARY_CONTEXT, %o4
63 ldxa [%o4] ASI_DMMU, %g2
64 stxa %o0, [%o4] ASI_DMMU
651: sub %o1, (1 << 3), %o1
66 ldx [%o2 + %o1], %o3
67 andcc %o3, 1, %g0
68 andn %o3, 1, %o3
69 be,pn %icc, 2f
70 or %o3, 0x10, %o3
71 stxa %g0, [%o3] ASI_IMMU_DEMAP
722: stxa %g0, [%o3] ASI_DMMU_DEMAP
73 membar #Sync
74 brnz,pt %o1, 1b
75 nop
76 stxa %g2, [%o4] ASI_DMMU
4da808c3
DM
77 sethi %hi(KERNBASE), %o4
78 flush %o4
1da177e4
LT
79 retl
80 wrpr %g7, 0x0, %pstate
fef43da4 81 nop
2ef27778
DM
82 nop
83 nop
84 nop
1da177e4
LT
85
86 .align 32
87 .globl __flush_tlb_kernel_range
1daef08a 88__flush_tlb_kernel_range: /* 16 insns */
52bf082f 89 /* %o0=start, %o1=end */
1da177e4
LT
90 cmp %o0, %o1
91 be,pn %xcc, 2f
92 sethi %hi(PAGE_SIZE), %o4
93 sub %o1, %o0, %o3
94 sub %o3, %o4, %o3
95 or %o0, 0x20, %o0 ! Nucleus
961: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
97 stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
98 membar #Sync
99 brnz,pt %o3, 1b
100 sub %o3, %o4, %o3
4da808c3
DM
1012: sethi %hi(KERNBASE), %o3
102 flush %o3
103 retl
104 nop
52bf082f 105 nop
1da177e4
LT
106
__spitfire_flush_tlb_mm_slow:
	/* Slow path of __flush_tlb_mm: the requested context is not the
	 * live secondary context, so install it, demap, then restore.
	 * Inherits %o0=ctx, %o1=SECONDARY_CONTEXT, %g2=saved ctx,
	 * %g3=0x50 (demap-context op) from the fast path.
	 */
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate	! toggle IE off
	stxa		%o0, [%o1] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o1] ASI_DMMU	! restore previous secondary ctx
	sethi		%hi(KERNBASE), %o1
	flush		%o1
	retl
	 wrpr		%g1, 0, %pstate
/*
 * The following code flushes one page_size worth.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif
	.section .kprobes.text, "ax"
	.align		32
	.globl		__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	/* Flush one page of the I-cache via `flush' instructions over the
	 * page's kernel linear mapping (PAGE_OFFSET + phys), one 32-byte
	 * line per iteration.
	 */
	membar		#StoreStore
	srlx		%o0, PAGE_SHIFT, %o0
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%o0, PAGE_SHIFT, %o0	! page-align the phys address
	sethi		%hi(PAGE_SIZE), %g2
	sllx		%g1, 32, %g1
	add		%o0, %g1, %o0		! %o0 = kernel vaddr of page
1:	subcc		%g2, 32, %g2
	bne,pt		%icc, 1b
	 flush		%o0 + %g2
	retl
	 nop
#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align		64
	.globl		__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	/* Walk every D-cache tag; invalidate lines whose tag matches this
	 * page.  Optionally chain to __flush_icache_page afterwards.
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0			! physical address
	srlx		%o0, 11, %o0			! make D-cache TAG
	sethi		%hi(1 << 14), %o2		! D-cache size
	sub		%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc		%o3, DTAG_MASK, %g0		! Valid?
	be,pn		%xcc, 2f			! Nope, branch
	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp		%o3, %o0			! TAG match?
	bne,pt		%xcc, 2f			! Nope, branch
	 nop
	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar		#Sync
2:	brnz,pt		%o2, 1b
	 sub		%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	 sllx		%o0, 11, %o0			! rebuild phys address
	retl
	 nop

#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous
2ef27778 191 /* Cheetah specific versions, patched at boot time. */
4da808c3 192__cheetah_flush_tlb_mm: /* 19 insns */
1da177e4
LT
193 rdpr %pstate, %g7
194 andn %g7, PSTATE_IE, %g2
195 wrpr %g2, 0x0, %pstate
196 wrpr %g0, 1, %tl
197 mov PRIMARY_CONTEXT, %o2
198 mov 0x40, %g3
199 ldxa [%o2] ASI_DMMU, %g2
2ef27778
DM
200 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1
201 sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1
202 or %o0, %o1, %o0 /* Preserve nucleus page size fields */
1da177e4
LT
203 stxa %o0, [%o2] ASI_DMMU
204 stxa %g0, [%g3] ASI_DMMU_DEMAP
205 stxa %g0, [%g3] ASI_IMMU_DEMAP
206 stxa %g2, [%o2] ASI_DMMU
4da808c3
DM
207 sethi %hi(KERNBASE), %o2
208 flush %o2
1da177e4
LT
209 wrpr %g0, 0, %tl
210 retl
211 wrpr %g7, 0x0, %pstate
212
4da808c3 213__cheetah_flush_tlb_pending: /* 27 insns */
1da177e4
LT
214 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
215 rdpr %pstate, %g7
216 sllx %o1, 3, %o1
217 andn %g7, PSTATE_IE, %g2
218 wrpr %g2, 0x0, %pstate
219 wrpr %g0, 1, %tl
220 mov PRIMARY_CONTEXT, %o4
221 ldxa [%o4] ASI_DMMU, %g2
2ef27778
DM
222 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
223 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
224 or %o0, %o3, %o0 /* Preserve nucleus page size fields */
1da177e4
LT
225 stxa %o0, [%o4] ASI_DMMU
2261: sub %o1, (1 << 3), %o1
227 ldx [%o2 + %o1], %o3
228 andcc %o3, 1, %g0
229 be,pn %icc, 2f
230 andn %o3, 1, %o3
231 stxa %g0, [%o3] ASI_IMMU_DEMAP
2322: stxa %g0, [%o3] ASI_DMMU_DEMAP
b445e26c 233 membar #Sync
1da177e4 234 brnz,pt %o1, 1b
b445e26c 235 nop
1da177e4 236 stxa %g2, [%o4] ASI_DMMU
4da808c3
DM
237 sethi %hi(KERNBASE), %o4
238 flush %o4
1da177e4
LT
239 wrpr %g0, 0, %tl
240 retl
241 wrpr %g7, 0x0, %pstate
242
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
	/* Displacement-free flush: Cheetah provides a direct D-cache line
	 * invalidate ASI, so just walk the page 32 bytes at a time.
	 * %o0 = kernel linear address of the page.
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0		! physical address
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
52bf082f 258 /* Hypervisor specific versions, patched at boot time. */
2a3a5f5d
DM
259__hypervisor_tlb_tl0_error:
260 save %sp, -192, %sp
261 mov %i0, %o0
262 call hypervisor_tlbop_error
263 mov %i1, %o1
264 ret
265 restore
266
__hypervisor_flush_tlb_mm: /* 10 insns */
	/* sun4v variant: one MMU_DEMAP_CTX hypercall flushes the whole
	 * context.  %o0 = mmu context on entry.
	 */
	mov		%o0, %o2	/* ARG2: mmu context */
	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop
2a3a5f5d 279__hypervisor_flush_tlb_pending: /* 16 insns */
52bf082f
DM
280 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
281 sllx %o1, 3, %g1
282 mov %o2, %g2
283 mov %o0, %g3
2841: sub %g1, (1 << 3), %g1
285 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
286 mov %g3, %o1 /* ARG1: mmu context */
2a3a5f5d
DM
287 mov HV_MMU_ALL, %o2 /* ARG2: flags */
288 srlx %o0, PAGE_SHIFT, %o0
289 sllx %o0, PAGE_SHIFT, %o0
52bf082f 290 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
291 brnz,pn %o0, __hypervisor_tlb_tl0_error
292 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
52bf082f
DM
293 brnz,pt %g1, 1b
294 nop
295 retl
296 nop
297
2a3a5f5d 298__hypervisor_flush_tlb_kernel_range: /* 16 insns */
52bf082f
DM
299 /* %o0=start, %o1=end */
300 cmp %o0, %o1
301 be,pn %xcc, 2f
302 sethi %hi(PAGE_SIZE), %g3
303 mov %o0, %g1
304 sub %o1, %g1, %g2
305 sub %g2, %g3, %g2
3061: add %g1, %g2, %o0 /* ARG0: virtual address */
307 mov 0, %o1 /* ARG1: mmu context */
308 mov HV_MMU_ALL, %o2 /* ARG2: flags */
309 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
310 brnz,pn %o0, __hypervisor_tlb_tl0_error
311 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
52bf082f
DM
312 brnz,pt %g2, 1b
313 sub %g2, %g3, %g2
3142: retl
315 nop
316
#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	 nop
#endif
tlb_patch_one:
	/* Copy %o2 instruction words from %o1 to %o0, flushing the I-cache
	 * line for each store.  Used below to overwrite the generic TLB ops
	 * with the CPU-specific variants at boot.
	 * %o0 = dest (patch site), %o1 = src, %o2 = number of insns
	 */
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0			! keep I-cache coherent
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0
	retl
	 nop
338 .globl cheetah_patch_cachetlbops
339cheetah_patch_cachetlbops:
340 save %sp, -128, %sp
341
342 sethi %hi(__flush_tlb_mm), %o0
343 or %o0, %lo(__flush_tlb_mm), %o0
344 sethi %hi(__cheetah_flush_tlb_mm), %o1
345 or %o1, %lo(__cheetah_flush_tlb_mm), %o1
52bf082f 346 call tlb_patch_one
4da808c3 347 mov 19, %o2
1da177e4
LT
348
349 sethi %hi(__flush_tlb_pending), %o0
350 or %o0, %lo(__flush_tlb_pending), %o0
351 sethi %hi(__cheetah_flush_tlb_pending), %o1
352 or %o1, %lo(__cheetah_flush_tlb_pending), %o1
52bf082f 353 call tlb_patch_one
4da808c3 354 mov 27, %o2
1da177e4
LT
355
356#ifdef DCACHE_ALIASING_POSSIBLE
357 sethi %hi(__flush_dcache_page), %o0
358 or %o0, %lo(__flush_dcache_page), %o0
c5bd50a9
DM
359 sethi %hi(__cheetah_flush_dcache_page), %o1
360 or %o1, %lo(__cheetah_flush_dcache_page), %o1
52bf082f 361 call tlb_patch_one
1da177e4
LT
362 mov 11, %o2
363#endif /* DCACHE_ALIASING_POSSIBLE */
364
365 ret
366 restore
367
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 * %g5	mm->context	(all tlb flushes)
	 * %g1	address arg 1	(tlb page and range flushes)
	 * %g7	address arg 2	(tlb range flush only)
	 *
	 * %g6	scratch 1
	 * %g2	scratch 2
	 * %g3	scratch 3
	 * %g4	scratch 4
	 */
	.align		32
	.globl		xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
	/* Cross-call slave: flush all TLB entries for context %g5 via the
	 * primary context register.  Patched at boot on sun4v; trailing
	 * nops reserve space for the hypervisor variant.
	 */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3	! save current primary ctx
	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa		%g5, [%g2] ASI_DMMU
	mov		0x40, %g4		! demap-context, primary ctx
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU	! restore primary ctx
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
407 .globl xcall_flush_tlb_pending
2a3a5f5d 408xcall_flush_tlb_pending: /* 21 insns */
1da177e4
LT
409 /* %g5=context, %g1=nr, %g7=vaddrs[] */
410 sllx %g1, 3, %g1
411 mov PRIMARY_CONTEXT, %g4
412 ldxa [%g4] ASI_DMMU, %g2
2ef27778
DM
413 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
414 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
415 or %g5, %g4, %g5
416 mov PRIMARY_CONTEXT, %g4
1da177e4
LT
417 stxa %g5, [%g4] ASI_DMMU
4181: sub %g1, (1 << 3), %g1
419 ldx [%g7 + %g1], %g5
420 andcc %g5, 0x1, %g0
421 be,pn %icc, 2f
422
423 andn %g5, 0x1, %g5
424 stxa %g0, [%g5] ASI_IMMU_DEMAP
4252: stxa %g0, [%g5] ASI_DMMU_DEMAP
426 membar #Sync
427 brnz,pt %g1, 1b
428 nop
429 stxa %g2, [%g4] ASI_DMMU
430 retry
2a3a5f5d 431 nop
1da177e4
LT
432
433 .globl xcall_flush_tlb_kernel_range
2a3a5f5d 434xcall_flush_tlb_kernel_range: /* 25 insns */
1da177e4
LT
435 sethi %hi(PAGE_SIZE - 1), %g2
436 or %g2, %lo(PAGE_SIZE - 1), %g2
437 andn %g1, %g2, %g1
438 andn %g7, %g2, %g7
439 sub %g7, %g1, %g3
440 add %g2, 1, %g2
441 sub %g3, %g2, %g3
442 or %g1, 0x20, %g1 ! Nucleus
4431: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
444 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
445 membar #Sync
446 brnz,pt %g3, 1b
447 sub %g3, %g2, %g3
448 retry
449 nop
450 nop
52bf082f
DM
451 nop
452 nop
453 nop
454 nop
455 nop
456 nop
2a3a5f5d
DM
457 nop
458 nop
459 nop
1da177e4
LT
460
461 /* This runs in a very controlled environment, so we do
462 * not need to worry about BH races etc.
463 */
464 .globl xcall_sync_tick
465xcall_sync_tick:
45fec05f
DM
466
467661: rdpr %pstate, %g2
1da177e4 468 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
df7d6aec 469 .section .sun4v_2insn_patch, "ax"
45fec05f
DM
470 .word 661b
471 nop
472 nop
473 .previous
474
1da177e4
LT
475 rdpr %pil, %g2
476 wrpr %g0, 15, %pil
477 sethi %hi(109f), %g7
478 b,pt %xcc, etrap_irq
479109: or %g7, %lo(109b), %g7
10e26723
DM
480#ifdef CONFIG_TRACE_IRQFLAGS
481 call trace_hardirqs_off
482 nop
483#endif
1da177e4
LT
484 call smp_synchronize_tick_client
485 nop
486 clr %l6
487 b rtrap_xcall
488 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
489
490 /* NOTE: This is SPECIAL!! We do etrap/rtrap however
491 * we choose to deal with the "BH's run with
492 * %pil==15" problem (described in asm/pil.h)
493 * by just invoking rtrap directly past where
494 * BH's are checked for.
495 *
496 * We do it like this because we do not want %pil==15
497 * lockups to prevent regs being reported.
498 */
499 .globl xcall_report_regs
500xcall_report_regs:
45fec05f
DM
501
502661: rdpr %pstate, %g2
1da177e4 503 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
df7d6aec 504 .section .sun4v_2insn_patch, "ax"
45fec05f
DM
505 .word 661b
506 nop
507 nop
508 .previous
509
1da177e4
LT
510 rdpr %pil, %g2
511 wrpr %g0, 15, %pil
512 sethi %hi(109f), %g7
513 b,pt %xcc, etrap_irq
514109: or %g7, %lo(109b), %g7
10e26723
DM
515#ifdef CONFIG_TRACE_IRQFLAGS
516 call trace_hardirqs_off
517 nop
518#endif
1da177e4
LT
519 call __show_regs
520 add %sp, PTREGS_OFF, %o0
521 clr %l6
522 /* Has to be a non-v9 branch due to the large distance. */
523 b rtrap_xcall
524 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
525
#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Cross-call slave: invalidate one page of D-cache lines via the
	 * direct-invalidate ASI (Cheetah only).
	 */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
540 .globl xcall_flush_dcache_page_spitfire
541xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
542 %g7 == kernel page virtual address
543 %g5 == (page->mapping != NULL) */
544#ifdef DCACHE_ALIASING_POSSIBLE
545 srlx %g1, (13 - 2), %g1 ! Form tag comparitor
546 sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
547 sub %g3, (1 << 5), %g3 ! D$ linesize == 32
5481: ldxa [%g3] ASI_DCACHE_TAG, %g2
549 andcc %g2, 0x3, %g0
550 be,pn %xcc, 2f
551 andn %g2, 0x3, %g2
552 cmp %g2, %g1
553
554 bne,pt %xcc, 2f
555 nop
556 stxa %g0, [%g3] ASI_DCACHE_TAG
557 membar #Sync
5582: cmp %g3, 0
559 bne,pt %xcc, 1b
560 sub %g3, (1 << 5), %g3
561
562 brz,pn %g5, 2f
563#endif /* DCACHE_ALIASING_POSSIBLE */
564 sethi %hi(PAGE_SIZE), %g3
565
5661: flush %g7
567 subcc %g3, (1 << 5), %g3
568 bne,pt %icc, 1b
569 add %g7, (1 << 5), %g7
570
5712: retry
572 nop
573 nop
574
2a3a5f5d
DM
575 /* %g5: error
576 * %g6: tlb op
577 */
578__hypervisor_tlb_xcall_error:
579 mov %g5, %g4
580 mov %g6, %g5
581 ba,pt %xcc, etrap
582 rd %pc, %g7
583 mov %l4, %o0
584 call hypervisor_tlbop_error_xcall
585 mov %l5, %o1
586 ba,a,pt %xcc, rtrap_clr_l6
587
52bf082f 588 .globl __hypervisor_xcall_flush_tlb_mm
2a3a5f5d 589__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
52bf082f
DM
590 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
591 mov %o0, %g2
592 mov %o1, %g3
593 mov %o2, %g4
594 mov %o3, %g1
595 mov %o5, %g7
596 clr %o0 /* ARG0: CPU lists unimplemented */
597 clr %o1 /* ARG1: CPU lists unimplemented */
598 mov %g5, %o2 /* ARG2: mmu context */
599 mov HV_MMU_ALL, %o3 /* ARG3: flags */
600 mov HV_FAST_MMU_DEMAP_CTX, %o5
601 ta HV_FAST_TRAP
2a3a5f5d
DM
602 mov HV_FAST_MMU_DEMAP_CTX, %g6
603 brnz,pn %o0, __hypervisor_tlb_xcall_error
604 mov %o0, %g5
52bf082f
DM
605 mov %g2, %o0
606 mov %g3, %o1
607 mov %g4, %o2
608 mov %g1, %o3
609 mov %g7, %o5
610 membar #Sync
611 retry
612
613 .globl __hypervisor_xcall_flush_tlb_pending
2a3a5f5d
DM
614__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
615 /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
52bf082f
DM
616 sllx %g1, 3, %g1
617 mov %o0, %g2
618 mov %o1, %g3
619 mov %o2, %g4
6201: sub %g1, (1 << 3), %g1
621 ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
622 mov %g5, %o1 /* ARG1: mmu context */
2a3a5f5d
DM
623 mov HV_MMU_ALL, %o2 /* ARG2: flags */
624 srlx %o0, PAGE_SHIFT, %o0
625 sllx %o0, PAGE_SHIFT, %o0
52bf082f 626 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
627 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
628 brnz,a,pn %o0, __hypervisor_tlb_xcall_error
629 mov %o0, %g5
52bf082f
DM
630 brnz,pt %g1, 1b
631 nop
632 mov %g2, %o0
633 mov %g3, %o1
634 mov %g4, %o2
635 membar #Sync
636 retry
637
638 .globl __hypervisor_xcall_flush_tlb_kernel_range
2a3a5f5d
DM
639__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
640 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
52bf082f
DM
641 sethi %hi(PAGE_SIZE - 1), %g2
642 or %g2, %lo(PAGE_SIZE - 1), %g2
643 andn %g1, %g2, %g1
644 andn %g7, %g2, %g7
645 sub %g7, %g1, %g3
646 add %g2, 1, %g2
647 sub %g3, %g2, %g3
648 mov %o0, %g2
649 mov %o1, %g4
2a3a5f5d 650 mov %o2, %g7
52bf082f
DM
6511: add %g1, %g3, %o0 /* ARG0: virtual address */
652 mov 0, %o1 /* ARG1: mmu context */
653 mov HV_MMU_ALL, %o2 /* ARG2: flags */
654 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
655 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
656 brnz,pn %o0, __hypervisor_tlb_xcall_error
657 mov %o0, %g5
52bf082f
DM
658 sethi %hi(PAGE_SIZE), %o2
659 brnz,pt %g3, 1b
660 sub %g3, %o2, %g3
661 mov %g2, %o0
662 mov %g4, %o1
2a3a5f5d 663 mov %g7, %o2
52bf082f
DM
664 membar #Sync
665 retry
666
1da177e4
LT
667 /* These just get rescheduled to PIL vectors. */
668 .globl xcall_call_function
669xcall_call_function:
670 wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
671 retry
672
673 .globl xcall_receive_signal
674xcall_receive_signal:
675 wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
676 retry
677
678 .globl xcall_capture
679xcall_capture:
680 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
681 retry
682
ee29074d
DM
683 .globl xcall_new_mmu_context_version
684xcall_new_mmu_context_version:
685 wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
686 retry
687
1da177e4 688#endif /* CONFIG_SMP */
52bf082f
DM
689
690
691 .globl hypervisor_patch_cachetlbops
692hypervisor_patch_cachetlbops:
693 save %sp, -128, %sp
694
695 sethi %hi(__flush_tlb_mm), %o0
696 or %o0, %lo(__flush_tlb_mm), %o0
697 sethi %hi(__hypervisor_flush_tlb_mm), %o1
698 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
699 call tlb_patch_one
2a3a5f5d 700 mov 10, %o2
52bf082f
DM
701
702 sethi %hi(__flush_tlb_pending), %o0
703 or %o0, %lo(__flush_tlb_pending), %o0
704 sethi %hi(__hypervisor_flush_tlb_pending), %o1
705 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
706 call tlb_patch_one
2a3a5f5d 707 mov 16, %o2
52bf082f
DM
708
709 sethi %hi(__flush_tlb_kernel_range), %o0
710 or %o0, %lo(__flush_tlb_kernel_range), %o0
711 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
712 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
713 call tlb_patch_one
2a3a5f5d 714 mov 16, %o2
52bf082f
DM
715
716#ifdef DCACHE_ALIASING_POSSIBLE
717 sethi %hi(__flush_dcache_page), %o0
718 or %o0, %lo(__flush_dcache_page), %o0
719 sethi %hi(__hypervisor_flush_dcache_page), %o1
720 or %o1, %lo(__hypervisor_flush_dcache_page), %o1
721 call tlb_patch_one
722 mov 2, %o2
723#endif /* DCACHE_ALIASING_POSSIBLE */
724
725#ifdef CONFIG_SMP
726 sethi %hi(xcall_flush_tlb_mm), %o0
727 or %o0, %lo(xcall_flush_tlb_mm), %o0
728 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
729 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
730 call tlb_patch_one
2a3a5f5d 731 mov 21, %o2
52bf082f
DM
732
733 sethi %hi(xcall_flush_tlb_pending), %o0
734 or %o0, %lo(xcall_flush_tlb_pending), %o0
735 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
736 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
737 call tlb_patch_one
2a3a5f5d 738 mov 21, %o2
52bf082f
DM
739
740 sethi %hi(xcall_flush_tlb_kernel_range), %o0
741 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
742 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
743 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
744 call tlb_patch_one
2a3a5f5d 745 mov 25, %o2
52bf082f
DM
746#endif /* CONFIG_SMP */
747
748 ret
749 restore
/* End of arch/sparc64/mm/ultra.S */