/* sparc: Add kgdb support.
 * [deliverable/linux.git] / arch / sparc64 / mm / ultra.S
 */
1/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
2 * ultra.S: Don't expand these all over the place...
3 *
4 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
5 */
6
1da177e4
LT
7#include <asm/asi.h>
8#include <asm/pgtable.h>
9#include <asm/page.h>
10#include <asm/spitfire.h>
11#include <asm/mmu_context.h>
2ef27778 12#include <asm/mmu.h>
1da177e4
LT
13#include <asm/pil.h>
14#include <asm/head.h>
15#include <asm/thread_info.h>
16#include <asm/cacheflush.h>
52bf082f 17#include <asm/hypervisor.h>
1da177e4
LT
18
19 /* Basically, most of the Spitfire vs. Cheetah madness
20 * has to do with the fact that Cheetah does not support
21 * IMMU flushes out of the secondary context. Someone needs
22 * to throw a south lake birthday party for the folks
23 * in Microelectronics who refused to fix this shit.
24 */
25
26 /* This file is meant to be read efficiently by the CPU, not humans.
27 * Staraj sie tego nikomu nie pierdolnac...
28 */
29 .text
30 .align 32
31 .globl __flush_tlb_mm
52bf082f
DM
32__flush_tlb_mm: /* 18 insns */
33 /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
1da177e4
LT
34 ldxa [%o1] ASI_DMMU, %g2
35 cmp %g2, %o0
36 bne,pn %icc, __spitfire_flush_tlb_mm_slow
37 mov 0x50, %g3
38 stxa %g0, [%g3] ASI_DMMU_DEMAP
39 stxa %g0, [%g3] ASI_IMMU_DEMAP
4da808c3
DM
40 sethi %hi(KERNBASE), %g3
41 flush %g3
1da177e4 42 retl
4da808c3 43 nop
1da177e4
LT
44 nop
45 nop
46 nop
47 nop
48 nop
49 nop
50 nop
2ef27778
DM
51 nop
52 nop
1da177e4
LT
53
54 .align 32
55 .globl __flush_tlb_pending
52bf082f 56__flush_tlb_pending: /* 26 insns */
1da177e4
LT
57 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
58 rdpr %pstate, %g7
59 sllx %o1, 3, %o1
60 andn %g7, PSTATE_IE, %g2
61 wrpr %g2, %pstate
62 mov SECONDARY_CONTEXT, %o4
63 ldxa [%o4] ASI_DMMU, %g2
64 stxa %o0, [%o4] ASI_DMMU
651: sub %o1, (1 << 3), %o1
66 ldx [%o2 + %o1], %o3
67 andcc %o3, 1, %g0
68 andn %o3, 1, %o3
69 be,pn %icc, 2f
70 or %o3, 0x10, %o3
71 stxa %g0, [%o3] ASI_IMMU_DEMAP
722: stxa %g0, [%o3] ASI_DMMU_DEMAP
73 membar #Sync
74 brnz,pt %o1, 1b
75 nop
76 stxa %g2, [%o4] ASI_DMMU
4da808c3
DM
77 sethi %hi(KERNBASE), %o4
78 flush %o4
1da177e4
LT
79 retl
80 wrpr %g7, 0x0, %pstate
fef43da4 81 nop
2ef27778
DM
82 nop
83 nop
84 nop
1da177e4
LT
85
86 .align 32
87 .globl __flush_tlb_kernel_range
1daef08a 88__flush_tlb_kernel_range: /* 16 insns */
52bf082f 89 /* %o0=start, %o1=end */
1da177e4
LT
90 cmp %o0, %o1
91 be,pn %xcc, 2f
92 sethi %hi(PAGE_SIZE), %o4
93 sub %o1, %o0, %o3
94 sub %o3, %o4, %o3
95 or %o0, 0x20, %o0 ! Nucleus
961: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
97 stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
98 membar #Sync
99 brnz,pt %o3, 1b
100 sub %o3, %o4, %o3
4da808c3
DM
1012: sethi %hi(KERNBASE), %o3
102 flush %o3
103 retl
104 nop
52bf082f 105 nop
1da177e4
LT
106
107__spitfire_flush_tlb_mm_slow:
108 rdpr %pstate, %g1
109 wrpr %g1, PSTATE_IE, %pstate
110 stxa %o0, [%o1] ASI_DMMU
111 stxa %g0, [%g3] ASI_DMMU_DEMAP
112 stxa %g0, [%g3] ASI_IMMU_DEMAP
113 flush %g6
114 stxa %g2, [%o1] ASI_DMMU
4da808c3
DM
115 sethi %hi(KERNBASE), %o1
116 flush %o1
1da177e4
LT
117 retl
118 wrpr %g1, 0, %pstate
119
120/*
121 * The following code flushes one page_size worth.
122 */
83005161 123 .section .kprobes.text, "ax"
1da177e4
LT
124 .align 32
125 .globl __flush_icache_page
126__flush_icache_page: /* %o0 = phys_page */
127 membar #StoreStore
128 srlx %o0, PAGE_SHIFT, %o0
129 sethi %uhi(PAGE_OFFSET), %g1
130 sllx %o0, PAGE_SHIFT, %o0
131 sethi %hi(PAGE_SIZE), %g2
132 sllx %g1, 32, %g1
133 add %o0, %g1, %o0
1341: subcc %g2, 32, %g2
135 bne,pt %icc, 1b
136 flush %o0 + %g2
137 retl
138 nop
139
140#ifdef DCACHE_ALIASING_POSSIBLE
141
142#if (PAGE_SHIFT != 13)
143#error only page shift of 13 is supported by dcache flush
144#endif
145
146#define DTAG_MASK 0x3
147
c5bd50a9
DM
148 /* This routine is Spitfire specific so the hardcoded
149 * D-cache size and line-size are OK.
150 */
1da177e4
LT
151 .align 64
152 .globl __flush_dcache_page
153__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
154 sethi %uhi(PAGE_OFFSET), %g1
155 sllx %g1, 32, %g1
c5bd50a9
DM
156 sub %o0, %g1, %o0 ! physical address
157 srlx %o0, 11, %o0 ! make D-cache TAG
158 sethi %hi(1 << 14), %o2 ! D-cache size
159 sub %o2, (1 << 5), %o2 ! D-cache line size
1601: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG
161 andcc %o3, DTAG_MASK, %g0 ! Valid?
162 be,pn %xcc, 2f ! Nope, branch
163 andn %o3, DTAG_MASK, %o3 ! Clear valid bits
164 cmp %o3, %o0 ! TAG match?
165 bne,pt %xcc, 2f ! Nope, branch
166 nop
167 stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG
168 membar #Sync
1692: brnz,pt %o2, 1b
170 sub %o2, (1 << 5), %o2 ! D-cache line size
1da177e4
LT
171
172 /* The I-cache does not snoop local stores so we
173 * better flush that too when necessary.
174 */
175 brnz,pt %o1, __flush_icache_page
176 sllx %o0, 11, %o0
177 retl
178 nop
179
1da177e4
LT
180#endif /* DCACHE_ALIASING_POSSIBLE */
181
c5bd50a9
DM
182 .previous
183
2ef27778 184 /* Cheetah specific versions, patched at boot time. */
4da808c3 185__cheetah_flush_tlb_mm: /* 19 insns */
1da177e4
LT
186 rdpr %pstate, %g7
187 andn %g7, PSTATE_IE, %g2
188 wrpr %g2, 0x0, %pstate
189 wrpr %g0, 1, %tl
190 mov PRIMARY_CONTEXT, %o2
191 mov 0x40, %g3
192 ldxa [%o2] ASI_DMMU, %g2
2ef27778
DM
193 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1
194 sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1
195 or %o0, %o1, %o0 /* Preserve nucleus page size fields */
1da177e4
LT
196 stxa %o0, [%o2] ASI_DMMU
197 stxa %g0, [%g3] ASI_DMMU_DEMAP
198 stxa %g0, [%g3] ASI_IMMU_DEMAP
199 stxa %g2, [%o2] ASI_DMMU
4da808c3
DM
200 sethi %hi(KERNBASE), %o2
201 flush %o2
1da177e4
LT
202 wrpr %g0, 0, %tl
203 retl
204 wrpr %g7, 0x0, %pstate
205
4da808c3 206__cheetah_flush_tlb_pending: /* 27 insns */
1da177e4
LT
207 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
208 rdpr %pstate, %g7
209 sllx %o1, 3, %o1
210 andn %g7, PSTATE_IE, %g2
211 wrpr %g2, 0x0, %pstate
212 wrpr %g0, 1, %tl
213 mov PRIMARY_CONTEXT, %o4
214 ldxa [%o4] ASI_DMMU, %g2
2ef27778
DM
215 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
216 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
217 or %o0, %o3, %o0 /* Preserve nucleus page size fields */
1da177e4
LT
218 stxa %o0, [%o4] ASI_DMMU
2191: sub %o1, (1 << 3), %o1
220 ldx [%o2 + %o1], %o3
221 andcc %o3, 1, %g0
222 be,pn %icc, 2f
223 andn %o3, 1, %o3
224 stxa %g0, [%o3] ASI_IMMU_DEMAP
2252: stxa %g0, [%o3] ASI_DMMU_DEMAP
b445e26c 226 membar #Sync
1da177e4 227 brnz,pt %o1, 1b
b445e26c 228 nop
1da177e4 229 stxa %g2, [%o4] ASI_DMMU
4da808c3
DM
230 sethi %hi(KERNBASE), %o4
231 flush %o4
1da177e4
LT
232 wrpr %g0, 0, %tl
233 retl
234 wrpr %g7, 0x0, %pstate
235
236#ifdef DCACHE_ALIASING_POSSIBLE
c5bd50a9 237__cheetah_flush_dcache_page: /* 11 insns */
1da177e4
LT
238 sethi %uhi(PAGE_OFFSET), %g1
239 sllx %g1, 32, %g1
240 sub %o0, %g1, %o0
241 sethi %hi(PAGE_SIZE), %o4
2421: subcc %o4, (1 << 5), %o4
243 stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
244 membar #Sync
245 bne,pt %icc, 1b
246 nop
247 retl /* I-cache flush never needed on Cheetah, see callers. */
248 nop
249#endif /* DCACHE_ALIASING_POSSIBLE */
250
52bf082f 251 /* Hypervisor specific versions, patched at boot time. */
2a3a5f5d
DM
252__hypervisor_tlb_tl0_error:
253 save %sp, -192, %sp
254 mov %i0, %o0
255 call hypervisor_tlbop_error
256 mov %i1, %o1
257 ret
258 restore
259
260__hypervisor_flush_tlb_mm: /* 10 insns */
52bf082f
DM
261 mov %o0, %o2 /* ARG2: mmu context */
262 mov 0, %o0 /* ARG0: CPU lists unimplemented */
263 mov 0, %o1 /* ARG1: CPU lists unimplemented */
264 mov HV_MMU_ALL, %o3 /* ARG3: flags */
265 mov HV_FAST_MMU_DEMAP_CTX, %o5
266 ta HV_FAST_TRAP
2a3a5f5d
DM
267 brnz,pn %o0, __hypervisor_tlb_tl0_error
268 mov HV_FAST_MMU_DEMAP_CTX, %o1
52bf082f
DM
269 retl
270 nop
271
2a3a5f5d 272__hypervisor_flush_tlb_pending: /* 16 insns */
52bf082f
DM
273 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
274 sllx %o1, 3, %g1
275 mov %o2, %g2
276 mov %o0, %g3
2771: sub %g1, (1 << 3), %g1
278 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
279 mov %g3, %o1 /* ARG1: mmu context */
2a3a5f5d
DM
280 mov HV_MMU_ALL, %o2 /* ARG2: flags */
281 srlx %o0, PAGE_SHIFT, %o0
282 sllx %o0, PAGE_SHIFT, %o0
52bf082f 283 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
284 brnz,pn %o0, __hypervisor_tlb_tl0_error
285 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
52bf082f
DM
286 brnz,pt %g1, 1b
287 nop
288 retl
289 nop
290
2a3a5f5d 291__hypervisor_flush_tlb_kernel_range: /* 16 insns */
52bf082f
DM
292 /* %o0=start, %o1=end */
293 cmp %o0, %o1
294 be,pn %xcc, 2f
295 sethi %hi(PAGE_SIZE), %g3
296 mov %o0, %g1
297 sub %o1, %g1, %g2
298 sub %g2, %g3, %g2
2991: add %g1, %g2, %o0 /* ARG0: virtual address */
300 mov 0, %o1 /* ARG1: mmu context */
301 mov HV_MMU_ALL, %o2 /* ARG2: flags */
302 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
303 brnz,pn %o0, __hypervisor_tlb_tl0_error
304 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
52bf082f
DM
305 brnz,pt %g2, 1b
306 sub %g2, %g3, %g2
3072: retl
308 nop
309
310#ifdef DCACHE_ALIASING_POSSIBLE
311 /* XXX Niagara and friends have an 8K cache, so no aliasing is
312 * XXX possible, but nothing explicit in the Hypervisor API
313 * XXX guarantees this.
314 */
315__hypervisor_flush_dcache_page: /* 2 insns */
316 retl
317 nop
318#endif
319
320tlb_patch_one:
1da177e4
LT
3211: lduw [%o1], %g1
322 stw %g1, [%o0]
323 flush %o0
324 subcc %o2, 1, %o2
325 add %o1, 4, %o1
326 bne,pt %icc, 1b
327 add %o0, 4, %o0
328 retl
329 nop
330
331 .globl cheetah_patch_cachetlbops
332cheetah_patch_cachetlbops:
333 save %sp, -128, %sp
334
335 sethi %hi(__flush_tlb_mm), %o0
336 or %o0, %lo(__flush_tlb_mm), %o0
337 sethi %hi(__cheetah_flush_tlb_mm), %o1
338 or %o1, %lo(__cheetah_flush_tlb_mm), %o1
52bf082f 339 call tlb_patch_one
4da808c3 340 mov 19, %o2
1da177e4
LT
341
342 sethi %hi(__flush_tlb_pending), %o0
343 or %o0, %lo(__flush_tlb_pending), %o0
344 sethi %hi(__cheetah_flush_tlb_pending), %o1
345 or %o1, %lo(__cheetah_flush_tlb_pending), %o1
52bf082f 346 call tlb_patch_one
4da808c3 347 mov 27, %o2
1da177e4
LT
348
349#ifdef DCACHE_ALIASING_POSSIBLE
350 sethi %hi(__flush_dcache_page), %o0
351 or %o0, %lo(__flush_dcache_page), %o0
c5bd50a9
DM
352 sethi %hi(__cheetah_flush_dcache_page), %o1
353 or %o1, %lo(__cheetah_flush_dcache_page), %o1
52bf082f 354 call tlb_patch_one
1da177e4
LT
355 mov 11, %o2
356#endif /* DCACHE_ALIASING_POSSIBLE */
357
358 ret
359 restore
360
361#ifdef CONFIG_SMP
362 /* These are all called by the slaves of a cross call, at
363 * trap level 1, with interrupts fully disabled.
364 *
365 * Register usage:
366 * %g5 mm->context (all tlb flushes)
367 * %g1 address arg 1 (tlb page and range flushes)
368 * %g7 address arg 2 (tlb range flush only)
369 *
56fb4df6
DM
370 * %g6 scratch 1
371 * %g2 scratch 2
372 * %g3 scratch 3
373 * %g4 scratch 4
1da177e4
LT
374 */
375 .align 32
376 .globl xcall_flush_tlb_mm
2a3a5f5d 377xcall_flush_tlb_mm: /* 21 insns */
1da177e4 378 mov PRIMARY_CONTEXT, %g2
1da177e4 379 ldxa [%g2] ASI_DMMU, %g3
2ef27778
DM
380 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
381 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
382 or %g5, %g4, %g5 /* Preserve nucleus page size fields */
1da177e4 383 stxa %g5, [%g2] ASI_DMMU
2ef27778 384 mov 0x40, %g4
1da177e4
LT
385 stxa %g0, [%g4] ASI_DMMU_DEMAP
386 stxa %g0, [%g4] ASI_IMMU_DEMAP
387 stxa %g3, [%g2] ASI_DMMU
388 retry
52bf082f
DM
389 nop
390 nop
391 nop
392 nop
393 nop
394 nop
395 nop
2a3a5f5d
DM
396 nop
397 nop
398 nop
1da177e4
LT
399
400 .globl xcall_flush_tlb_pending
2a3a5f5d 401xcall_flush_tlb_pending: /* 21 insns */
1da177e4
LT
402 /* %g5=context, %g1=nr, %g7=vaddrs[] */
403 sllx %g1, 3, %g1
404 mov PRIMARY_CONTEXT, %g4
405 ldxa [%g4] ASI_DMMU, %g2
2ef27778
DM
406 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
407 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
408 or %g5, %g4, %g5
409 mov PRIMARY_CONTEXT, %g4
1da177e4
LT
410 stxa %g5, [%g4] ASI_DMMU
4111: sub %g1, (1 << 3), %g1
412 ldx [%g7 + %g1], %g5
413 andcc %g5, 0x1, %g0
414 be,pn %icc, 2f
415
416 andn %g5, 0x1, %g5
417 stxa %g0, [%g5] ASI_IMMU_DEMAP
4182: stxa %g0, [%g5] ASI_DMMU_DEMAP
419 membar #Sync
420 brnz,pt %g1, 1b
421 nop
422 stxa %g2, [%g4] ASI_DMMU
423 retry
2a3a5f5d 424 nop
1da177e4
LT
425
426 .globl xcall_flush_tlb_kernel_range
2a3a5f5d 427xcall_flush_tlb_kernel_range: /* 25 insns */
1da177e4
LT
428 sethi %hi(PAGE_SIZE - 1), %g2
429 or %g2, %lo(PAGE_SIZE - 1), %g2
430 andn %g1, %g2, %g1
431 andn %g7, %g2, %g7
432 sub %g7, %g1, %g3
433 add %g2, 1, %g2
434 sub %g3, %g2, %g3
435 or %g1, 0x20, %g1 ! Nucleus
4361: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
437 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
438 membar #Sync
439 brnz,pt %g3, 1b
440 sub %g3, %g2, %g3
441 retry
442 nop
443 nop
52bf082f
DM
444 nop
445 nop
446 nop
447 nop
448 nop
449 nop
2a3a5f5d
DM
450 nop
451 nop
452 nop
1da177e4
LT
453
454 /* This runs in a very controlled environment, so we do
455 * not need to worry about BH races etc.
456 */
457 .globl xcall_sync_tick
458xcall_sync_tick:
45fec05f
DM
459
460661: rdpr %pstate, %g2
1da177e4 461 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
df7d6aec 462 .section .sun4v_2insn_patch, "ax"
45fec05f
DM
463 .word 661b
464 nop
465 nop
466 .previous
467
1da177e4
LT
468 rdpr %pil, %g2
469 wrpr %g0, 15, %pil
470 sethi %hi(109f), %g7
471 b,pt %xcc, etrap_irq
472109: or %g7, %lo(109b), %g7
10e26723
DM
473#ifdef CONFIG_TRACE_IRQFLAGS
474 call trace_hardirqs_off
475 nop
476#endif
1da177e4
LT
477 call smp_synchronize_tick_client
478 nop
1da177e4
LT
479 b rtrap_xcall
480 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
481
482 /* NOTE: This is SPECIAL!! We do etrap/rtrap however
483 * we choose to deal with the "BH's run with
484 * %pil==15" problem (described in asm/pil.h)
485 * by just invoking rtrap directly past where
486 * BH's are checked for.
487 *
488 * We do it like this because we do not want %pil==15
489 * lockups to prevent regs being reported.
490 */
491 .globl xcall_report_regs
492xcall_report_regs:
45fec05f
DM
493
494661: rdpr %pstate, %g2
1da177e4 495 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
df7d6aec 496 .section .sun4v_2insn_patch, "ax"
45fec05f
DM
497 .word 661b
498 nop
499 nop
500 .previous
501
1da177e4
LT
502 rdpr %pil, %g2
503 wrpr %g0, 15, %pil
504 sethi %hi(109f), %g7
505 b,pt %xcc, etrap_irq
506109: or %g7, %lo(109b), %g7
10e26723
DM
507#ifdef CONFIG_TRACE_IRQFLAGS
508 call trace_hardirqs_off
509 nop
510#endif
1da177e4
LT
511 call __show_regs
512 add %sp, PTREGS_OFF, %o0
1da177e4
LT
513 /* Has to be a non-v9 branch due to the large distance. */
514 b rtrap_xcall
515 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
516
517#ifdef DCACHE_ALIASING_POSSIBLE
518 .align 32
519 .globl xcall_flush_dcache_page_cheetah
520xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
521 sethi %hi(PAGE_SIZE), %g3
5221: subcc %g3, (1 << 5), %g3
523 stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
524 membar #Sync
525 bne,pt %icc, 1b
526 nop
527 retry
528 nop
529#endif /* DCACHE_ALIASING_POSSIBLE */
530
531 .globl xcall_flush_dcache_page_spitfire
532xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
533 %g7 == kernel page virtual address
534 %g5 == (page->mapping != NULL) */
535#ifdef DCACHE_ALIASING_POSSIBLE
536 srlx %g1, (13 - 2), %g1 ! Form tag comparitor
537 sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
538 sub %g3, (1 << 5), %g3 ! D$ linesize == 32
5391: ldxa [%g3] ASI_DCACHE_TAG, %g2
540 andcc %g2, 0x3, %g0
541 be,pn %xcc, 2f
542 andn %g2, 0x3, %g2
543 cmp %g2, %g1
544
545 bne,pt %xcc, 2f
546 nop
547 stxa %g0, [%g3] ASI_DCACHE_TAG
548 membar #Sync
5492: cmp %g3, 0
550 bne,pt %xcc, 1b
551 sub %g3, (1 << 5), %g3
552
553 brz,pn %g5, 2f
554#endif /* DCACHE_ALIASING_POSSIBLE */
555 sethi %hi(PAGE_SIZE), %g3
556
5571: flush %g7
558 subcc %g3, (1 << 5), %g3
559 bne,pt %icc, 1b
560 add %g7, (1 << 5), %g7
561
5622: retry
563 nop
564 nop
565
2a3a5f5d
DM
566 /* %g5: error
567 * %g6: tlb op
568 */
569__hypervisor_tlb_xcall_error:
570 mov %g5, %g4
571 mov %g6, %g5
572 ba,pt %xcc, etrap
573 rd %pc, %g7
574 mov %l4, %o0
575 call hypervisor_tlbop_error_xcall
576 mov %l5, %o1
7697daaa 577 ba,a,pt %xcc, rtrap
2a3a5f5d 578
52bf082f 579 .globl __hypervisor_xcall_flush_tlb_mm
2a3a5f5d 580__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
52bf082f
DM
581 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
582 mov %o0, %g2
583 mov %o1, %g3
584 mov %o2, %g4
585 mov %o3, %g1
586 mov %o5, %g7
587 clr %o0 /* ARG0: CPU lists unimplemented */
588 clr %o1 /* ARG1: CPU lists unimplemented */
589 mov %g5, %o2 /* ARG2: mmu context */
590 mov HV_MMU_ALL, %o3 /* ARG3: flags */
591 mov HV_FAST_MMU_DEMAP_CTX, %o5
592 ta HV_FAST_TRAP
2a3a5f5d
DM
593 mov HV_FAST_MMU_DEMAP_CTX, %g6
594 brnz,pn %o0, __hypervisor_tlb_xcall_error
595 mov %o0, %g5
52bf082f
DM
596 mov %g2, %o0
597 mov %g3, %o1
598 mov %g4, %o2
599 mov %g1, %o3
600 mov %g7, %o5
601 membar #Sync
602 retry
603
604 .globl __hypervisor_xcall_flush_tlb_pending
2a3a5f5d
DM
605__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
606 /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
52bf082f
DM
607 sllx %g1, 3, %g1
608 mov %o0, %g2
609 mov %o1, %g3
610 mov %o2, %g4
6111: sub %g1, (1 << 3), %g1
612 ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
613 mov %g5, %o1 /* ARG1: mmu context */
2a3a5f5d
DM
614 mov HV_MMU_ALL, %o2 /* ARG2: flags */
615 srlx %o0, PAGE_SHIFT, %o0
616 sllx %o0, PAGE_SHIFT, %o0
52bf082f 617 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
618 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
619 brnz,a,pn %o0, __hypervisor_tlb_xcall_error
620 mov %o0, %g5
52bf082f
DM
621 brnz,pt %g1, 1b
622 nop
623 mov %g2, %o0
624 mov %g3, %o1
625 mov %g4, %o2
626 membar #Sync
627 retry
628
629 .globl __hypervisor_xcall_flush_tlb_kernel_range
2a3a5f5d
DM
630__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
631 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
52bf082f
DM
632 sethi %hi(PAGE_SIZE - 1), %g2
633 or %g2, %lo(PAGE_SIZE - 1), %g2
634 andn %g1, %g2, %g1
635 andn %g7, %g2, %g7
636 sub %g7, %g1, %g3
637 add %g2, 1, %g2
638 sub %g3, %g2, %g3
639 mov %o0, %g2
640 mov %o1, %g4
2a3a5f5d 641 mov %o2, %g7
52bf082f
DM
6421: add %g1, %g3, %o0 /* ARG0: virtual address */
643 mov 0, %o1 /* ARG1: mmu context */
644 mov HV_MMU_ALL, %o2 /* ARG2: flags */
645 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
646 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
647 brnz,pn %o0, __hypervisor_tlb_xcall_error
648 mov %o0, %g5
52bf082f
DM
649 sethi %hi(PAGE_SIZE), %o2
650 brnz,pt %g3, 1b
651 sub %g3, %o2, %g3
652 mov %g2, %o0
653 mov %g4, %o1
2a3a5f5d 654 mov %g7, %o2
52bf082f
DM
655 membar #Sync
656 retry
657
1da177e4
LT
658 /* These just get rescheduled to PIL vectors. */
659 .globl xcall_call_function
660xcall_call_function:
661 wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
662 retry
663
664 .globl xcall_receive_signal
665xcall_receive_signal:
666 wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
667 retry
668
669 .globl xcall_capture
670xcall_capture:
671 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
672 retry
673
ee29074d
DM
674 .globl xcall_new_mmu_context_version
675xcall_new_mmu_context_version:
676 wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
677 retry
678
e2fdd7fd
DM
679#ifdef CONFIG_KGDB
680 .globl xcall_kgdb_capture
681xcall_kgdb_capture:
682661: rdpr %pstate, %g2
683 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
684 .section .sun4v_2insn_patch, "ax"
685 .word 661b
686 nop
687 nop
688 .previous
689
690 rdpr %pil, %g2
691 wrpr %g0, 15, %pil
692 sethi %hi(109f), %g7
693 ba,pt %xcc, etrap_irq
694109: or %g7, %lo(109b), %g7
695#ifdef CONFIG_TRACE_IRQFLAGS
696 call trace_hardirqs_off
697 nop
698#endif
699 call smp_kgdb_capture_client
700 add %sp, PTREGS_OFF, %o0
701 /* Has to be a non-v9 branch due to the large distance. */
702 ba rtrap_xcall
703 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
704#endif
705
1da177e4 706#endif /* CONFIG_SMP */
52bf082f
DM
707
708
709 .globl hypervisor_patch_cachetlbops
710hypervisor_patch_cachetlbops:
711 save %sp, -128, %sp
712
713 sethi %hi(__flush_tlb_mm), %o0
714 or %o0, %lo(__flush_tlb_mm), %o0
715 sethi %hi(__hypervisor_flush_tlb_mm), %o1
716 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
717 call tlb_patch_one
2a3a5f5d 718 mov 10, %o2
52bf082f
DM
719
720 sethi %hi(__flush_tlb_pending), %o0
721 or %o0, %lo(__flush_tlb_pending), %o0
722 sethi %hi(__hypervisor_flush_tlb_pending), %o1
723 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
724 call tlb_patch_one
2a3a5f5d 725 mov 16, %o2
52bf082f
DM
726
727 sethi %hi(__flush_tlb_kernel_range), %o0
728 or %o0, %lo(__flush_tlb_kernel_range), %o0
729 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
730 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
731 call tlb_patch_one
2a3a5f5d 732 mov 16, %o2
52bf082f
DM
733
734#ifdef DCACHE_ALIASING_POSSIBLE
735 sethi %hi(__flush_dcache_page), %o0
736 or %o0, %lo(__flush_dcache_page), %o0
737 sethi %hi(__hypervisor_flush_dcache_page), %o1
738 or %o1, %lo(__hypervisor_flush_dcache_page), %o1
739 call tlb_patch_one
740 mov 2, %o2
741#endif /* DCACHE_ALIASING_POSSIBLE */
742
743#ifdef CONFIG_SMP
744 sethi %hi(xcall_flush_tlb_mm), %o0
745 or %o0, %lo(xcall_flush_tlb_mm), %o0
746 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
747 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
748 call tlb_patch_one
2a3a5f5d 749 mov 21, %o2
52bf082f
DM
750
751 sethi %hi(xcall_flush_tlb_pending), %o0
752 or %o0, %lo(xcall_flush_tlb_pending), %o0
753 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
754 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
755 call tlb_patch_one
2a3a5f5d 756 mov 21, %o2
52bf082f
DM
757
758 sethi %hi(xcall_flush_tlb_kernel_range), %o0
759 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
760 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
761 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
762 call tlb_patch_one
2a3a5f5d 763 mov 25, %o2
52bf082f
DM
764#endif /* CONFIG_SMP */
765
766 ret
767 restore
/* This page took 0.316549 seconds and 5 git commands to generate. */