/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */


#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>

	.text
	.align	32

/* Invoked from TLB miss handler, we are in the
 * MMU global registers and they are set up like
 * this:
 *
 * %g1: TSB entry pointer
 * %g2: available temporary
 * %g3: FAULT_CODE_{D,I}TLB
 * %g4: available temporary
 * %g5: available temporary
 * %g6: TAG TARGET
 * %g7: available temporary, will be loaded by us with
 *      the physical address base of the linux page
 *      tables for the current address space
 */
tsb_miss_dtlb:
	mov	TLB_TAG_ACCESS, %g4
	ba,pt	%xcc, tsb_miss_page_table_walk
	ldxa	[%g4] ASI_DMMU, %g4

tsb_miss_itlb:
	mov	TLB_TAG_ACCESS, %g4
	ba,pt	%xcc, tsb_miss_page_table_walk
	ldxa	[%g4] ASI_IMMU, %g4

	/* At this point we have:
	 * %g1 -- PAGE_SIZE TSB entry address
	 * %g3 -- FAULT_CODE_{D,I}TLB
	 * %g4 -- missing virtual address
	 * %g6 -- TAG TARGET (vaddr >> 22)
	 */
tsb_miss_page_table_walk:
	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)

	/* Before committing to a full page table walk,
	 * check the huge page TSB.
	 */
#ifdef CONFIG_HUGETLB_PAGE

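	/* A note on the 661: stanzas used throughout this file: the
	 * instructions at the 661 label are the sun4u versions.  When
	 * booting on a sun4v hypervisor, early patching code walks the
	 * .sun4v_2insn_patch section and overwrites each tagged pair
	 * with the two replacement instructions that follow the .word
	 * entry, so only one variant ever runs.
	 */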
661:	ldx	[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	mov	SCRATCHPAD_UTSBREG2, %g5
	ldxa	[%g5] ASI_SCRATCHPAD, %g5
	.previous

	cmp	%g5, -1
	be,pt	%xcc, 80f
	nop

	/* We need an aligned pair of registers containing 2 values
	 * which can be easily rematerialized.  %g6 and %g7 fit the
	 * bill just nicely.  We'll save %g6 away into %g2 for the
	 * huge page TSB TAG comparison.
	 *
	 * Perform a huge page TSB lookup.
	 */
	mov	%g6, %g2
	and	%g5, 0x7, %g6
	mov	512, %g7
	andn	%g5, 0x7, %g5
	sllx	%g7, %g6, %g7
	srlx	%g4, HPAGE_SHIFT, %g6
	sub	%g7, 1, %g7
	and	%g6, %g7, %g6
	sllx	%g6, 4, %g6
	add	%g5, %g6, %g5
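	/* Roughly, in C (illustrative sketch; the low three bits of
	 * the TSB config register encode the TSB size):
	 *
	 *	nentries = 512UL << (tsb_reg & 0x7);
	 *	base     = tsb_reg & ~0x7UL;
	 *	index    = (vaddr >> HPAGE_SHIFT) & (nentries - 1);
	 *	entry    = base + index * 16;	/- 16 bytes per entry -/
	 */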

	TSB_LOAD_QUAD(%g5, %g6)
	cmp	%g6, %g2
	be,a,pt	%xcc, tsb_tlb_reload
	mov	%g7, %g5

	/* No match, remember the huge page TSB entry address,
	 * and restore %g6 and %g7.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
	srlx	%g4, 22, %g6
80:	stx	%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]

#endif

	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7

	/* At this point we have:
	 * %g1 -- TSB entry address
	 * %g3 -- FAULT_CODE_{D,I}TLB
	 * %g4 -- missing virtual address
	 * %g6 -- TAG TARGET (vaddr >> 22)
	 * %g7 -- page table physical address
	 *
	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
	 * TSB lack a matching entry.
	 */
tsb_miss_page_table_walk_sun4v_fastpath:
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
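	/* USER_PGTABLE_WALK_TL1 (asm/tsb.h) walks the page tables with
	 * physical-address loads and leaves the PTE's physical address
	 * in %g5; an empty entry at any level branches to tsb_do_fault.
	 */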

	/* Load and check PTE.  A valid PTE has _PAGE_VALID (bit 63)
	 * set, so a non-negative value here means no mapping.
	 */
	ldxa	[%g5] ASI_PHYS_USE_EC, %g5
	brgez,pn %g5, tsb_do_fault
	nop

#ifdef CONFIG_HUGETLB_PAGE
661:	sethi	%uhi(_PAGE_SZALL_4U), %g7
	sllx	%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	mov	_PAGE_SZALL_4V, %g7
	nop
	.previous

	and	%g5, %g7, %g2

661:	sethi	%uhi(_PAGE_SZHUGE_4U), %g7
	sllx	%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	mov	_PAGE_SZHUGE_4V, %g7
	nop
	.previous

	cmp	%g2, %g7
	bne,pt	%xcc, 60f
	nop

	/* It is a huge page, so use the huge page TSB entry address
	 * we calculated above.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
	ldx	[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
	cmp	%g2, -1
	movne	%xcc, %g2, %g1
60:
#endif

	/* At this point we have:
	 * %g1 -- TSB entry address
	 * %g3 -- FAULT_CODE_{D,I}TLB
	 * %g5 -- valid PTE
	 * %g6 -- TAG TARGET (vaddr >> 22)
	 */
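	/* TSB_LOCK_TAG sets the lock bit in the entry's tag (so a
	 * concurrent tsb_flush spins instead of racing us), then
	 * TSB_WRITE stores the TTE followed by the real tag, which
	 * also drops the lock.  That ordering makes the two 8-byte
	 * stores look atomic to other cpus.
	 */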
tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
	cmp	%g3, FAULT_CODE_DTLB
	bne,pn	%xcc, tsb_itlb_load
	nop

tsb_dtlb_load:

661:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_dtlb_load
	mov	%g5, %g3

tsb_itlb_load:
	/* Executable bit must be set.  */
661:	sethi	%hi(_PAGE_EXEC_4U), %g4
	andcc	%g5, %g4, %g0
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	andcc	%g5, _PAGE_EXEC_4V, %g0
	nop
	.previous

	be,pn	%xcc, tsb_do_fault
	nop

661:	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_itlb_load
	mov	%g5, %g3

	/* No valid entry in the page tables, do full fault
	 * processing.
	 */

	.globl	tsb_do_fault
tsb_do_fault:
	cmp	%g3, FAULT_CODE_DTLB

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	.previous

	bne,pn	%xcc, tsb_do_itlb_fault
	nop

tsb_do_dtlb_fault:
	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	nop

tsb_do_itlb_fault:
	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	mov	FAULT_CODE_ITLB, %g4

	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* fault code in %g4, fault address in %g5, etrap will
	 * preserve these two values in %l4 and %l5 respectively
	 */
	ba,pt	%xcc, etrap			! Save trap state
1:	rd	%pc, %g7			! ...
	stb	%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx	%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	call	do_sparc64_fault		! Call fault handler
	add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt	%xcc, rtrap			! Restore cpu state
	nop					! Delay slot (fill me)

winfix_trampoline:
	rdpr	%tpc, %g3			! Prepare winfixup TNPC
	or	%g3, 0x7c, %g3			! Compute branch offset
	wrpr	%g3, %tnpc			! Write it into TNPC
	done					! Trap return

	/* Insert an entry into the TSB.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 * %o2: pte
	 */
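	/* Roughly the C-side view (illustrative prototype only):
	 *
	 *	void __tsb_insert(unsigned long ent, unsigned long tag,
	 *			  unsigned long pte);
	 *
	 * Interrupts are disabled across the locked tag update so the
	 * lock/write sequence cannot be interrupted on this cpu.
	 */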
	.align	32
	.globl	__tsb_insert
__tsb_insert:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate
	TSB_LOCK_TAG(%o0, %g2, %g3)
	TSB_WRITE(%o0, %o2, %o1)
	wrpr	%o5, %pstate
	retl
	nop
	.size	__tsb_insert, .-__tsb_insert

	/* Flush the given TSB entry if it has the matching
	 * tag.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 */
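	/* As a C sketch (illustrative; TSB_LOAD_TAG/TSB_CAS_TAG hide
	 * the virtual vs. physical access):
	 *
	 *	do {
	 *		tag = entry->tag;	/- spin while locked -/
	 *	} while (tag & TSB_TAG_LOCK);
	 *	if (tag == my_tag)
	 *		cas(&entry->tag, tag, 1UL << TSB_TAG_INVALID_BIT);
	 *
	 * retrying from the top if the cas loses a race.
	 */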
	.align	32
	.globl	tsb_flush
	.type	tsb_flush,#function
tsb_flush:
	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
1:	TSB_LOAD_TAG(%o0, %g1)
	srlx	%g1, 32, %o3
	andcc	%o3, %g2, %g0
	bne,pn	%icc, 1b
	nop
	cmp	%g1, %o1
	mov	1, %o3
	bne,pt	%xcc, 2f
	sllx	%o3, TSB_TAG_INVALID_BIT, %o3
	TSB_CAS_TAG(%o0, %g1, %o3)
	cmp	%g1, %o3
	bne,pn	%xcc, 1b
	nop
2:	retl
	nop
	.size	tsb_flush, .-tsb_flush

	/* Reload MMU-related context switch state at
	 * schedule() time.
	 *
	 * %o0: page table physical address
	 * %o1: TSB base config pointer
	 * %o2: TSB huge config pointer, or NULL if none
	 * %o3: Hypervisor TSB descriptor physical address
	 *
	 * We have to run this whole thing with interrupts
	 * disabled so that the current cpu doesn't change
	 * due to preemption.
	 */
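	/* Illustrative C-side prototype (parameter names are for
	 * exposition only):
	 *
	 *	void __tsb_context_switch(unsigned long pgd_pa,
	 *				  struct tsb_config *tsb_base,
	 *				  struct tsb_config *tsb_huge,
	 *				  unsigned long tsb_descr_pa);
	 */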
	.align	32
	.globl	__tsb_context_switch
	.type	__tsb_context_switch,#function
__tsb_context_switch:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate

	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)

	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
	brz,pt	%o2, 1f
	mov	-1, %g3

	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3

1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]

	sethi	%hi(tlb_type), %g2
	lduw	[%g2 + %lo(tlb_type)], %g2
	cmp	%g2, 3
	bne,pt	%icc, 50f
	nop

	/* Hypervisor TSB switch. */
	mov	SCRATCHPAD_UTSBREG1, %o5
	stxa	%o0, [%o5] ASI_SCRATCHPAD
	mov	SCRATCHPAD_UTSBREG2, %o5
	stxa	%g3, [%o5] ASI_SCRATCHPAD

	mov	2, %o0
	cmp	%g3, -1
	move	%xcc, 1, %o0

	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
	mov	%o3, %o1
	ta	HV_FAST_TRAP

	/* Finish up.  */
	ba,pt	%xcc, 9f
	nop

	/* SUN4U TSB switch.  */
50:	mov	TSB_REG, %o5
	stxa	%o0, [%o5] ASI_DMMU
	membar	#Sync
	stxa	%o0, [%o5] ASI_IMMU
	membar	#Sync

2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
	brz	%o4, 9f
	ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5

	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
	mov	TLB_TAG_ACCESS, %g3
	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sllx	%g2, 3, %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	brz,pt	%o2, 9f
	nop

	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
	mov	TLB_TAG_ACCESS, %g3
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sub	%g2, (1 << 3), %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

9:
	wrpr	%g1, %pstate

	retl
	nop
	.size	__tsb_context_switch, .-__tsb_context_switch

#define TSB_PASS_BITS	((1 << TSB_TAG_LOCK_BIT) | \
			 (1 << TSB_TAG_INVALID_BIT))

	.align	32
	.globl	copy_tsb
	.type	copy_tsb,#function
copy_tsb:	/* %o0=old_tsb_base, %o1=old_tsb_size
		 * %o2=new_tsb_base, %o3=new_tsb_size
		 */
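	/* What the loop below does, as a C sketch (illustrative; a TSB
	 * entry is 16 bytes, an 8-byte tag followed by an 8-byte TTE):
	 *
	 *	for (i = 0; i < old_nents; i++) {
	 *		if (old[i].tag & (LOCK | INVALID))
	 *			continue;
	 *		vaddr  = old[i].tag << 22;
	 *		vaddr |= (i & 511) << PAGE_SHIFT;
	 *		j = (vaddr >> PAGE_SHIFT) & (new_nents - 1);
	 *		new[j] = old[i];
	 *	}
	 */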
	sethi		%uhi(TSB_PASS_BITS), %g7
	srlx		%o3, 4, %o3
	add		%o0, %o1, %g1	/* end of old tsb */
	sllx		%g7, 32, %g7
	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */

661:	prefetcha	[%o0] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o0] ASI_PHYS_USE_EC, #one_read
	.previous

90:	andcc		%o0, (64 - 1), %g0
	bne		1f
	add		%o0, 64, %o5

661:	prefetcha	[%o5] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o5] ASI_PHYS_USE_EC, #one_read
	.previous

1:	TSB_LOAD_QUAD(%o0, %g2)		/* %g2/%g3 == TSB entry */
	andcc		%g2, %g7, %g0	/* LOCK or INVALID set? */
	bne,pn		%xcc, 80f	/* Skip it */
	sllx		%g2, 22, %o4	/* TAG --> VADDR */

	/* This can definitely be computed faster... */
	srlx		%o0, 4, %o5	/* Build index */
	and		%o5, 511, %o5	/* Mask index */
	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
	or		%o4, %o5, %o4	/* Full VADDR. */
	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
	add		%o4, 0x8, %o4	/* Advance to TTE */
	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */

80:	add		%o0, 16, %o0
	cmp		%o0, %g1
	bne,pt		%xcc, 90b
	nop

	retl
	nop
	.size		copy_tsb, .-copy_tsb


	/* Set the invalid bit in all TSB entries.  */
	.align	32
	.globl	tsb_init
	.type	tsb_init,#function
tsb_init:	/* %o0 = TSB vaddr, %o1 = size in bytes */
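	/* Rough C equivalent (illustrative; assumes size is a multiple
	 * of 0x100, which holds for the power-of-two TSB sizes):
	 *
	 *	for (off = 0; off < size; off += 16)
	 *		*(u64 *)(tsb + off) = 1UL << TSB_TAG_INVALID_BIT;
	 *
	 * The loop below unrolls this 16 entries (0x100 bytes) at a
	 * time and prefetches ahead for the write stream.
	 */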
	prefetch	[%o0 + 0x000], #n_writes
	mov		1, %g1
	prefetch	[%o0 + 0x040], #n_writes
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
	prefetch	[%o0 + 0x080], #n_writes
1:	prefetch	[%o0 + 0x0c0], #n_writes
	stx		%g1, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x10]
	stx		%g1, [%o0 + 0x20]
	stx		%g1, [%o0 + 0x30]
	prefetch	[%o0 + 0x100], #n_writes
	stx		%g1, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x50]
	stx		%g1, [%o0 + 0x60]
	stx		%g1, [%o0 + 0x70]
	prefetch	[%o0 + 0x140], #n_writes
	stx		%g1, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x90]
	stx		%g1, [%o0 + 0xa0]
	stx		%g1, [%o0 + 0xb0]
	prefetch	[%o0 + 0x180], #n_writes
	stx		%g1, [%o0 + 0xc0]
	stx		%g1, [%o0 + 0xd0]
	stx		%g1, [%o0 + 0xe0]
	stx		%g1, [%o0 + 0xf0]
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	add		%o0, 0x100, %o0
	retl
	nop
	nop
	nop
	.size		tsb_init, .-tsb_init

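	/* NGtsb_init is the Niagara-tuned variant of tsb_init.  The
	 * block-init store ASI lets the cpu allocate the cache line
	 * without first reading it from memory, since every byte of
	 * the line is about to be overwritten anyway.
	 */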
	.globl	NGtsb_init
	.type	NGtsb_init,#function
NGtsb_init:
	rd	%asi, %g2
	mov	1, %g1
	wr	%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
	sllx	%g1, TSB_TAG_INVALID_BIT, %g1
1:	stxa	%g1, [%o0 + 0x00] %asi
	stxa	%g1, [%o0 + 0x10] %asi
	stxa	%g1, [%o0 + 0x20] %asi
	stxa	%g1, [%o0 + 0x30] %asi
	stxa	%g1, [%o0 + 0x40] %asi
	stxa	%g1, [%o0 + 0x50] %asi
	stxa	%g1, [%o0 + 0x60] %asi
	stxa	%g1, [%o0 + 0x70] %asi
	stxa	%g1, [%o0 + 0x80] %asi
	stxa	%g1, [%o0 + 0x90] %asi
	stxa	%g1, [%o0 + 0xa0] %asi
	stxa	%g1, [%o0 + 0xb0] %asi
	stxa	%g1, [%o0 + 0xc0] %asi
	stxa	%g1, [%o0 + 0xd0] %asi
	stxa	%g1, [%o0 + 0xe0] %asi
	stxa	%g1, [%o0 + 0xf0] %asi
	subcc	%o1, 0x100, %o1
	bne,pt	%xcc, 1b
	add	%o0, 0x100, %o0
	membar	#Sync
	retl
	wr	%g2, 0x0, %asi
	.size	NGtsb_init, .-NGtsb_init