[SPARC64]: Add a secondary TSB for hugepage mappings.
[deliverable/linux.git] arch/sparc64/kernel/tsb.S
/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/config.h>

#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>

        .text
        .align  32

        /* Invoked from TLB miss handler, we are in the
         * MMU global registers and they are set up like
         * this:
         *
         * %g1: TSB entry pointer
         * %g2: available temporary
         * %g3: FAULT_CODE_{D,I}TLB
         * %g4: available temporary
         * %g5: available temporary
         * %g6: TAG TARGET
         * %g7: available temporary, will be loaded by us with
         *      the physical address base of the linux page
         *      tables for the current address space
         */
tsb_miss_dtlb:
        mov     TLB_TAG_ACCESS, %g4
        ba,pt   %xcc, tsb_miss_page_table_walk
        ldxa    [%g4] ASI_DMMU, %g4

tsb_miss_itlb:
        mov     TLB_TAG_ACCESS, %g4
        ba,pt   %xcc, tsb_miss_page_table_walk
        ldxa    [%g4] ASI_IMMU, %g4

        /* At this point we have:
         * %g1 -- PAGE_SIZE TSB entry address
         * %g3 -- FAULT_CODE_{D,I}TLB
         * %g4 -- missing virtual address
         * %g6 -- TAG TARGET (vaddr >> 22)
         */
tsb_miss_page_table_walk:
        TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
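        /* %g7 now points at this cpu's trap_block (per-cpu MMU and trap
         * state); the macro used %g5 as a temporary.
         */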

        /* Before committing to a full page table walk,
         * check the huge page TSB.
         */
#ifdef CONFIG_HUGETLB_PAGE

661:    ldx     [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
        nop
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        mov             SCRATCHPAD_UTSBREG2, %g5
        ldxa            [%g5] ASI_SCRATCHPAD, %g5
        .previous
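        /* The two instructions at the 661: label are the sun4u version.
         * When booting on a sun4v hypervisor, the .sun4v_2insn_patch entry
         * makes the boot-time patching code overwrite them with the two
         * instructions following the .word 661b, so the huge TSB register is
         * read from the hypervisor scratchpad instead of the trap_block.
         */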

        cmp     %g5, -1
        be,pt   %xcc, 80f
        nop

        /* We need an aligned pair of registers containing 2 values
         * which can be easily rematerialized.  %g6 and %g7 foot the
         * bill just nicely.  We'll save %g6 away into %g2 for the
         * huge page TSB TAG comparison.
         *
         * Perform a huge page TSB lookup.
         */
        mov     %g6, %g2
        and     %g5, 0x7, %g6
        mov     512, %g7
        andn    %g5, 0x7, %g5
        sllx    %g7, %g6, %g7
        srlx    %g4, HPAGE_SHIFT, %g6
        sub     %g7, 1, %g7
        and     %g6, %g7, %g6
        sllx    %g6, 4, %g6
        add     %g5, %g6, %g5
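        /* The low 3 bits of the huge TSB config value encode its size
         * (number of entries == 512 << size field); the remaining bits are
         * the TSB base address.  %g5 now points at the candidate 16-byte
         * entry selected by the huge page number bits of the faulting vaddr.
         */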

        TSB_LOAD_QUAD(%g5, %g6)
        cmp     %g6, %g2
        be,a,pt %xcc, tsb_tlb_reload
        mov     %g7, %g5
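        /* The branch above is annulled: on a tag match the delay slot copies
         * the TTE (loaded into %g7 by TSB_LOAD_QUAD) into %g5, where
         * tsb_tlb_reload expects the PTE; on a mismatch the copy is skipped.
         */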

        /* No match, remember the huge page TSB entry address,
         * and restore %g6 and %g7.
         */
        TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
        srlx    %g4, 22, %g6
80:     stx     %g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
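        /* The huge TSB entry address (or -1 when no huge TSB is configured)
         * is stashed per-cpu so the PTE size check further down can reuse it
         * without recomputing the hash.
         */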

#endif

        ldx     [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7

        /* At this point we have:
         * %g1 -- TSB entry address
         * %g3 -- FAULT_CODE_{D,I}TLB
         * %g4 -- missing virtual address
         * %g6 -- TAG TARGET (vaddr >> 22)
         * %g7 -- page table physical address
         *
         * We know that the base PAGE_SIZE TSB and the HPAGE_SIZE TSB
         * both lack a matching entry.
         */
tsb_miss_page_table_walk_sun4v_fastpath:
        USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

        /* Load and check PTE.  */
        ldxa    [%g5] ASI_PHYS_USE_EC, %g5
        brgez,pn %g5, tsb_do_fault
        nop

#ifdef CONFIG_HUGETLB_PAGE
661:    sethi   %uhi(_PAGE_SZALL_4U), %g7
        sllx    %g7, 32, %g7
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        mov             _PAGE_SZALL_4V, %g7
        nop
        .previous

        and     %g5, %g7, %g2
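        /* %g2 now holds only the page-size field of the PTE. */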

661:    sethi   %uhi(_PAGE_SZHUGE_4U), %g7
        sllx    %g7, 32, %g7
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        mov             _PAGE_SZHUGE_4V, %g7
        nop
        .previous

        cmp     %g2, %g7
        bne,pt  %xcc, 60f
        nop

        /* It is a huge page, use the huge page TSB entry address we
         * calculated above.
         */
        TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
        ldx     [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
        cmp     %g2, -1
        movne   %xcc, %g2, %g1
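        /* If a real huge TSB entry address was recorded (i.e. not -1),
         * redirect the TSB insertion below to it instead of the base
         * PAGE_SIZE TSB entry in %g1.
         */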
60:
#endif

        /* At this point we have:
         * %g1 -- TSB entry address
         * %g3 -- FAULT_CODE_{D,I}TLB
         * %g5 -- valid PTE
         * %g6 -- TAG TARGET (vaddr >> 22)
         */
tsb_reload:
        TSB_LOCK_TAG(%g1, %g2, %g7)
        TSB_WRITE(%g1, %g5, %g6)

        /* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
        cmp     %g3, FAULT_CODE_DTLB
        bne,pn  %xcc, tsb_itlb_load
        nop

tsb_dtlb_load:

661:    stxa    %g5, [%g0] ASI_DTLB_DATA_IN
        retry
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        nop
        nop
        .previous

        /* For sun4v the ASI_DTLB_DATA_IN store and the retry
         * instruction get nop'd out and we get here to branch
         * to the sun4v tlb load code.  The registers are set up
         * as follows:
         *
         * %g4: vaddr
         * %g5: PTE
         * %g6: TAG
         *
         * The sun4v TLB load wants the PTE in %g3 so we fix that
         * up here.
         */
        ba,pt   %xcc, sun4v_dtlb_load
        mov     %g5, %g3

tsb_itlb_load:
        /* Executable bit must be set.  */
661:    andcc   %g5, _PAGE_EXEC_4U, %g0
        .section        .sun4v_1insn_patch, "ax"
        .word           661b
        andcc           %g5, _PAGE_EXEC_4V, %g0
        .previous

        be,pn   %xcc, tsb_do_fault
        nop

661:    stxa    %g5, [%g0] ASI_ITLB_DATA_IN
        retry
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        nop
        nop
        .previous

        /* For sun4v the ASI_ITLB_DATA_IN store and the retry
         * instruction get nop'd out and we get here to branch
         * to the sun4v tlb load code.  The registers are set up
         * as follows:
         *
         * %g4: vaddr
         * %g5: PTE
         * %g6: TAG
         *
         * The sun4v TLB load wants the PTE in %g3 so we fix that
         * up here.
         */
        ba,pt   %xcc, sun4v_itlb_load
        mov     %g5, %g3

        /* No valid entry in the page tables, do full fault
         * processing.
         */

        .globl  tsb_do_fault
tsb_do_fault:
        cmp     %g3, FAULT_CODE_DTLB

661:    rdpr    %pstate, %g5
        wrpr    %g5, PSTATE_AG | PSTATE_MG, %pstate
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        SET_GL(1)
        ldxa            [%g0] ASI_SCRATCHPAD, %g4
        .previous
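        /* On sun4u the patched pair above flips %pstate from the MMU globals
         * over to the alternate globals.  On sun4v it instead raises the
         * global register level and loads scratchpad register 0, which
         * points at this cpu's hypervisor fault status area (used below to
         * fetch the faulting address).
         */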

        bne,pn  %xcc, tsb_do_itlb_fault
        nop

tsb_do_dtlb_fault:
        rdpr    %tl, %g3
        cmp     %g3, 1

661:    mov     TLB_TAG_ACCESS, %g4
        ldxa    [%g4] ASI_DMMU, %g5
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        ldx             [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
        nop
        .previous

        be,pt   %xcc, sparc64_realfault_common
        mov     FAULT_CODE_DTLB, %g4
        ba,pt   %xcc, winfix_trampoline
        nop

tsb_do_itlb_fault:
        rdpr    %tpc, %g5
        ba,pt   %xcc, sparc64_realfault_common
        mov     FAULT_CODE_ITLB, %g4

        .globl  sparc64_realfault_common
sparc64_realfault_common:
        /* fault code in %g4, fault address in %g5, etrap will
         * preserve these two values in %l4 and %l5 respectively
         */
        ba,pt   %xcc, etrap                     ! Save trap state
1:      rd      %pc, %g7                        ! ...
        stb     %l4, [%g6 + TI_FAULT_CODE]      ! Save fault code
        stx     %l5, [%g6 + TI_FAULT_ADDR]      ! Save fault address
        call    do_sparc64_fault                ! Call fault handler
        add     %sp, PTREGS_OFF, %o0            ! Compute pt_regs arg
        ba,pt   %xcc, rtrap_clr_l6              ! Restore cpu state
        nop                                     ! Delay slot (fill me)

winfix_trampoline:
        rdpr    %tpc, %g3                       ! Prepare winfixup TNPC
        or      %g3, 0x7c, %g3                  ! Compute branch offset
        wrpr    %g3, %tnpc                      ! Write it into TNPC
        done                                    ! Trap return

        /* Insert an entry into the TSB.
         *
         * %o0: TSB entry pointer (virt or phys address)
         * %o1: tag
         * %o2: pte
         */
        .align  32
        .globl  __tsb_insert
__tsb_insert:
        rdpr    %pstate, %o5
        wrpr    %o5, PSTATE_IE, %pstate
        TSB_LOCK_TAG(%o0, %g2, %g3)
        TSB_WRITE(%o0, %o2, %o1)
        wrpr    %o5, %pstate
        retl
        nop
        .size   __tsb_insert, .-__tsb_insert

        /* Flush the given TSB entry if it has the matching
         * tag.
         *
         * %o0: TSB entry pointer (virt or phys address)
         * %o1: tag
         */
        .align  32
        .globl  tsb_flush
        .type   tsb_flush,#function
tsb_flush:
        sethi   %hi(TSB_TAG_LOCK_HIGH), %g2
1:      TSB_LOAD_TAG(%o0, %g1)
        srlx    %g1, 32, %o3
        andcc   %o3, %g2, %g0
        bne,pn  %icc, 1b
        membar  #LoadLoad
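        /* The lock bit was clear when the tag was sampled.  If the tag
         * matches the one we were asked to flush, compare-and-swap the
         * invalid-tag pattern into the entry; if the CAS observes a
         * different value, start over.
         */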
        cmp     %g1, %o1
        mov     1, %o3
        bne,pt  %xcc, 2f
        sllx    %o3, TSB_TAG_INVALID_BIT, %o3
        TSB_CAS_TAG(%o0, %g1, %o3)
        cmp     %g1, %o3
        bne,pn  %xcc, 1b
        nop
2:      retl
        TSB_MEMBAR
        .size   tsb_flush, .-tsb_flush

        /* Reload MMU related context switch state at
         * schedule() time.
         *
         * %o0: page table physical address
         * %o1: TSB base config pointer
         * %o2: TSB huge config pointer, or NULL if none
         * %o3: Hypervisor TSB descriptor physical address
         *
         * We have to run this whole thing with interrupts
         * disabled so that the current cpu doesn't change
         * due to preemption.
         */
        .align  32
        .globl  __tsb_context_switch
        .type   __tsb_context_switch,#function
__tsb_context_switch:
        rdpr    %pstate, %g1
        wrpr    %g1, PSTATE_IE, %pstate

        TRAP_LOAD_TRAP_BLOCK(%g2, %g3)

        stx     %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

        ldx     [%o1 + TSB_CONFIG_REG_VAL], %o0
        brz,pt  %o2, 1f
        mov     -1, %g3

        ldx     [%o2 + TSB_CONFIG_REG_VAL], %g3

1:      stx     %g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]

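        /* tlb_type 3 is the hypervisor (sun4v) case: the TSB registers live
         * in hypervisor scratchpad registers and the configuration has to be
         * handed to the hypervisor.  Everything else takes the sun4u path
         * at 50f.
         */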
        sethi   %hi(tlb_type), %g2
        lduw    [%g2 + %lo(tlb_type)], %g2
        cmp     %g2, 3
        bne,pt  %icc, 50f
        nop

        /* Hypervisor TSB switch.  */
        mov     SCRATCHPAD_UTSBREG1, %o5
        stxa    %o0, [%o5] ASI_SCRATCHPAD
        mov     SCRATCHPAD_UTSBREG2, %o5
        stxa    %g3, [%o5] ASI_SCRATCHPAD

        mov     2, %o0
        cmp     %g3, -1
        move    %xcc, 1, %o0

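        /* Register the TSBs with the hypervisor: %o5 = fast trap function
         * number, %o0 = number of TSB descriptors (2 when a huge TSB is
         * configured, otherwise 1), %o1 = physical address of the descriptor
         * array the caller passed in %o3.
         */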
        mov     HV_FAST_MMU_TSB_CTXNON0, %o5
        mov     %o3, %o1
        ta      HV_FAST_TRAP

        /* Finish up.  */
        ba,pt   %xcc, 9f
        nop

        /* SUN4U TSB switch.  */
50:     mov     TSB_REG, %o5
        stxa    %o0, [%o5] ASI_DMMU
        membar  #Sync
        stxa    %o0, [%o5] ASI_IMMU
        membar  #Sync

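        /* If the base TSB config provides a mapping vaddr/PTE, enter that
         * mapping into the D-TLB by hand at index
         * sparc64_highest_unlocked_tlb_ent (each data-access register is 8
         * bytes apart, hence the shift by 3) so that accesses to the TSB
         * itself do not miss.  The huge TSB mapping, if present, goes into
         * the entry just below it.
         */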
2:      ldx     [%o1 + TSB_CONFIG_MAP_VADDR], %o4
        brz     %o4, 9f
        ldx     [%o1 + TSB_CONFIG_MAP_PTE], %o5

        sethi   %hi(sparc64_highest_unlocked_tlb_ent), %g2
        mov     TLB_TAG_ACCESS, %g3
        lduw    [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
        stxa    %o4, [%g3] ASI_DMMU
        membar  #Sync
        sllx    %g2, 3, %g2
        stxa    %o5, [%g2] ASI_DTLB_DATA_ACCESS
        membar  #Sync

        brz,pt  %o2, 9f
        nop

        ldx     [%o2 + TSB_CONFIG_MAP_VADDR], %o4
        ldx     [%o2 + TSB_CONFIG_MAP_PTE], %o5
        mov     TLB_TAG_ACCESS, %g3
        stxa    %o4, [%g3] ASI_DMMU
        membar  #Sync
        sub     %g2, (1 << 3), %g2
        stxa    %o5, [%g2] ASI_DTLB_DATA_ACCESS
        membar  #Sync

9:
        wrpr    %g1, %pstate

        retl
        nop
        .size   __tsb_context_switch, .-__tsb_context_switch

#define TSB_PASS_BITS   ((1 << TSB_TAG_LOCK_BIT) | \
                         (1 << TSB_TAG_INVALID_BIT))

        .align  32
        .globl  copy_tsb
        .type   copy_tsb,#function
copy_tsb:               /* %o0=old_tsb_base, %o1=old_tsb_size
                         * %o2=new_tsb_base, %o3=new_tsb_size
                         */
        sethi   %uhi(TSB_PASS_BITS), %g7
        srlx    %o3, 4, %o3
        add     %o0, %o1, %g1           /* end of old tsb */
        sllx    %g7, 32, %g7
        sub     %o3, 1, %o3             /* %o3 == new tsb hash mask */

661:    prefetcha       [%o0] ASI_N, #one_read
        .section        .tsb_phys_patch, "ax"
        .word           661b
        prefetcha       [%o0] ASI_PHYS_USE_EC, #one_read
        .previous

90:     andcc   %o0, (64 - 1), %g0
        bne     1f
        add     %o0, 64, %o5

661:    prefetcha       [%o5] ASI_N, #one_read
        .section        .tsb_phys_patch, "ax"
        .word           661b
        prefetcha       [%o5] ASI_PHYS_USE_EC, #one_read
        .previous

1:      TSB_LOAD_QUAD(%o0, %g2)         /* %g2/%g3 == TSB entry */
        andcc   %g2, %g7, %g0           /* LOCK or INVALID set? */
        bne,pn  %xcc, 80f               /* Skip it */
        sllx    %g2, 22, %o4            /* TAG --> VADDR */

        /* This can definitely be computed faster... */
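        /* The tag stores only vaddr bits 63:22; recover the low-order index
         * bits from this entry's position in the old TSB, rebuild a vaddr,
         * and rehash it into the new TSB.
         */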
        srlx    %o0, 4, %o5             /* Build index */
        and     %o5, 511, %o5           /* Mask index */
        sllx    %o5, PAGE_SHIFT, %o5    /* Put into vaddr position */
        or      %o4, %o5, %o4           /* Full VADDR. */
        srlx    %o4, PAGE_SHIFT, %o4    /* Shift down to create index */
        and     %o4, %o3, %o4           /* Mask with new_tsb_nents-1 */
        sllx    %o4, 4, %o4             /* Shift back up into tsb ent offset */
        TSB_STORE(%o2 + %o4, %g2)       /* Store TAG */
        add     %o4, 0x8, %o4           /* Advance to TTE */
        TSB_STORE(%o2 + %o4, %g3)       /* Store TTE */

80:     add     %o0, 16, %o0
        cmp     %o0, %g1
        bne,pt  %xcc, 90b
        nop

        retl
        TSB_MEMBAR
        .size   copy_tsb, .-copy_tsb

        /* Set the invalid bit in all TSB entries.  */
        .align  32
        .globl  tsb_init
        .type   tsb_init,#function
tsb_init:               /* %o0 = TSB vaddr, %o1 = size in bytes */
        prefetch        [%o0 + 0x000], #n_writes
        mov     1, %g1
        prefetch        [%o0 + 0x040], #n_writes
        sllx    %g1, TSB_TAG_INVALID_BIT, %g1
        prefetch        [%o0 + 0x080], #n_writes
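        /* Each TSB entry is 16 bytes (tag + TTE).  The loop below writes the
         * invalid-tag pattern into the tag word of every entry, 256 bytes
         * (16 entries) per iteration, prefetching ahead of the stores.
         */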
1:      prefetch        [%o0 + 0x0c0], #n_writes
        stx     %g1, [%o0 + 0x00]
        stx     %g1, [%o0 + 0x10]
        stx     %g1, [%o0 + 0x20]
        stx     %g1, [%o0 + 0x30]
        prefetch        [%o0 + 0x100], #n_writes
        stx     %g1, [%o0 + 0x40]
        stx     %g1, [%o0 + 0x50]
        stx     %g1, [%o0 + 0x60]
        stx     %g1, [%o0 + 0x70]
        prefetch        [%o0 + 0x140], #n_writes
        stx     %g1, [%o0 + 0x80]
        stx     %g1, [%o0 + 0x90]
        stx     %g1, [%o0 + 0xa0]
        stx     %g1, [%o0 + 0xb0]
        prefetch        [%o0 + 0x180], #n_writes
        stx     %g1, [%o0 + 0xc0]
        stx     %g1, [%o0 + 0xd0]
        stx     %g1, [%o0 + 0xe0]
        stx     %g1, [%o0 + 0xf0]
        subcc   %o1, 0x100, %o1
        bne,pt  %xcc, 1b
        add     %o0, 0x100, %o0
        retl
        nop
        nop
        nop
        .size   tsb_init, .-tsb_init

        .globl  NGtsb_init
        .type   NGtsb_init,#function
NGtsb_init:
        rd      %asi, %g2
        mov     1, %g1
        wr      %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
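        /* Niagara variant: the block-init store ASI lets these stores
         * initialize whole cache lines without first fetching them from
         * memory; the caller's %asi is restored in the return delay slot.
         */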
        sllx    %g1, TSB_TAG_INVALID_BIT, %g1
1:      stxa    %g1, [%o0 + 0x00] %asi
        stxa    %g1, [%o0 + 0x10] %asi
        stxa    %g1, [%o0 + 0x20] %asi
        stxa    %g1, [%o0 + 0x30] %asi
        stxa    %g1, [%o0 + 0x40] %asi
        stxa    %g1, [%o0 + 0x50] %asi
        stxa    %g1, [%o0 + 0x60] %asi
        stxa    %g1, [%o0 + 0x70] %asi
        stxa    %g1, [%o0 + 0x80] %asi
        stxa    %g1, [%o0 + 0x90] %asi
        stxa    %g1, [%o0 + 0xa0] %asi
        stxa    %g1, [%o0 + 0xb0] %asi
        stxa    %g1, [%o0 + 0xc0] %asi
        stxa    %g1, [%o0 + 0xd0] %asi
        stxa    %g1, [%o0 + 0xe0] %asi
        stxa    %g1, [%o0 + 0xf0] %asi
        subcc   %o1, 0x100, %o1
        bne,pt  %xcc, 1b
        add     %o0, 0x100, %o0
        retl
        wr      %g2, 0x0, %asi
        .size   NGtsb_init, .-NGtsb_init