Commit | Line | Data |
---|---|---|
2a7e2990 DM |
1 | /* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling. |
2 | * | |
3 | * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net> | |
4 | * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) | |
5 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) | |
6 | * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | |
7 | */ | |
8 | ||
9 | #include <linux/config.h> | |
10 | #include <asm/head.h> | |
11 | #include <asm/asi.h> | |
12 | #include <asm/page.h> | |
13 | #include <asm/pgtable.h> | |
14 | ||
15 | .text | |
16 | .align 32 | |
17 | ||
2a7e2990 DM |
18 | /* |
19 | * On a second level vpte miss, check whether the original fault is to the OBP | |
20 | * range (note that this is only possible for instruction miss, data misses to | |
21 | * obp range do not use vpte). If so, go back directly to the faulting address. | |
22 | * This is because we want to read the tpc, otherwise we have no way of knowing | |
23 | * the 8k aligned faulting address if we are using >8k kernel pagesize. This | |
24 | * also ensures no vpte range addresses are dropped into tlb while obp is | |
25 | * executing (see inherit_locked_prom_mappings() rant). | |
26 | */ | |
27 | sparc64_vpte_nucleus: | |
28 | /* Note that kvmap below has verified that the address is | |
29 | * in the range MODULES_VADDR --> VMALLOC_END already. So | |
30 | * here we need only check if it is an OBP address or not. | |
31 | * On entry %g4 holds the faulting virtual address. | |
32 | */ | |
32 | sethi %hi(LOW_OBP_ADDRESS), %g5 ! %g5 = bottom of the OBP virtual range | |
33 | cmp %g4, %g5 | |
1ac4f5eb | 34 | blu,pn %xcc, kern_vpte ! below OBP range: ordinary kernel vpte miss |
2a7e2990 DM |
35 | mov 0x1, %g5 ! delay slot: begin forming (1 << 32) |
36 | sllx %g5, 32, %g5 ! %g5 = 1 << 32, top of the OBP range | |
37 | cmp %g4, %g5 | |
1ac4f5eb | 38 | blu,pn %xcc, vpte_insn_obp ! in [LOW_OBP_ADDRESS, 1<<32): OBP insn fault |
2a7e2990 DM |
39 | nop ! delay slot |
40 | ||
41 | /* These two instructions are patched by paging_init(). */ | |
1ac4f5eb DM |
42 | kern_vpte: |
43 | sethi %hi(swapper_pgd_zero), %g5 |
44 | lduw [%g5 + %lo(swapper_pgd_zero)], %g5 ! 32-bit load of the kernel pgd entry |
2a7e2990 DM |
45 | |
46 | /* With kernel PGD in %g5, branch back into dtlb_backend. */ | |
47 | ba,pt %xcc, sparc64_kpte_continue | |
48 | andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */ | |
49 | ||
50 | vpte_noent: | |
51 | /* Restore previous TAG_ACCESS, %g5 is zero, and we will | |
52 | * skip over the trap instruction so that the top level | |
53 | * TLB miss handler will think this %g5 value is just an | |
54 | * invalid PTE, thus branching to full fault processing. | |
55 | */ | |
56 | mov TLB_SFSR, %g1 | |
57 | stxa %g4, [%g1 + %g1] ASI_DMMU ! %g1 + %g1 = TAG_ACCESS offset (2 * TLB_SFSR) | |
58 | done | |
59 | ||
1ac4f5eb DM |
60 | vpte_insn_obp: ! walk the saved OBP page tables and fill the ITLB |
61 | sethi %hi(prom_pmd_phys), %g5 |
62 | ldx [%g5 + %lo(prom_pmd_phys)], %g5 ! %g5 = phys addr of OBP PMD table |
2a7e2990 DM |
63 | |
64 | /* Behave as if we are at TL0. */ | |
65 | wrpr %g0, 1, %tl | |
66 | rdpr %tpc, %g4 /* Find original faulting iaddr */ | |
67 | srlx %g4, 13, %g4 /* Throw out context bits */ | |
68 | sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */ | |
69 | ||
70 | /* Restore previous TAG_ACCESS. */ | |
71 | mov TLB_SFSR, %g1 | |
72 | stxa %g4, [%g1 + %g1] ASI_IMMU ! TAG_ACCESS offset = 2 * TLB_SFSR | |
73 | ||
74 | /* Get PMD offset: vaddr bits [33:23], 11 bits, 4 bytes per entry. */ | |
75 | srlx %g4, 23, %g6 | |
76 | and %g6, 0x7ff, %g6 | |
77 | sllx %g6, 2, %g6 | |
78 | ||
79 | /* Load PMD, is it valid? (physical load, E-cache allowed) */ | |
80 | lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | |
81 | brz,pn %g5, longpath ! zero PMD = not mapped, take the slow path | |
82 | sllx %g5, 11, %g5 ! delay slot: PMD << 11 = phys addr of PTE table | |
83 | ||
84 | /* Get PTE offset: vaddr bits [22:13], 10 bits, 8 bytes per entry. */ | |
85 | srlx %g4, 13, %g6 | |
86 | and %g6, 0x3ff, %g6 | |
87 | sllx %g6, 3, %g6 | |
88 | ||
89 | /* Load PTE. */ | |
90 | ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | |
91 | brgez,pn %g5, longpath ! valid bit is bit 63: non-negative = invalid PTE | |
92 | nop | |
93 | ||
94 | /* TLB load and return from trap. */ | |
95 | stxa %g5, [%g0] ASI_ITLB_DATA_IN | |
96 | retry | |
97 | ||
1ac4f5eb DM |
98 | kvmap_do_obp: ! data-miss OBP walk: same table walk as vpte_insn_obp, fills DTLB |
99 | sethi %hi(prom_pmd_phys), %g5 |
100 | ldx [%g5 + %lo(prom_pmd_phys)], %g5 ! %g5 = phys addr of OBP PMD table |
2a7e2990 DM |
101 | |
102 | /* Get PMD offset: vaddr bits [33:23], 11 bits, 4 bytes per entry. */ | |
103 | srlx %g4, 23, %g6 | |
104 | and %g6, 0x7ff, %g6 | |
105 | sllx %g6, 2, %g6 | |
106 | ||
107 | /* Load PMD, is it valid? (physical load, E-cache allowed) */ | |
108 | lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | |
109 | brz,pn %g5, longpath ! zero PMD = not mapped, take the slow path | |
110 | sllx %g5, 11, %g5 ! delay slot: PMD << 11 = phys addr of PTE table | |
111 | ||
112 | /* Get PTE offset: vaddr bits [22:13], 10 bits, 8 bytes per entry. */ | |
113 | srlx %g4, 13, %g6 | |
114 | and %g6, 0x3ff, %g6 | |
115 | sllx %g6, 3, %g6 | |
116 | ||
117 | /* Load PTE. */ | |
118 | ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | |
119 | brgez,pn %g5, longpath ! valid bit is bit 63: non-negative = invalid PTE | |
120 | nop | |
121 | ||
122 | /* TLB load and return from trap. */ | |
123 | stxa %g5, [%g0] ASI_DTLB_DATA_IN | |
124 | retry | |
125 | ||
126 | /* | |
127 | * On a first level data miss, check whether this is to the OBP range (note | |
128 | * that such accesses can be made by prom, as well as by kernel using | |
129 | * prom_getproperty on "address"), and if so, do not use vpte access ... | |
130 | * rather, use information saved during inherit_prom_mappings() using 8k | |
131 | * pagesize. | |
132 | * | |
133 | * Entry: %g4 = faulting vaddr; %g2/%g3/%g6 are set up by the top-level | |
134 | * miss handler (%g2 = linear-map PTE template, presumably -- see caller). | |
135 | */ | |
133 | .align 32 | |
134 | kvmap: | |
135 | brlz,pt %g4, kvmap_load ! bit 63 set: linear kernel mapping, PTE in %g5 | |
136 | xor %g2, %g4, %g5 ! delay slot (always runs): form linear-map PTE | |
137 | ||
138 | kvmap_nonlinear: | |
139 | /* Fault outside MODULES_VADDR --> VMALLOC_END is a real fault. */ | |
139 | sethi %hi(MODULES_VADDR), %g5 | |
140 | cmp %g4, %g5 | |
141 | blu,pn %xcc, longpath | |
142 | mov (VMALLOC_END >> 24), %g5 ! delay slot: VMALLOC_END too big for sethi, | |
143 | sllx %g5, 24, %g5 ! so build it with mov + shift | |
144 | cmp %g4, %g5 | |
145 | bgeu,pn %xcc, longpath | |
146 | nop | |
147 | ||
148 | kvmap_check_obp: | |
149 | /* OBP range is [LOW_OBP_ADDRESS, 1 << 32). */ | |
149 | sethi %hi(LOW_OBP_ADDRESS), %g5 | |
150 | cmp %g4, %g5 | |
151 | blu,pn %xcc, kvmap_vmalloc_addr ! below OBP range: plain vmalloc address | |
152 | mov 0x1, %g5 ! delay slot: begin forming (1 << 32) | |
153 | sllx %g5, 32, %g5 | |
154 | cmp %g4, %g5 | |
1ac4f5eb | 155 | blu,pn %xcc, kvmap_do_obp ! inside OBP range: use saved 8k OBP mappings |
2a7e2990 DM |
156 | nop ! delay slot |
157 | ||
158 | kvmap_vmalloc_addr: | |
159 | /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */ | |
160 | ldxa [%g3 + %g6] ASI_N, %g5 ! %g3/%g6 preloaded by miss handler -- see caller | |
161 | brgez,pn %g5, longpath ! valid bit is bit 63: non-negative = invalid | |
162 | nop | |
163 | ||
164 | kvmap_load: | |
165 | /* PTE is valid, load into TLB and return from trap. */ | |
166 | stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB | |
167 | retry |