[SPARC64]: Add CONFIG_DEBUG_PAGEALLOC support.
arch/sparc64/kernel/ktlb.S
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>

	.text
	.align	32
/*
 * On a second level vpte miss, check whether the original fault is to the OBP
 * range (note that this is only possible for instruction misses; data misses
 * to the OBP range do not use vpte).  If so, go back directly to the faulting
 * address.  This is because we want to read the tpc, otherwise we have no way
 * of knowing the 8k aligned faulting address if we are using >8k kernel
 * pagesize.  This also ensures no vpte range addresses are dropped into the
 * tlb while obp is executing (see inherit_locked_prom_mappings() rant).
 */
sparc64_vpte_nucleus:
	/* Note that kvmap below has verified that the address is
	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
	 * here we need only check if it is an OBP address or not.
	 */
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kern_vpte
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, vpte_insn_obp
	 nop
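
	/* Roughly, the test above in C (vaddr being the faulting
	 * address held in %g4):
	 *
	 *	if (vaddr >= LOW_OBP_ADDRESS && vaddr < (1UL << 32))
	 *		goto vpte_insn_obp;
	 *	else
	 *		goto kern_vpte;
	 *
	 * The mov/sllx pair builds the 4GB upper bound, which is too
	 * large to encode as an immediate.
	 */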

	/* These two instructions are patched by paging_init().  */
kern_vpte:
	sethi		%hi(swapper_pgd_zero), %g5
	lduw		[%g5 + %lo(swapper_pgd_zero)], %g5

	/* With kernel PGD in %g5, branch back into dtlb_backend. */
	ba,pt		%xcc, sparc64_kpte_continue
	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment. */

vpte_noent:
	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
	 * skip over the trap instruction so that the top level
	 * TLB miss handler will think this %g5 value is just an
	 * invalid PTE, thus branching to full fault processing.
	 */
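	/* TLB_SFSR is 0x18, so %g1 + %g1 yields 0x30, the D-MMU
	 * TAG_ACCESS register offset; doubling the register saves
	 * loading a second constant.
	 */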
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_DMMU
	done

vpte_insn_obp:
	sethi		%hi(prom_pmd_phys), %g5
	ldx		[%g5 + %lo(prom_pmd_phys)], %g5

	/* Behave as if we are at TL0. */
	wrpr		%g0, 1, %tl
	rdpr		%tpc, %g4	/* Find original faulting iaddr */
	srlx		%g4, 13, %g4	/* Throw out context bits */
	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */

	/* Restore previous TAG_ACCESS. */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_IMMU

	/* Get PMD offset. */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6

	/* Load PMD, is it valid? */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5

	/* Get PTE offset. */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE. */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop

	/* TLB load and return from trap. */
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
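
	/* The two-level walk above, roughly in C, assuming the 8K base
	 * page size (PAGE_SHIFT 13); ldphys32/ldphys64 are stand-ins
	 * for the ASI_PHYS_USE_EC loads from physical addresses:
	 *
	 *	u32 pmd = ldphys32(pmd_phys + ((vaddr >> 23) & 0x7ff) * 4);
	 *	u64 pte = ldphys64(((u64)pmd << 11) +
	 *			   ((vaddr >> 13) & 0x3ff) * 8);
	 *
	 * PMD entries are 32-bit values holding a physical address
	 * shifted right by 11; a PTE is valid only when its top bit
	 * (_PAGE_VALID) is set, which is what brgez tests.
	 */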
kvmap_do_obp:
	sethi		%hi(prom_pmd_phys), %g5
	ldx		[%g5 + %lo(prom_pmd_phys)], %g5

	/* Get PMD offset. */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6

	/* Load PMD, is it valid? */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5

	/* Get PTE offset. */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE. */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop

	/* TLB load and return from trap. */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
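
	/* Same two-level walk through the saved OBP page tables as in
	 * vpte_insn_obp above, except that here the resulting PTE is
	 * loaded into the D-TLB rather than the I-TLB.
	 */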

/*
 * On a first level data miss, check whether this is to the OBP range (note
 * that such accesses can be made by the PROM, as well as by the kernel using
 * prom_getproperty on "address"), and if so, do not use vpte access ...
 * rather, use information saved during inherit_prom_mappings() using 8k
 * pagesize.
 */
	.align	32
kvmap:
	brgez,pn	%g4, kvmap_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	.globl		kvmap_linear_patch
kvmap_linear_patch:
#endif
	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5

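	/* On the fast path above, no page tables are consulted for the
	 * linear kernel mapping: %g2 is set up by the D-TLB miss
	 * vector so that xor'ing it with the faulting address cancels
	 * the PAGE_OFFSET high bits and merges in the PTE protection
	 * and size bits, yielding the PTE directly.  With
	 * CONFIG_DEBUG_PAGEALLOC, linear-area PTEs may be unmapped at
	 * any time, so boot code patches the branch at
	 * kvmap_linear_patch and misses instead fall into the software
	 * page table walk below.
	 */
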
#ifdef CONFIG_DEBUG_PAGEALLOC
	sethi		%hi(swapper_pg_dir), %g5
	or		%g5, %lo(swapper_pg_dir), %g5
	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	andn		%g6, 0x3, %g6
	lduw		[%g5 + %g6], %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x3, %g6
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - PMD_SHIFT, %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x7, %g6
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 nop
	ba,a,pt		%xcc, kvmap_load
#endif
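
	/* The three-level walk above, roughly in C; pgd_off, pmd_off
	 * and pte_off stand in for the sllx/srlx/andn index
	 * extraction, and ldphys32/ldphys64 for the ASI_PHYS_USE_EC
	 * physical loads:
	 *
	 *	u32 pgd = *(u32 *)((char *)swapper_pg_dir + pgd_off(vaddr));
	 *	u32 pmd = ldphys32(((u64)pgd << 11) + pmd_off(vaddr));
	 *	u64 pte = ldphys64(((u64)pmd << 11) + pte_off(vaddr));
	 *
	 * Each level stores the next level's physical address shifted
	 * right by 11, and a zero entry at any level bails to longpath.
	 */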
kvmap_nonlinear:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, longpath
	 mov		(VMALLOC_END >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, longpath
	 nop
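
	/* Equivalent C for the bounds check above:
	 *
	 *	if (vaddr < MODULES_VADDR || vaddr >= VMALLOC_END)
	 *		goto longpath;
	 *
	 * VMALLOC_END is rebuilt with mov/sllx because the full
	 * constant cannot be encoded in a single instruction.
	 */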

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_do_obp
	 nop
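
	/* Same OBP window test as in sparc64_vpte_nucleus above:
	 * addresses in [LOW_OBP_ADDRESS, 1UL << 32) belong to the
	 * firmware and go through the saved OBP page tables; anything
	 * below LOW_OBP_ADDRESS, or at or above 4GB, is handled as a
	 * normal vmalloc/modules address.
	 */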

kvmap_vmalloc_addr:
	/* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
	ldxa		[%g3 + %g6] ASI_N, %g5
	brgez,pn	%g5, longpath
	 nop
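
	/* %g3 and %g6 are expected to arrive from the D-TLB miss
	 * vector, with %g3 holding the kernel VPTE base and %g6 the
	 * offset of this address's VPTE within it, so the ldxa above
	 * fetches the PTE through the virtual page table.  As before,
	 * brgez rejects a PTE whose valid (top) bit is clear.
	 */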

kvmap_load:
	/* PTE is valid, load into TLB and return from trap. */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry