/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/compiler.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>

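/*
 * Background (editor's note): the SLB (Segment Lookaside Buffer) caches
 * effective-to-virtual segment translations on 64-bit hash-MMU PowerPC.
 * Each entry is written with the slbmte instruction from an (ESID, VSID)
 * doubleword pair; the helpers below construct those pairs. A few
 * "bolted" entries (linear mapping, vmalloc, kernel stack) must always
 * be present so the kernel can run.
 */
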
extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs.
         */
        slb_allocate_realmode(ea);
}

#define slb_esid_mask(ssize)    \
        (((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

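/*
 * Build the ESID doubleword for slbmte: the effective segment ID in the
 * upper bits, the valid bit, and the SLB slot number in the low index
 * field.
 */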
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
                                         unsigned long slot)
{
        return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)   \
        ((ssize) == MMU_SEGSIZE_256M ? SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T)

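/*
 * Build the VSID doubleword for slbmte: the virtual segment ID shifted
 * into place, the segment-size encoding, and the protection/page-size
 * (LLP) flags.
 */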
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

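/*
 * The SLB shadow buffer is a copy of the bolted entries that is
 * registered with the hypervisor (PHYP), which uses it to restore the
 * SLB after events that can wipe it, such as partition switches.
 */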
static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        get_slb_shadow()->save_area[entry].vsid =
                cpu_to_be64(mk_vsid_data(ea, ssize, flags));
        get_slb_shadow()->save_area[entry].esid =
                cpu_to_be64(mk_esid_data(ea, ssize, entry));
}

static inline void slb_shadow_clear(unsigned long entry)
{
        get_slb_shadow()->save_area[entry].esid = 0;
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, entry);

        asm volatile("slbmte %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, entry))
                     : "memory" );
}

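/*
 * Flush the whole SLB with slbia and re-install the bolted entries:
 * slot 0 (the linear mapping at PAGE_OFFSET) is preserved by slbia,
 * slot 1 is the first vmalloc segment and slot 2 is the kernel stack.
 * The stack must not be touched between the slbia and the rebolt,
 * hence the single asm block below.
 */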
static void __slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
                slb_shadow_clear(2);
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
                ksp_vsid_data =
                        be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
        }

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte  %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte  %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
                        "r"(ksp_vsid_data),
                        "r"(ksp_esid_data)
                     : "memory");
}

void slb_flush_and_rebolt(void)
{
        WARN_ON(!irqs_disabled());

        /*
         * We can't take a PMU exception in the following code, so hard
         * disable interrupts.
         */
        hard_irq_disable();

        __slb_flush_and_rebolt();
        get_paca()->slb_cache_ptr = 0;
}

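/*
 * Rebolt the first vmalloc segment (slot 1) with the current page-size
 * flags and flush the SLB so the new encoding takes effect; used when
 * mmu_vmalloc_psize changes (e.g. when a 64K vmalloc segment is demoted
 * to 4K pages).
 */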
void slb_vmalloc_update(void)
{
        unsigned long vflags;

        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
        slb_flush_and_rebolt();
}

/*
 * Helper function to compare ESIDs. There are four cases to handle.
 * 1. The system is not 1T segment size capable. Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID
 *    compare.
 * 3. The system is 1T capable, only one of the two addresses is >= 1T.
 *    This is not a match.
 * 4. The system is 1T capable, both addresses are >= 1T, use the
 *    GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
        int esid_1t_count;

        /* System is not 1T segment size capable. */
        if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
                return (GET_ESID(addr1) == GET_ESID(addr2));

        esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
                         ((addr2 >> SID_SHIFT_1T) != 0));

        /* Both addresses are < 1T. */
        if (esid_1t_count == 0)
                return (GET_ESID(addr1) == GET_ESID(addr2));

        /* One address < 1T, the other >= 1T. Not a match. */
        if (esid_1t_count == 1)
                return 0;

        /* Both addresses are >= 1T. */
        return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

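/*
 * The SLB miss handler records each user ESID it installs in
 * paca->slb_cache. On a context switch we can then slbie just those
 * segments; if the cache overflowed (slb_cache_ptr > SLB_CACHE_ENTRIES)
 * we fall back to flushing the whole SLB and rebolting the kernel
 * entries.
 */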
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset;
        unsigned long slbie_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long exec_base;

        /*
         * We need interrupts hard-disabled here, not just soft-disabled,
         * so that a PMU interrupt can't occur, which might try to access
         * user memory (to get a stack trace) and possibly cause an SLB miss,
         * which would update the slb_cache/slb_cache_ptr fields in the PACA.
         */
        hard_irq_disable();
        offset = get_paca()->slb_cache_ptr;
        if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
            offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT; /* EA */
                        slbie_data |= user_segment_size(slbie_data)
                                << SLBIE_SSIZE_SHIFT;
                        slbie_data |= SLBIE_C; /* C set for user addresses */
                        asm volatile("slbie %0" : : "r" (slbie_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                __slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * Preload some userspace segments into the SLB.
         * Almost all 32 and 64bit PowerPC executables are linked at
         * 0x10000000 so it makes sense to preload this segment.
         */
        exec_base = 0x10000000;

        if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
            is_kernel_addr(exec_base))
                return;

        slb_allocate(pc);

        if (!esids_match(pc, stack))
                slb_allocate(stack);

        if (!esids_match(pc, exec_base) &&
            !esids_match(stack, exec_base))
                slb_allocate(exec_base);
}

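/*
 * The SLB miss handlers load some of their constants (page-size
 * encodings, SLB size) as 16-bit immediates. Rather than reading them
 * from memory on every miss, we patch the immediate field of the
 * instruction itself at boot.
 */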
static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Replace the low 16-bit immediate field of the instruction. */
        int insn = (*insn_addr & 0xffff0000) | immed;
        patch_instruction(insn_addr, insn);
}
258 | ||
46db2f86 BK |
259 | void slb_set_size(u16 size) |
260 | { | |
261 | extern unsigned int *slb_compare_rr_to_size; | |
262 | ||
263 | if (mmu_slb_size == size) | |
264 | return; | |
265 | ||
266 | mmu_slb_size = size; | |
267 | patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size); | |
268 | } | |
269 | ||
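/*
 * Called on each CPU during early boot: patch the SLB miss handlers
 * with the page-size encodings for this configuration (first call
 * only), then wipe the SLB and install the bolted kernel entries.
 */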
void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
        extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        extern unsigned int *slb_miss_kernel_load_vmemmap;
        unsigned long vmemmap_llp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);
                patch_slb_encoding(slb_compare_rr_to_size,
                                   mmu_slb_size);

                pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
                pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
                patch_slb_encoding(slb_miss_kernel_load_vmemmap,
                                   SLB_VSID_KERNEL | vmemmap_llp);
                pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATs */
        asm volatile("isync":::"memory");
        asm volatile("slbmte %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

        create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

        /* For the boot cpu, we're running on the stack in init_thread_union,
         * which is in the first segment of the linear mapping, and also
         * get_paca()->kstack hasn't been initialized yet.
         * For secondary cpus, we need to bolt the kernel stack entry now.
         */
        slb_shadow_clear(2);
        if (raw_smp_processor_id() != boot_cpuid &&
            (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
                create_shadowed_slbe(get_paca()->kstack,
                                     mmu_kernel_ssize, lflags, 2);

        asm volatile("isync":::"memory");
}