arch/powerpc/mm/slb.c
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

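/*
 * The two helpers below build the halves of an SLB entry as used by
 * slbmte: mk_esid_data() gives the segment-aligned EA with the valid
 * bit and slot index OR-ed in, and mk_vsid_data() gives the kernel
 * VSID shifted for the segment size plus the protection/page-size
 * flags and the segment-size encoding.  (Descriptive note only.)
 */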
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 unsigned long slot)
{
	unsigned long mask;

	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
	return (ea & mask) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)	\
	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     unsigned long entry)
{
	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	get_slb_shadow()->save_area[entry].esid = 0;
	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}

static inline void slb_shadow_clear(unsigned long entry)
{
	get_slb_shadow()->save_area[entry].esid = 0;
}

void slb_shadow_clear_all(void)
{
	int i;

	for (i = 0; i < SLB_NUM_BOLTED; i++)
		slb_shadow_clear(i);
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					unsigned long entry)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, entry);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, entry))
		     : "memory" );
}

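/*
 * Flush the SLB and re-create the bolted entries for the first VMALLOC
 * segment (slot 1) and the current kernel stack (slot 2).  Callers are
 * expected to run with interrupts disabled (see the WARN_ON below).
 */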
void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	WARN_ON(!irqs_disabled());

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(2);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
	}

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
		        "r"(ksp_vsid_data),
		        "r"(ksp_esid_data)
		     : "memory");
}

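/*
 * Re-bolt the VMALLOC segment with the current vmalloc flags and then
 * flush; intended for when the vmalloc mapping flags change (for
 * example if mmu_vmalloc_psize is updated).
 */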
void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
	slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
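/* Worked example (illustrative addresses): with 256M segments,
 * 0x10000000 and 0x1fffffff share ESID 1 and match, whereas
 * 0x1fffffff and 0x20000000 fall in different segments and do not.
 */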
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
				((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long slbie_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (slbie_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	if (is_kernel_addr(pc))
		return;
	slb_allocate(pc);

	if (esids_match(pc, stack))
		return;

	if (is_kernel_addr(stack))
		return;
	slb_allocate(stack);

	if (esids_match(pc, unmapped_base) || esids_match(stack, unmapped_base))
		return;

	if (is_kernel_addr(unmapped_base))
		return;
	slb_allocate(unmapped_base);
}

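/*
 * Used by slb_initialize() below to patch the SLB_VSID_KERNEL | llp
 * encodings directly into the SLB miss handler's load instructions
 * (assumed to be "load immediate 0" placeholders), presumably so the
 * miss fast path need not fetch them from memory.
 */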
static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/* Assume the instruction had a "0" immediate value, just
	 * "or" in the new value
	 */
	*insn_addr |= immed;
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
	extern unsigned int *slb_miss_kernel_load_linear;
	extern unsigned int *slb_miss_kernel_load_io;

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);

		DBG("SLB: linear  LLP = %04x\n", linear_llp);
		DBG("SLB: io      LLP = %04x\n", io_llp);
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED;

	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
	 * elsewhere, we'll call _switch() which will bolt in the new
	 * one. */
	asm volatile("isync":::"memory");
}