Detach sched.h from mm.h
[deliverable/linux.git] arch/powerpc/mm/slb.c
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

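/* Form the ESID word used by slbmte: the effective segment of ea, the
 * valid bit, and the SLB slot index the entry will occupy. */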
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

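/* Form the VSID word used by slbmte: the kernel VSID for ea shifted into
 * position, plus protection/page-size flags. */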
static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

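/*
 * The SLB shadow buffer holds a copy of the bolted SLB entries for the
 * hypervisor to restore (e.g. after it has preempted this partition), so
 * it must be kept in step with what we actually put in the SLB.
 */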
static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
				     unsigned long entry)
{
	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.
	 */
	get_slb_shadow()->save_area[entry].esid = 0;
	barrier();
	get_slb_shadow()->save_area[entry].vsid = vsid;
	barrier();
	get_slb_shadow()->save_area[entry].esid = esid;
}

static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
					unsigned long entry)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
			  entry);

	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory" );
}

void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data;

	WARN_ON(!irqs_disabled());

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

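	/* Slot 2 re-maps the kernel stack.  If the stack lives in the
	 * bolted linear-mapping segment (slot 0), clear the valid bit
	 * rather than install a duplicate SLB entry for it. */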
	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
		ksp_esid_data &= ~SLB_ESID_V;

	/* Only third entry (stack) may change here so only resave that */
	slb_shadow_update(ksp_esid_data,
			  mk_vsid_data(ksp_esid_data, lflags), 2);

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte %0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte %2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
			"r"(mk_esid_data(VMALLOC_START, 1)),
			"r"(mk_vsid_data(ksp_esid_data, lflags)),
			"r"(ksp_esid_data)
		     : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long esid_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

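	/* slb_cache records (up to SLB_CACHE_ENTRIES) the user segments
	 * entered since the last switch.  If it didn't overflow, invalidate
	 * just those entries; otherwise flush the whole SLB and rebolt the
	 * kernel entries. */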
	if (offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data = ((unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT) | SLBIE_C;
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (esid_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	if (is_kernel_addr(pc))
		return;
	slb_allocate(pc);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	if (is_kernel_addr(stack))
		return;
	slb_allocate(stack);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	if (is_kernel_addr(unmapped_base))
		return;
	slb_allocate(unmapped_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/* Assume the instruction had a "0" immediate value, just
	 * "or" in the new value
	 */
	*insn_addr |= immed;
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + 4);
}

193
194 void slb_initialize(void)
195 {
196 unsigned long linear_llp, vmalloc_llp, io_llp;
197 unsigned long lflags, vflags;
198 static int slb_encoding_inited;
199 extern unsigned int *slb_miss_kernel_load_linear;
200 extern unsigned int *slb_miss_kernel_load_io;
201
202 /* Prepare our SLB miss handler based on our page size */
203 linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
204 io_llp = mmu_psize_defs[mmu_io_psize].sllp;
205 vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
206 get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
207
208 if (!slb_encoding_inited) {
209 slb_encoding_inited = 1;
210 patch_slb_encoding(slb_miss_kernel_load_linear,
211 SLB_VSID_KERNEL | linear_llp);
212 patch_slb_encoding(slb_miss_kernel_load_io,
213 SLB_VSID_KERNEL | io_llp);
214
215 DBG("SLB: linear LLP = %04x\n", linear_llp);
216 DBG("SLB: io LLP = %04x\n", io_llp);
217 }
218
219 get_paca()->stab_rr = SLB_NUM_BOLTED;
220
221 /* On iSeries the bolted entries have already been set up by
222 * the hypervisor from the lparMap data in head.S */
223 if (firmware_has_feature(FW_FEATURE_ISERIES))
224 return;
225
226 lflags = SLB_VSID_KERNEL | linear_llp;
227 vflags = SLB_VSID_KERNEL | vmalloc_llp;
228
229 /* Invalidate the entire SLB (even slot 0) & all the ERATS */
230 asm volatile("isync":::"memory");
231 asm volatile("slbmte %0,%0"::"r" (0) : "memory");
232 asm volatile("isync; slbia; isync":::"memory");
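	/* Now (re)create the bolted entries: slot 0 covers the kernel
	 * linear mapping at PAGE_OFFSET, slot 1 the vmalloc region. */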
	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);

	create_shadowed_slbe(VMALLOC_START, vflags, 1);

	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
	 * elsewhere, we'll call _switch() which will bolt in the new
	 * one. */
	asm volatile("isync":::"memory");
}