/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

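/*
 * Note on the two helpers above: they build the doublewords handed to
 * slbmte.  The "esid" word carries the effective segment ID, the valid
 * bit and the slot index to overwrite; the "vsid" word carries the
 * virtual segment ID shifted into position plus the protection and
 * page-size (LLP) flags.  See the ISA description of slbmte for the
 * exact field layout.
 */
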
static inline void slb_shadow_update(unsigned long ea,
				     unsigned long flags,
				     unsigned long entry)
{
	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.
	 */
	get_slb_shadow()->save_area[entry].esid = 0;
	smp_wmb();
	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
	smp_wmb();
	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
	smp_wmb();
}
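
/*
 * Explanatory note: get_slb_shadow() refers to the per-CPU SLB shadow
 * area shared with the hypervisor on platforms that support it (the
 * assumption here is that PHYP may replay these entries into the SLB
 * on our behalf).  That is why an entry is never left transiently
 * valid with a mismatched ESID/VSID pair: the ESID is cleared first
 * and smp_wmb() orders each store before the next.
 */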

static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
					unsigned long entry)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, flags, entry);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory" );
}

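/*
 * Bolted SLB slots used below and in slb_initialize():
 *   slot 0 - kernel linear mapping (PAGE_OFFSET)
 *   slot 1 - first vmalloc segment (VMALLOC_START)
 *   slot 2 - kernel stack
 * SLB_NUM_BOLTED is expected to match this layout.
 */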
void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data;

	WARN_ON(!irqs_disabled());

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
		ksp_esid_data &= ~SLB_ESID_V;

	/* Only third entry (stack) may change here so only resave that */
	slb_shadow_update(get_paca()->kstack, lflags, 2);

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
		        "r"(mk_esid_data(VMALLOC_START, 1)),
		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
		        "r"(ksp_esid_data)
		     : "memory");
}

void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, vflags, 1);
	slb_flush_and_rebolt();
}

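/*
 * Context-switch handling.  The SLB miss handler records the ESIDs of
 * the user segments it installs in get_paca()->slb_cache[] and advances
 * slb_cache_ptr (the assumption being that the cache may overflow, in
 * which case slb_cache_ptr exceeds SLB_CACHE_ENTRIES).  switch_slb()
 * uses that record to slbie just those entries; on overflow it falls
 * back to a full flush and rebolt.
 */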
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long esid_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	if (offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data = ((unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT) | SLBIE_C;
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (esid_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	if (is_kernel_addr(pc))
		return;
	slb_allocate(pc);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	if (is_kernel_addr(stack))
		return;
	slb_allocate(stack);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	if (is_kernel_addr(unmapped_base))
		return;
	slb_allocate(unmapped_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/* Assume the instruction had a "0" immediate value, just
	 * "or" in the new value
	 */
	*insn_addr |= immed;
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}
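
/*
 * Note on the patching above: the targets are presumably the "li rN,0"
 * placeholders at slb_miss_kernel_load_linear and slb_miss_kernel_load_io
 * in the SLB miss handler.  Because their immediate field is zero,
 * OR-ing in the VSID flags turns them into "li rN,flags"; this only
 * works while the flag value fits in the 16-bit immediate field.
 */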

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
	extern unsigned int *slb_miss_kernel_load_linear;
	extern unsigned int *slb_miss_kernel_load_io;

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);

		DBG("SLB: linear LLP = %04x\n", linear_llp);
		DBG("SLB: io LLP     = %04x\n", io_llp);
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED;

	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);

	create_shadowed_slbe(VMALLOC_START, vflags, 1);

	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
	 * elsewhere, we'll call _switch() which will bolt in the new
	 * one. */
	asm volatile("isync":::"memory");
}