[PATCH] lockdep: x86 smp alternatives workaround
arch/i386/kernel/alternative.c
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int no_replacement = 0;
static int smp_alt_once = 0;
static int debug_alternative = 0;

static int __init noreplacement_setup(char *s)
{
	no_replacement = 1;
	return 1;
}
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

__setup("noreplacement", noreplacement_setup);
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);
#define DPRINTK(fmt, args...) do {				\
	if (debug_alternative)					\
		printk(KERN_DEBUG fmt, args);			\
} while (0)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
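/*
 * Indexing scheme (applies to the k8/k7 tables below as well): the n-byte
 * nops are laid out back to back in the asm blob above, so entry n starts
 * at offset 1 + 2 + ... + (n - 1) and xxx_nops[n] points at a nop
 * sequence that is exactly n bytes long.
 */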
#endif

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char **find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char **find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

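/*
 * For reference: each entry in the tables walked below is a
 * struct alt_instr, declared in <asm/alternative.h>.  A sketch of the
 * layout, inferred from how the fields are used in this file (exact
 * field order and padding live in the header):
 *
 *	struct alt_instr {
 *		u8 *instr;		-- original instruction
 *		u8 *replacement;	-- replacement instruction
 *		u8  cpuid;		-- feature bit that selects it
 *		u8  instrlen;		-- length of the original
 *		u8  replacementlen;	-- length of the replacement (<= instrlen)
 *	};
 */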
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	unsigned char **noptable = find_nop_table();
	struct alt_instr *a;
	u8 *instr;
	int diff, i, k;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START +
				     (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		/* Pad the rest with nops (at the fixed-up address) */
		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
			k = diff;
			if (k > ASM_NOP_MAX)
				k = ASM_NOP_MAX;
			memcpy(instr + i, noptable[k], k);
		}
	}
}

#ifdef CONFIG_SMP

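/*
 * When an SMP kernel boots on a uniprocessor machine, the SMP-only
 * instruction variants can be patched out and, unless smp_alt_once is
 * set, restored again if more CPUs show up later (see
 * alternatives_smp_switch() below).  alternatives_smp_save() stashes
 * the original instructions in the spare space after each replacement;
 * alternatives_smp_apply() copies them back.
 */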
static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		memcpy(a->replacement + a->replacementlen,
		       a->instr,
		       a->instrlen);
	}
}

static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	for (a = start; a < end; a++) {
		memcpy(a->instr,
		       a->replacement + a->replacementlen,
		       a->instrlen);
	}
}

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0;	/* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	unsigned char **noptable = find_nop_table();
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = noptable[1][0];	/* single-byte nop */
	}
}

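/*
 * The __smp_locks[] pointers patched above are emitted at build time:
 * the LOCK_PREFIX macro in <asm/alternative.h> wraps the lock prefix in
 * inline asm that also records the prefix byte's address in the
 * .smp_locks section.  A rough sketch of what that asm expands to
 * (exact directives vary by kernel version):
 *
 *	.section .smp_locks, "a"
 *	.align 4
 *	.long 661f		# address of the lock prefix below
 *	.previous
 *  661:	lock; ...
 */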
struct smp_alt_module {
	/* owner: the module that registered these locks, NULL for vmlinux */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);

void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (no_replacement)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not-yet-fixed binutils section handling bug prevents the
	 * alternatives replacement from working reliably, so turn
	 * it off:
	 */
	printk(KERN_INFO "lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (no_replacement || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		alternatives_smp_apply(__smp_alt_instructions,
				       __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		apply_alternatives(__smp_alt_instructions,
				   __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif /* CONFIG_SMP */

void __init alternative_instructions(void)
{
	if (no_replacement) {
		printk(KERN_INFO "(SMP-)alternatives turned off\n");
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
		return;
	}
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (num_possible_cpus() == 1) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
}