[PATCH] paravirt: Patch inline replacements for paravirt intercepts
arch/i386/kernel/alternative.c
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int no_replacement = 0;
static int smp_alt_once = 0;
static int debug_alternative = 0;

static int __init noreplacement_setup(char *s)
{
	no_replacement = 1;
	return 1;
}
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

28__setup("noreplacement", noreplacement_setup);
29__setup("smp-alt-boot", bootonly);
30__setup("debug-alternative", debug_alt);
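
/*
 * These register early boot command line flags: for example, booting
 * with "debug-alternative" enables the DPRINTK output below, while
 * "noreplacement" disables all patching.
 */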

#define DPRINTK(fmt, args...) do {				\
	if (debug_alternative)					\
		printk(KERN_DEBUG fmt, args);			\
} while (0)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
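
/* Each table below is indexed by desired nop length: table[n] points at
   an n-byte nop sequence, hence the accumulating offsets in the
   initializers. */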
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64
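
/* On 64-bit, the K8 nops are used unconditionally (editorial note,
   inferred from the single table returned below). */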
extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};
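
/* Scanned in order by find_nop_table(); the { -1, NULL } entry
   terminates the scan. */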

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
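
/* Example: with ASM_NOP_MAX == 8, nop_out(addr, 10) emits an 8-byte nop
   followed by a 2-byte nop. */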

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
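
/*
 * For reference, a sketch of the struct alt_instr fields used below (the
 * real definition lives in <asm/alternative.h>; the layout shown here is
 * illustrative, not authoritative):
 *
 *	struct alt_instr {
 *		u8 *instr;		(original instruction)
 *		u8 *replacement;	(replacement sequence)
 *		u8  cpuid;		(cpuid feature bit to test)
 *		u8  instrlen;		(length of the original)
 *		u8  replacementlen;	(length of replacement, <= instrlen)
 *	};
 */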

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}

#ifdef CONFIG_SMP

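/* alternatives_smp_save() stashes the original SMP instructions in the
   scratch space after each replacement; alternatives_smp_apply() copies
   them back when the system goes SMP again. */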
static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		memcpy(a->replacement + a->replacementlen,
		       a->instr,
		       a->instrlen);
	}
}

static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	for (a = start; a < end; a++) {
		memcpy(a->instr,
		       a->replacement + a->replacementlen,
		       a->instrlen);
	}
}
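
/*
 * The smp_locks section records the address of each "lock" prefix byte.
 * On SMP the byte is 0xf0; on UP it is overwritten with a one-byte nop,
 * turning e.g. "lock; incl (%mem)" into a plain "incl (%mem)".
 */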
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		nop_out(*ptr, 1);
	}
}

struct smp_alt_module {
	/* the module owning these lock prefixes (NULL for core kernel) */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
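
/* smp_alt_modules is protected by the smp_alt spinlock; entries are added
   as code with lock prefixes is registered and removed again in
   alternatives_smp_module_del(). */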

void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (no_replacement)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
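
/*
 * Switch the kernel text between its SMP and UP variants at runtime.
 * The BUG_ON below documents the contract: switching to UP is only
 * legal while at most one CPU is online.
 */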
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not-yet-fixed binutils section handling bug prevents
	 * alternatives replacement from working reliably, so turn
	 * it off:
	 */
	printk(KERN_WARNING "lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (no_replacement || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		alternatives_smp_apply(__smp_alt_instructions,
				       __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		apply_alternatives(__smp_alt_instructions,
				   __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif

#ifdef CONFIG_PARAVIRT
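/*
 * For reference, a sketch of the patch-site record consumed below (the
 * real struct paravirt_patch comes from the paravirt headers; the layout
 * shown here is illustrative, not authoritative):
 *
 *	struct paravirt_patch {
 *		u8 *instr;	(site to patch)
 *		u8 instrtype;	(which paravirt operation this site calls)
 *		u8 len;		(room available at the site)
 *		u16 clobbers;	(registers the site may clobber)
 *	};
 */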
void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
{
	struct paravirt_patch *p;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);
#ifdef CONFIG_DEBUG_PARAVIRT
		{
			int i;

			/* Deliberately clobber regs using "not %reg" to find bugs. */
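			/* Encoding note: 0xf7 0xd0 is "not %eax"; or-ing i
			   into the low ModRM bits yields %eax, %ecx, %edx
			   for i = 0, 1, 2. */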
			for (i = 0; i < 3; i++) {
				if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
					memcpy(p->instr + used, "\xf7\xd0", 2);
					p->instr[used+1] |= i;
					used += 2;
				}
			}
		}
#endif
		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following instructions */
	sync_core();
}
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	unsigned long flags;

	if (no_replacement) {
		printk(KERN_INFO "(SMP-)alternatives turned off\n");
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
		return;
	}

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}