[PATCH] i386: Remove smp_alt_instructions
arch/i386/kernel/alternative.c
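For context: this file implements the back end of the i386 "alternatives" mechanism. Call sites mark an instruction with a CPU-feature-gated replacement, and the boot code below rewrites it in place. A typical caller looks like the following sketch, based on the mb() definition in asm-i386/system.h of this era (the header is authoritative):

	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
				 X86_FEATURE_XMM2)

The alternative() macro emits the original instruction plus a record in the .altinstructions section; apply_alternatives() below walks those records at boot and installs the replacement when the feature bit is set.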
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int smp_alt_once = 0;
static int debug_alternative = 0;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
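/* intel_nops[i] points at an i-byte nop sequence; the sequences are
   laid out back to back in .data, hence the cumulative offsets below.
   The k8_nops and k7_nops tables follow the same pattern. */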
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

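/* Pick the nop table for the boot CPU; fall back to the generic
   (Intel) nops when no vendor-specific feature bit matches. */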
static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

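/* Pad a patched site with nops, using the longest available entries:
   e.g. len == 10 becomes one 8-byte nop followed by one 2-byte nop
   (ASM_NOP_MAX is 8 here). */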
static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not
   handled. Tough. Make sure you disable such features by hand. */

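/*
 * For reference, the records walked below have this shape in
 * <asm/alternative.h> of this era (from recollection; the header is
 * authoritative):
 *
 *	struct alt_instr {
 *		u8 *instr;		- original instruction
 *		u8 *replacement;	- replacement bytes
 *		u8  cpuid;		- feature bit gating the replacement
 *		u8  instrlen;		- length of original, >= replacementlen
 *		u8  replacementlen;
 *		u8  pad;
 *	};
 */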
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}

#ifdef CONFIG_SMP

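/* Restore the lock prefix (0xf0) at each recorded site that still
   lies within [text, text_end]; the unlock variant below replaces
   the prefix byte with a 1-byte nop instead. */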
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0;	/* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		nop_out(*ptr, 1);
	}
}

struct smp_alt_module {
	/* owning module; NULL for the core kernel */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);

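/* Record a module's lock-prefix sites so they can be flipped between
   SMP and UP form later; in smp_alt_once mode no state is kept, the
   sites are switched to UP form right away on a uniprocessor. */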
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

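/* Switch all registered lock-prefix sites between their SMP and UP
   forms, e.g. when a second CPU comes online or the last sibling
   goes away. */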
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not-yet-fixed binutils section-handling bug prevents
	 * alternatives replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif

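/* Let paravirt_ops.patch() rewrite each marked instruction sequence;
   whatever space the backend leaves unused is padded with nops. */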
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
{
	struct paravirt_patch *p;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);
#ifdef CONFIG_DEBUG_PARAVIRT
		{
			int i;
			/* Deliberately clobber regs using "not %reg" to find bugs. */
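			/* 0xf7 0xd0 encodes "not %eax"; OR-ing i into the
			   modrm byte selects %eax, %ecx or %edx. */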
			for (i = 0; i < 3; i++) {
				if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
					memcpy(p->instr + used, "\xf7\xd0", 2);
					p->instr[used+1] |= i;
					used += 2;
				}
			}
		}
#endif
		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following instructions */
	sync_core();
}
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

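/* Boot-time entry point: patch the core kernel with interrupts off,
   then decide whether SMP-alternatives state must be kept around for
   CPU hotplug or can be applied once and freed. */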
void __init alternative_instructions(void)
{
	unsigned long flags;

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				__pa_symbol(&__smp_locks),
				__pa_symbol(&__smp_locks_end));
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}
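
Usage note: the two __setup() hooks in this file are driven from the kernel command line, for example (illustrative):

	linux ... smp-alt-boot debug-alternative

smp-alt-boot forces the patch-once-at-boot behaviour even on CPU-hotplug-capable kernels; debug-alternative enables the DPRINTK tracing above.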