kernel/jump_label.c
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/jump_label.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

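/* True if @key has at least one active user (enable count non-zero). */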
bool jump_label_enabled(struct jump_label_key *key)
{
        return !!atomic_read(&key->enabled);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct jump_label_key *key, int enable);

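/*
 * Take a reference on @key. The fast path only bumps the count when the
 * key is already enabled; the 0 -> 1 transition takes jump_label_mutex
 * so the branch sites are patched exactly once.
 */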
void jump_label_inc(struct jump_label_key *key)
{
        if (atomic_inc_not_zero(&key->enabled))
                return;

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0)
                jump_label_update(key, JUMP_LABEL_ENABLE);
        atomic_inc(&key->enabled);
        jump_label_unlock();
}

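/*
 * Drop a reference on @key. Only the 1 -> 0 transition takes the mutex.
 * With a non-zero @rate_limit the disable is deferred: the count is
 * bumped back up and @work re-runs the decrement after the timeout,
 * batching rapid enable/disable cycles into a single code patch.
 */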
static void __jump_label_dec(struct jump_label_key *key,
                unsigned long rate_limit, struct delayed_work *work)
{
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
                return;

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else
                jump_label_update(key, JUMP_LABEL_DISABLE);

        jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct jump_label_key_deferred *key =
                container_of(work, struct jump_label_key_deferred, work.work);
        __jump_label_dec(&key->key, 0, NULL);
}

void jump_label_dec(struct jump_label_key *key)
{
        __jump_label_dec(key, 0, NULL);
}

void jump_label_dec_deferred(struct jump_label_key_deferred *key)
{
        __jump_label_dec(&key->key, key->timeout, &key->work);
}

void jump_label_rate_limit(struct jump_label_key_deferred *key,
                unsigned long rl)
{
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}

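/*
 * Example (illustrative only; the hyp_* names are hypothetical and not
 * part of the kernel): a subsystem that flips a key at high frequency
 * can batch the expensive disable-side patching with a deferred key,
 * here allowing at most roughly one disable per second:
 *
 *      static struct jump_label_key_deferred hyp_key;
 *
 *      void hyp_init(void)
 *      {
 *              jump_label_rate_limit(&hyp_key, HZ);
 *      }
 *
 *      void hyp_get(void) { jump_label_inc(&hyp_key.key); }
 *      void hyp_put(void) { jump_label_dec_deferred(&hyp_key); }
 */
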
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
                entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

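/*
 * Scan [iter_start, iter_stop) for an entry whose patch site overlaps
 * the byte range [start, end] (note: @end is inclusive here).
 */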
static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                            enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

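/*
 * Patch every live entry belonging to @key. The entry tables are sorted
 * by key, so one key's entries are contiguous: walk forward from @entry
 * until the key changes or @stop is reached.
 */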
static void __jump_label_update(struct jump_label_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop, int enable)
{
        for (; (entry < stop) &&
              (entry->key == (jump_label_t)(unsigned long)key);
              entry++) {
                /*
                 * An entry->code of 0 marks a patch site in discarded
                 * module init text, see jump_label_invalidate_module_init();
                 * kernel_text_address() additionally filters out core
                 * kernel init code that has been freed.
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, enable);
        }
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct jump_label_key *key = NULL;
        struct jump_entry *iter;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct jump_label_key *iterk;

                iterk = (struct jump_label_key *)(unsigned long)iter->key;
                arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
                                                 JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
                if (iterk == key)
                        continue;

                key = iterk;
                key->entries = iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        jump_label_unlock();
}

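/*
 * Consumer-side sketch (illustrative; the declaration and branch macros
 * live in <linux/jump_label.h> and have changed across kernel versions,
 * and the hyp_* names are hypothetical):
 *
 *      static struct jump_label_key hyp_key;
 *
 *      void hyp_hot_path(void)
 *      {
 *              if (static_branch(&hyp_key))
 *                      hyp_do_extra_work();
 *      }
 *
 * The branch compiles to a NOP until jump_label_inc(&hyp_key) patches
 * the sites recorded in the jump table; jump_label_dec() reverses it.
 */
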
#ifdef CONFIG_MODULES

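/*
 * Links a key to jump entries that live in a module other than the one
 * defining the key: @entries points at that module's first entry for
 * the key, and @next chains additional modules using the same key.
 */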
struct jump_label_mod {
        struct jump_label_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        mod = __module_text_address((unsigned long)start);
        if (!mod)
                return 0;

        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct jump_label_key *key, int enable)
{
        struct jump_label_mod *mod = key->next;

        while (mod) {
                struct module *m = mod->mod;

                __jump_label_update(key, mod->entries,
                                    m->jump_entries + m->num_jump_entries,
                                    enable);
                mod = mod->next;
        }
}

/**
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct jump_label_key *iterk;

                iterk = (struct jump_label_key *)(unsigned long)iter->key;
                arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
                                JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
        }
}

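/*
 * Register @mod's jump entries. A key defined in @mod itself takes
 * ownership of its entries directly; for keys owned by the core kernel
 * or by another module, a jump_label_mod node is chained onto the
 * owning key so jump_label_update() can reach these entries as well.
 */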
static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct jump_label_key *key = NULL;
        struct jump_label_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;

                key = (struct jump_label_key *)(unsigned long)iter->key;

                if (__module_address(iter->key) == mod) {
                        atomic_set(&key->enabled, 0);
                        key->entries = iter;
                        key->next = NULL;
                        continue;
                }

                jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;

                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                if (jump_label_enabled(key))
                        __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct jump_label_key *key = NULL;
        struct jump_label_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;

                key = (struct jump_label_key *)(unsigned long)iter->key;

                if (__module_address(iter->key) == mod)
                        continue;

                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

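/*
 * Module init text is discarded once the module goes live. Zero the
 * code address of any entry that pointed into it so later updates skip
 * the stale site (see the check in __jump_label_update()).
 */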
static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/**
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                                __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}
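
/*
 * Example (illustrative; hyp_patch_text() is hypothetical): a kernel
 * text patcher checks for jump label sites before rewriting code.
 *
 *      jump_label_lock();
 *      if (jump_label_text_reserved(addr, addr + size - 1))
 *              ret = -EBUSY;
 *      else
 *              ret = hyp_patch_text(addr, insn, size);
 *      jump_label_unlock();
 */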

static void jump_label_update(struct jump_label_key *key, int enable)
{
        struct jump_entry *entry = key->entries, *stop = __stop___jump_table;

#ifdef CONFIG_MODULES
        struct module *mod = __module_address((jump_label_t)key);

        __jump_label_mod_update(key, enable);

        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
#endif
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop, enable);
}

#endif /* HAVE_JUMP_LABEL */