arch/x86/mm/kmmio.c
/* Support for MMIO probes.
 * Borrows a lot of code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
        struct list_head list;
        struct kmmio_fault_page *release_next;
        unsigned long addr; /* the requested address */
        pteval_t old_presence; /* page presence prior to arming */
        bool armed;

        /*
         * Number of times this page has been registered as a part
         * of a probe. If zero, page is disarmed and this may be freed.
         * Used only by writers (RCU) and post_kmmio_handler().
         * Protected by kmmio_lock, when linked into kmmio_page_table.
         */
        int count;

        bool scheduled_for_release;
};

struct kmmio_delayed_release {
        struct rcu_head rcu;
        struct kmmio_fault_page *release_list;
};

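/*
 * Per-CPU state while a probe hit is being single-stepped: the armed page
 * and the probe that faulted, the page-aligned fault address, and the
 * saved TF/IF flags that post_kmmio_handler() restores.
 */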
struct kmmio_context {
        struct kmmio_fault_page *fpage;
        struct kmmio_probe *probe;
        unsigned long saved_flags;
        unsigned long addr;
        int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

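/*
 * Return the kmmio_page_table bucket for the page containing addr, or NULL
 * if the address is not mapped at all. The hash is taken over the base
 * address of the mapping at its actual page-table level.
 */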
static struct list_head *kmmio_page_list(unsigned long addr)
{
        unsigned int l;
        pte_t *pte = lookup_address(addr, &l);

        if (!pte)
                return NULL;
        addr &= page_level_mask(l);

        return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem. The linear list below works,
 * but better structures exist, e.g. the existing prio tree code, or:
 * "The Interval Skip List: A Data Structure for Finding All Intervals That
 * Overlap a Point" (might be simple), or
 * "Space Efficient Dynamic Stabbing with Fast Queries" by Mikkel Thorup.
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
        struct kmmio_probe *p;
        list_for_each_entry_rcu(p, &kmmio_probes, list) {
                if (addr >= p->addr && addr < (p->addr + p->len))
                        return p;
        }
        return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
        struct list_head *head;
        struct kmmio_fault_page *f;
        unsigned int l;
        pte_t *pte = lookup_address(addr, &l);

        if (!pte)
                return NULL;
        addr &= page_level_mask(l);
        head = kmmio_page_list(addr);
        list_for_each_entry_rcu(f, head, list) {
                if (f->addr == addr)
                        return f;
        }
        return NULL;
}

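/*
 * Helpers to clear or restore the _PAGE_PRESENT bit at PMD (2M) or PTE (4k)
 * level. When clearing, the previous presence bit is saved in *old so that
 * a later call with clear == false can put it back.
 */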
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
        pmdval_t v = pmd_val(*pmd);
        if (clear) {
                *old = v & _PAGE_PRESENT;
                v &= ~_PAGE_PRESENT;
        } else  /* presume this has been called with clear==true previously */
                v |= *old;
        set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
        pteval_t v = pte_val(*pte);
        if (clear) {
                *old = v & _PAGE_PRESENT;
                v &= ~_PAGE_PRESENT;
        } else  /* presume this has been called with clear==true previously */
                v |= *old;
        set_pte_atomic(pte, __pte(v));
}

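/*
 * Clear (arm) or restore (disarm) the present bit of the page backing
 * f->addr at whatever level it is mapped, then flush the stale TLB entry.
 * Returns 0 on success, -1 if the address is unmapped or the page level
 * is not handled.
 */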
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
        unsigned int level;
        pte_t *pte = lookup_address(f->addr, &level);

        if (!pte) {
                pr_err("no pte for addr 0x%08lx\n", f->addr);
                return -1;
        }

        switch (level) {
        case PG_LEVEL_2M:
                clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
                break;
        case PG_LEVEL_4K:
                clear_pte_presence(pte, clear, &f->old_presence);
                break;
        default:
                pr_err("unexpected page level 0x%x.\n", level);
                return -1;
        }

        __flush_tlb_one(f->addr);
        return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. The RCU read lock is assumed held, so the
 * struct will not disappear unexpectedly. Furthermore, the caller must
 * guarantee that double arming of the same virtual address (page) cannot
 * occur.
 *
 * Double disarming, on the other hand, is allowed, and may occur when a
 * fault and an mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        int ret;
        WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
        if (f->armed) {
                pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
                           f->addr, f->count, !!f->old_presence);
        }
        ret = clear_page_presence(f, true);
        WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
                  f->addr);
        f->armed = true;
        return ret;
}

/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        int ret = clear_page_presence(f, false);
        WARN_ONCE(ret < 0,
                  KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
        f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could already be executing inside
 * a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry, as the page fault is delivered through
 * an interrupt gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
        struct kmmio_context *ctx;
        struct kmmio_fault_page *faultpage;
        int ret = 0; /* default to fault not handled */
        unsigned long page_base = addr;
        unsigned int l;
        pte_t *pte = lookup_address(addr, &l);
        if (!pte)
                return -EINVAL;
        page_base &= page_level_mask(l);

        /*
         * Preemption is now disabled to prevent process switch during
         * single stepping. We can only handle one active kmmio trace
         * per cpu, so ensure that we finish it before something else
         * gets to run. We also hold the RCU read lock over single
         * stepping to avoid looking up the probe and kmmio_fault_page
         * again.
         */
        preempt_disable();
        rcu_read_lock();

        faultpage = get_kmmio_fault_page(page_base);
        if (!faultpage) {
                /*
                 * Either this page fault is not caused by kmmio, or
                 * another CPU just pulled the kmmio probe from under
                 * our feet. The latter case should not be possible.
                 */
                goto no_kmmio;
        }

        ctx = &get_cpu_var(kmmio_ctx);
        if (ctx->active) {
                if (page_base == ctx->addr) {
                        /*
                         * A second fault on the same page means some other
                         * condition needs handling by do_page_fault(); the
                         * page really not being present is the most common.
                         */
                        pr_debug("secondary hit for 0x%08lx CPU %d.\n",
                                 addr, smp_processor_id());

                        if (!faultpage->old_presence)
                                pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
                                        addr, smp_processor_id());
                } else {
                        /*
                         * Prevent overwriting already in-flight context.
                         * This should not happen, let's hope disarming at
                         * least prevents a panic.
                         */
                        pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
                                 smp_processor_id(), addr);
                        pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
                        disarm_kmmio_fault_page(faultpage);
                }
                goto no_kmmio_ctx;
        }
        ctx->active++;

        ctx->fpage = faultpage;
        ctx->probe = get_kmmio_probe(page_base);
        ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
        ctx->addr = page_base;

        if (ctx->probe && ctx->probe->pre_handler)
                ctx->probe->pre_handler(ctx->probe, regs, addr);

        /*
         * Enable single-stepping and disable interrupts for the faulting
         * context. Local interrupts must not get enabled during stepping.
         */
        regs->flags |= X86_EFLAGS_TF;
        regs->flags &= ~X86_EFLAGS_IF;

        /* Now we set present bit in PTE and single step. */
        disarm_kmmio_fault_page(ctx->fpage);

        /*
         * If another cpu accesses the same page while we are stepping,
         * the access will not be caught. It will simply succeed and the
         * only downside is we lose the event. If this becomes a problem,
         * the user should drop to single cpu before tracing.
         */

        put_cpu_var(kmmio_ctx);
        return 1; /* fault handled */

no_kmmio_ctx:
        put_cpu_var(kmmio_ctx);
no_kmmio:
        rcu_read_unlock();
        preempt_enable_no_resched();
        return ret;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
        int ret = 0;
        struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

        if (!ctx->active) {
                /*
                 * Debug traps without an active context are due either to
                 * something external causing them (e.g. using a debugger
                 * while mmio tracing is enabled), or to erroneous behaviour.
                 */
                pr_warning("unexpected debug trap on CPU %d.\n",
                           smp_processor_id());
                goto out;
        }

        if (ctx->probe && ctx->probe->post_handler)
                ctx->probe->post_handler(ctx->probe, condition, regs);

        /* Prevent racing against release_kmmio_fault_page(). */
        spin_lock(&kmmio_lock);
        if (ctx->fpage->count)
                arm_kmmio_fault_page(ctx->fpage);
        spin_unlock(&kmmio_lock);

        regs->flags &= ~X86_EFLAGS_TF;
        regs->flags |= ctx->saved_flags;

        /* These were acquired in kmmio_handler(). */
        ctx->active--;
        BUG_ON(ctx->active);
        rcu_read_unlock();
        preempt_enable_no_resched();

        /*
         * If somebody else is single-stepping across a probe point, flags
         * will have TF set, in which case continue the remaining processing
         * of do_debug, as if this were not a probe hit.
         */
        if (!(regs->flags & X86_EFLAGS_TF))
                ret = 1;
out:
        put_cpu_var(kmmio_ctx);
        return ret;
}

/* You must be holding kmmio_lock. */
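/*
 * Take a reference on the fault page covering addr, arming it on the
 * 0 -> 1 transition. A new kmmio_fault_page is allocated (GFP_ATOMIC) if
 * none exists yet. Returns 0 on success, -1 on allocation or arming failure.
 */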
static int add_kmmio_fault_page(unsigned long addr)
{
        struct kmmio_fault_page *f;

        f = get_kmmio_fault_page(addr);
        if (f) {
                if (!f->count)
                        arm_kmmio_fault_page(f);
                f->count++;
                return 0;
        }

        f = kzalloc(sizeof(*f), GFP_ATOMIC);
        if (!f)
                return -1;

        f->count = 1;
        f->addr = addr;

        if (arm_kmmio_fault_page(f)) {
                kfree(f);
                return -1;
        }

        list_add_rcu(&f->list, kmmio_page_list(f->addr));

        return 0;
}

/* You must be holding kmmio_lock. */
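/*
 * Drop a reference on the fault page covering addr. On the last reference
 * the page is disarmed and queued on *release_list, so that the caller can
 * free it after an RCU grace period.
 */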
static void release_kmmio_fault_page(unsigned long addr,
                                     struct kmmio_fault_page **release_list)
{
        struct kmmio_fault_page *f;

        f = get_kmmio_fault_page(addr);
        if (!f)
                return;

        f->count--;
        BUG_ON(f->count < 0);
        if (!f->count) {
                disarm_kmmio_fault_page(f);
                if (!f->scheduled_for_release) {
                        f->release_next = *release_list;
                        *release_list = f;
                        f->scheduled_for_release = true;
                }
        }
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can only result from
 * programming mistakes: accessing addresses before the beginning or past
 * the end of a mapping.
 */
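/*
 * Minimal usage sketch, assuming a caller like mmiotrace. The handler name
 * and the mapping below are made up for illustration; the fields and calls
 * are the ones used by this file (struct kmmio_probe lives in
 * <linux/mmiotrace.h>). The pre_handler runs before the faulting access is
 * single-stepped; after unregistering, synchronize_rcu() must complete
 * before the struct kmmio_probe may be freed or reused.
 *
 *      static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *                         unsigned long addr)
 *      {
 *              pr_info("mmio access at 0x%08lx\n", addr);
 *      }
 *
 *      static struct kmmio_probe probe = {
 *              .addr = (unsigned long)ioremapped_base,
 *              .len  = mapping_size,
 *              .pre_handler = my_pre,
 *      };
 *
 *      register_kmmio_probe(&probe);
 *      ...
 *      unregister_kmmio_probe(&probe);
 *      synchronize_rcu();
 */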
int register_kmmio_probe(struct kmmio_probe *p)
{
        unsigned long flags;
        int ret = 0;
        unsigned long size = 0;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        unsigned int l;
        pte_t *pte;

        spin_lock_irqsave(&kmmio_lock, flags);
        if (get_kmmio_probe(p->addr)) {
                ret = -EEXIST;
                goto out;
        }

        pte = lookup_address(p->addr, &l);
        if (!pte) {
                ret = -EINVAL;
                goto out;
        }

        kmmio_count++;
        list_add_rcu(&p->list, &kmmio_probes);
        while (size < size_lim) {
                if (add_kmmio_fault_page(p->addr + size))
                        pr_err("Unable to set page fault.\n");
                size += page_level_size(l);
        }
out:
        spin_unlock_irqrestore(&kmmio_lock, flags);
        /*
         * XXX: What should I do here?
         * Here was a call to global_flush_tlb(), but it does not exist
         * anymore. It seems it's not needed after all.
         */
        return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);

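/*
 * Second (final) RCU callback: by now no CPU can still reference the
 * unlinked pages, so the kmmio_fault_page structs and the delayed release
 * descriptor itself can be freed.
 */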
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
        struct kmmio_delayed_release *dr = container_of(
                                                head,
                                                struct kmmio_delayed_release,
                                                rcu);
        struct kmmio_fault_page *f = dr->release_list;
        while (f) {
                struct kmmio_fault_page *next = f->release_next;
                BUG_ON(f->count);
                kfree(f);
                f = next;
        }
        kfree(dr);
}

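/*
 * First RCU callback: unlink the disarmed pages from kmmio_page_table.
 * Pages that were re-armed in the meantime (count != 0) are dropped from
 * the release list instead. A second grace period, ending in
 * rcu_free_kmmio_fault_pages(), then frees the remaining structs.
 */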
static void remove_kmmio_fault_pages(struct rcu_head *head)
{
        struct kmmio_delayed_release *dr =
                container_of(head, struct kmmio_delayed_release, rcu);
        struct kmmio_fault_page *f = dr->release_list;
        struct kmmio_fault_page **prevp = &dr->release_list;
        unsigned long flags;

        spin_lock_irqsave(&kmmio_lock, flags);
        while (f) {
                if (!f->count) {
                        list_del_rcu(&f->list);
                        prevp = &f->release_next;
                } else {
                        *prevp = f->release_next;
                        f->release_next = NULL;
                        f->scheduled_for_release = false;
                }
                f = *prevp;
        }
        spin_unlock_irqrestore(&kmmio_lock, flags);

        /* This is the real RCU destroy call. */
        call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * may you actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after another RCU
 *    grace period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
        unsigned long flags;
        unsigned long size = 0;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        struct kmmio_fault_page *release_list = NULL;
        struct kmmio_delayed_release *drelease;
        unsigned int l;
        pte_t *pte;

        pte = lookup_address(p->addr, &l);
        if (!pte)
                return;

        spin_lock_irqsave(&kmmio_lock, flags);
        while (size < size_lim) {
                release_kmmio_fault_page(p->addr + size, &release_list);
                size += page_level_size(l);
        }
        list_del_rcu(&p->list);
        kmmio_count--;
        spin_unlock_irqrestore(&kmmio_lock, flags);

        if (!release_list)
                return;

        drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
        if (!drelease) {
                pr_crit("leaking kmmio_fault_page objects.\n");
                return;
        }
        drelease->release_list = release_list;

        /*
         * This is not really RCU here. We have just disarmed a set of
         * pages so that they cannot trigger page faults anymore. However,
         * we cannot remove the pages from kmmio_page_table, because a
         * probe hit might be in flight on another CPU. The pages are
         * collected into a list, and they will be removed from
         * kmmio_page_table when it is certain that no probe hit related
         * to these pages can be in flight. An RCU grace period sounds
         * like a good choice.
         *
         * If we removed the pages too early, the kmmio page fault handler
         * might not find the respective kmmio_fault_page and decide it is
         * not a kmmio fault, when it actually is. This would lead to
         * madness.
         */
        call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);

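/*
 * Die notifier: catch the #DB trap raised by single-stepping the faulting
 * instruction and hand it to post_kmmio_handler(). x86's do_debug() passes
 * the address of dr6 through die_args::err, hence the ERR_PTR() dance.
 */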
static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
        struct die_args *arg = args;
        unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

        if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
                if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
                        /*
                         * Reset the BS bit in dr6 (pointed to by args->err)
                         * to denote completion of processing.
                         */
                        *dr6_p &= ~DR_STEP;
                        return NOTIFY_STOP;
                }

        return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
        .notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
        int i;

        for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
                INIT_LIST_HEAD(&kmmio_page_table[i]);

        return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
        int i;

        unregister_die_notifier(&nb_die);
        for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
                WARN_ONCE(!list_empty(&kmmio_page_table[i]),
                          KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
        }
}