/*
 * Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/version.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU).
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
								void *args);

static DEFINE_MUTEX(kmmio_init_mutex);
static DEFINE_SPINLOCK(kmmio_lock);

/* These are protected by kmmio_lock */
static int kmmio_initialized;
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/* protected by kmmio_init_mutex */
static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

/**
 * Makes sure kmmio is initialized and usable.
 * This must be called before any other kmmio function defined here.
 * May sleep.
 */
void reference_kmmio(void)
{
	mutex_lock(&kmmio_init_mutex);
	spin_lock_irq(&kmmio_lock);
	if (!kmmio_initialized) {
		int i;
		for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
			INIT_LIST_HEAD(&kmmio_page_table[i]);
		if (register_die_notifier(&nb_die))
			BUG();
	}
	kmmio_initialized++;
	spin_unlock_irq(&kmmio_lock);
	mutex_unlock(&kmmio_init_mutex);
}
EXPORT_SYMBOL_GPL(reference_kmmio);

/**
 * Clean up kmmio after use. This must be called for every call to
 * reference_kmmio(). All probes registered after the corresponding
 * reference_kmmio() must have been unregistered when calling this.
 * May sleep.
 */
void unreference_kmmio(void)
{
	bool unreg = false;

	mutex_lock(&kmmio_init_mutex);
	spin_lock_irq(&kmmio_lock);

	if (kmmio_initialized == 1) {
		BUG_ON(is_kmmio_active());
		unreg = true;
	}
	kmmio_initialized--;
	BUG_ON(kmmio_initialized < 0);
	spin_unlock_irq(&kmmio_lock);

	if (unreg)
		unregister_die_notifier(&nb_die); /* calls sync_rcu() */
	mutex_unlock(&kmmio_init_mutex);
}
EXPORT_SYMBOL(unreference_kmmio);
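
/*
 * Illustrative sketch, not part of the original file: callers are
 * expected to pair reference_kmmio() and unreference_kmmio(), typically
 * at module init/exit. The function names below are hypothetical, and
 * the block is kept under #if 0 so it is never compiled.
 */
#if 0
static int __init mmio_client_init(void)
{
	reference_kmmio();	/* must precede any other kmmio call; may sleep */
	/* register_kmmio_probe() calls may follow from here on */
	return 0;
}

static void __exit mmio_client_exit(void)
{
	/* all probes registered after reference_kmmio() must be gone by now */
	unreference_kmmio();	/* may unregister the die notifier */
}
#endif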

/*
 * This is basically a dynamic stabbing problem. The existing prio tree
 * code could be used, or one of these possibly better implementations:
 * "The Interval Skip List: A Data Structure for Finding All Intervals
 * That Overlap a Point" (might be simple)
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		/* the probed range is [addr, addr + len) */
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *p;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(p, head, list) {
		if (p->page == page)
			return p;
	}
	return NULL;
}

/** Mark the given page as not present. Access to it will trigger a fault. */
static void arm_kmmio_fault_page(unsigned long page, int *page_level)
{
	unsigned long address = page & PAGE_MASK;
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("kmmio: Error in %s: no pte for page 0x%08lx\n",
							__func__, page);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pmd_t *pmd = (pmd_t *)pte;
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_PRESENT));
	} else {
		/* PG_LEVEL_4K */
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	}

	if (page_level)
		*page_level = level;

	__flush_tlb_one(page);
}

/** Mark the given page as present. */
static void disarm_kmmio_fault_page(unsigned long page, int *page_level)
{
	unsigned long address = page & PAGE_MASK;
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("kmmio: Error in %s: no pte for page 0x%08lx\n",
							__func__, page);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pmd_t *pmd = (pmd_t *)pte;
		set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_PRESENT));
	} else {
		/* PG_LEVEL_4K */
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
	}

	if (page_level)
		*page_level = level;

	__flush_tlb_one(page);
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing especially
 * within a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as the page fault (trap 14) is taken
 * through an interrupt gate, and they remain disabled throughout this
 * function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. In the latter case all hell breaks loose.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		/*
		 * Prevent overwriting already in-flight context.
		 * If this page fault really was due to kmmio trap,
		 * all hell breaks loose.
		 */
		pr_emerg("kmmio: recursive probe hit on CPU %d, "
				"for address 0x%08lx. Ignoring.\n",
				smp_processor_id(), addr);
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (TF_MASK|IF_MASK));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= TF_MASK;
	regs->flags &= ~IF_MASK;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage->page, NULL);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1;

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return 0; /* page fault not handled by kmmio */
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active)
		goto out;

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	arm_kmmio_fault_page(ctx->fpage->page, NULL);

	regs->flags &= ~TF_MASK;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & TF_MASK))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f->page, NULL);
		f->count++;
		return 0;
	}

	f = kmalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;
	list_add_rcu(&f->list, kmmio_page_list(f->page));

	arm_kmmio_fault_page(f->page, NULL);

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f->page, NULL);
		f->release_next = *release_list;
		*release_list = f;
	}
}

int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < p->len) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("kmmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);

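/*
 * Illustrative sketch, not part of the original file: declaring and
 * registering a probe over one page. The handler signatures mirror the
 * calls made from kmmio_handler() and post_kmmio_handler() above; the
 * names and the base address are hypothetical. Kept under #if 0 so it
 * is never compiled.
 */
#if 0
static void example_pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	/* Runs with the page disarmed, before the access is single-stepped. */
	pr_info("kmmio example: access at 0x%08lx\n", addr);
}

static void example_post(struct kmmio_probe *p, unsigned long condition,
						struct pt_regs *regs)
{
	/* Runs from the debug trap, after the faulting access completed. */
}

static struct kmmio_probe example_probe = {
	.addr = 0xd0000000UL,	/* hypothetical ioremapped MMIO address */
	.len = PAGE_SIZE,
	.pre_handler = example_pre,
	.post_handler = example_post,
};

/* after reference_kmmio(): register_kmmio_probe(&example_probe); */
#endif
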
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	while (p) {
		struct kmmio_fault_page *next = p->release_next;
		BUG_ON(p->count);
		kfree(p);
		p = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;
	spin_lock_irqsave(&kmmio_lock, flags);
	while (p) {
		if (!p->count) {
			list_del_rcu(&p->list);
			prevp = &p->release_next;
		} else {
			/*
			 * The page was re-registered meanwhile; unlink it
			 * from the release list without advancing prevp,
			 * as this entry is no longer part of the list.
			 */
			*prevp = p->release_next;
		}
		p = p->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs via RCU.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < p->len) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, kmmio page fault handler might
	 * not find the respective kmmio_fault_page and determine it's not
	 * a kmmio fault, when it actually is. This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
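
/*
 * Illustrative sketch, not part of the original file: the caller-side
 * release ordering described above, reusing the hypothetical
 * example_probe from the earlier sketch. Kept under #if 0 so it is
 * never compiled.
 */
#if 0
static void example_teardown(void)
{
	unregister_kmmio_probe(&example_probe);
	synchronize_rcu();	/* callbacks can no longer touch the probe */
	/* example_probe may now be freed or reused */
	unreference_kmmio();
}
#endif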

static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
								void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG)
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}