/* Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mmiotrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */
	pteval_t old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;

	bool scheduled_for_release;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

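/*
 * Per-CPU bookkeeping for a probe hit that is currently being
 * single-stepped: filled in by kmmio_handler() and consumed by
 * post_kmmio_handler(). Only one hit can be active per CPU at a time.
 */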
struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem. The linear scan below
 * could be replaced by the existing prio tree code, or by possibly
 * better structures:
 * "The Interval Skip List: A Data Structure for Finding All Intervals
 * That Overlap a Point" (might be simple)
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(f, head, list) {
		if (f->page == page)
			return f;
	}
	return NULL;
}

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pte_atomic(pte, __pte(v));
}

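/*
 * Clear or restore the _PAGE_PRESENT bit of the page backing f,
 * dispatching on the mapping level (2M pmd or 4K pte). The previous
 * presence bit is saved into f->old_presence on clearing, so that
 * disarming can restore it, and the TLB entry is flushed afterwards.
 */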
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->page, &level);

	if (!pte) {
		pr_err("no pte for page 0x%08lx\n", f->page);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->page);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
			   f->page, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
		  f->page);
	f->armed = true;
	return ret;
}

/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);
	WARN_ONCE(ret < 0,
		  KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}

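/*
 * Overview of a probe hit, as implemented by the two handlers below:
 * an access to an armed (not-present) page faults into kmmio_handler(),
 * which runs the pre_handler, disarms the page and sets TF; the access
 * then re-executes and completes, the resulting debug trap enters
 * post_kmmio_handler(), which runs the post_handler, re-arms the page
 * and restores the saved flags.
 */
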
/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could already be executing inside
 * a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate
 * and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (addr == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(), the
			 * page really not being present is the most common.
			 */
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
			/*
			 * Prevent overwriting already in-flight context.
			 * This should not happen, let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		/*
		 * Debug traps without an active context are due either to
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled) or to erroneous behaviour.
		 */
		pr_warning("unexpected debug trap on CPU %d.\n",
			   smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set; in that case, continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		if (!f->scheduled_for_release) {
			f->release_next = *release_list;
			*release_list = f;
			f->scheduled_for_release = true;
		}
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
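
/*
 * Illustrative sketch, not part of this file: a minimal client fills in
 * a struct kmmio_probe and registers it against an ioremapped range.
 * The handler signatures follow their use in this file; the "my_*"
 * names and the base/length variables are hypothetical.
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *			   unsigned long addr)
 *	{
 *		... runs before the faulting access executes ...
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long cond,
 *			    struct pt_regs *regs)
 *	{
 *		... runs after the access has been single-stepped ...
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.addr = (unsigned long)my_ioremap_base,
 *		.len = my_mapping_len,
 *		.pre_handler = my_pre,
 *		.post_handler = my_post,
 *	};
 *
 *	register_kmmio_probe(&my_probe);
 */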

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;
	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
			f->release_next = NULL;
			f->scheduled_for_release = false;
		}
		f = *prevp;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * may you actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after an RCU grace period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	if (!release_list)
		return;

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, kmmio page fault handler might
	 * not find the respective kmmio_fault_page and determine it's not
	 * a kmmio fault, when it actually is. This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
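
/*
 * Illustrative sketch, not part of this file: tearing down a probe whose
 * struct was allocated dynamically. As the comment above requires, the
 * struct may only be freed after a grace period; "my_probe" is the
 * hypothetical pointer from the registration sketch.
 *
 *	unregister_kmmio_probe(my_probe);
 *	synchronize_rcu();
 *	kfree(my_probe);
 */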

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
			/*
			 * Reset the BS bit in dr6 (pointed to by arg->err)
			 * to denote completion of processing
			 */
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}