/* Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/mmiotrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU).
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}
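
/*
 * Every armed page hashes into one of the KMMIO_PAGE_TABLE_SIZE (16)
 * buckets above, so a fault only has to walk a single bucket's list,
 * under the RCU read lock. For example, get_kmmio_fault_page() below
 * effectively does:
 *
 *	head = kmmio_page_list(page & PAGE_MASK);
 */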

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem:
 * could use the existing prio tree code, or one of the possibly better
 * implementations:
 * - The Interval Skip List: A Data Structure for Finding All Intervals
 *   That Overlap a Point (might be simple)
 * - Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr <= (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *p;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(p, head, list) {
		if (p->page == page)
			return p;
	}
	return NULL;
}

static void set_page_present(unsigned long addr, bool present,
				unsigned int *pglevel)
{
	pteval_t pteval;
	pmdval_t pmdval;
	unsigned int level;
	pmd_t *pmd;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte) {
		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
		return;
	}

	if (pglevel)
		*pglevel = level;

	switch (level) {
	case PG_LEVEL_2M:
		pmd = (pmd_t *)pte;
		pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
		if (present)
			pmdval |= _PAGE_PRESENT;
		set_pmd(pmd, __pmd(pmdval));
		break;

	case PG_LEVEL_4K:
		pteval = pte_val(*pte) & ~_PAGE_PRESENT;
		if (present)
			pteval |= _PAGE_PRESENT;
		set_pte_atomic(pte, __pte(pteval));
		break;

	default:
		pr_err("kmmio: unexpected page level 0x%x.\n", level);
		return;
	}

	__flush_tlb_one(addr);
}
75bb8835 | 147 | |
13829537 | 148 | /** Mark the given page as not present. Access to it will trigger a fault. */ |
790e2a29 | 149 | static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel) |
13829537 | 150 | { |
790e2a29 | 151 | set_page_present(page & PAGE_MASK, false, pglevel); |
8b7d89d0 PP |
152 | } |
153 | ||
0fd0e3da | 154 | /** Mark the given page as present. */ |
790e2a29 | 155 | static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel) |
8b7d89d0 | 156 | { |
790e2a29 | 157 | set_page_present(page & PAGE_MASK, true, pglevel); |
8b7d89d0 PP |
158 | } |
159 | ||
/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could already be executing inside
 * a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as the page fault is taken through an
 * interrupt gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		disarm_kmmio_fault_page(faultpage->page, NULL);
		if (addr == ctx->addr) {
			/*
			 * On SMP we sometimes get recursive probe hits on the
			 * same address. Context is already saved, fall out.
			 */
			pr_debug("kmmio: duplicate probe hit on CPU %d, for "
					"address 0x%08lx.\n",
					smp_processor_id(), addr);
			ret = 1;
			goto no_kmmio_ctx;
		}
		/*
		 * Prevent overwriting an already in-flight context.
		 * This should not happen; let's hope disarming at least
		 * prevents a panic.
		 */
		pr_emerg("kmmio: recursive probe hit on CPU %d, "
				"for address 0x%08lx. Ignoring.\n",
				smp_processor_id(), addr);
		pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
				ctx->addr);
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now set the present bit in the PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage->page, NULL);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Interrupts are disabled on entry as trap 1 (the debug trap) is an
 * interrupt gate and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
				smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	arm_kmmio_fault_page(ctx->fpage->page, NULL);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}
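
/*
 * Life cycle of one trace event, as implemented by the two handlers
 * above: the armed page is not present, so the first access faults into
 * kmmio_handler(), which saves the per-cpu context, runs the probe's
 * pre_handler, sets TF and makes the page present again. The faulting
 * instruction is then retried and completes, the debug trap fires, and
 * post_kmmio_handler() runs the post_handler, re-arms the page and
 * restores the saved flags.
 */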

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f->page, NULL);
		f->count++;
		return 0;
	}

	f = kmalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;
	list_add_rcu(&f->list, kmmio_page_list(f->page));

	arm_kmmio_fault_page(f->page, NULL);

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f->page, NULL);
		f->release_next = *release_list;
		*release_list = f;
	}
}

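/*
 * Fault page reference counting, as implemented by the two helpers
 * above: registering two probes that touch the same page leaves
 * f->count == 2. The page is armed only on the 0 -> 1 transition and
 * queued for disarming and release only on the 1 -> 0 transition, so
 * overlapping probes may be registered and unregistered independently.
 */
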
/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can only result from
 * programming mistakes: accessing addresses before the beginning or past
 * the end of a mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("kmmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
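
/*
 * Example usage (a sketch): a client such as mmiotrace fills in a
 * struct kmmio_probe covering an ioremapped range and registers it.
 * The names example_pre, example_post and example_probe are
 * hypothetical; the handler signatures follow the call sites in
 * kmmio_handler() and post_kmmio_handler() above.
 */
#if 0
static void example_pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	/* Called before the faulting access is single-stepped. */
	pr_info("kmmio example: MMIO access at 0x%08lx\n", addr);
}

static void example_post(struct kmmio_probe *p, unsigned long condition,
						struct pt_regs *regs)
{
	/* Called after the traced access has completed. */
}

static struct kmmio_probe example_probe = {
	.addr = 0,		/* start of the ioremapped area to trace */
	.len = PAGE_SIZE,	/* page-unaligned lengths are allowed */
	.pre_handler = example_pre,
	.post_handler = example_post,
};

/* register_kmmio_probe(&example_probe) returns -EEXIST if addr is taken. */
#endif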

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	while (p) {
		struct kmmio_fault_page *next = p->release_next;
		BUG_ON(p->count);
		kfree(p);
		p = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;
	spin_lock_irqsave(&kmmio_lock, flags);
	while (p) {
		if (!p->count) {
			/* The page stays on the release list, to be freed. */
			list_del_rcu(&p->list);
			prevp = &p->release_next;
		} else {
			/*
			 * The page was re-registered meanwhile; unlink it
			 * from the release list. Do not advance prevp, as
			 * p is no longer part of the list.
			 */
			*prevp = p->release_next;
		}
		p = p->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after an RCU grace
 *    period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and decide the
	 * fault is not a kmmio fault, when it actually is. This would lead
	 * to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
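
/*
 * Example teardown (a sketch): per the comment above, wait out an RCU
 * grace period after unregistering before releasing the probe, e.g.
 * with the hypothetical example_probe from the earlier example:
 *
 *	unregister_kmmio_probe(&example_probe);
 *	synchronize_rcu();
 *
 * Only after synchronize_rcu() returns is it certain that the pre/post
 * handlers will not be called anymore, and the struct kmmio_probe may
 * be freed or reused.
 */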

static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
								void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG && (arg->err & DR_STEP))
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

static int __init init_kmmio(void)
{
	int i;
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);
	return register_die_notifier(&nb_die);
}
fs_initcall(init_kmmio); /* should be before device_initcall() */