/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include "power.h"

#define TIMEOUT		100

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled;

/* The counter of registered wakeup events. */
static atomic_t event_count = ATOMIC_INIT(0);
/* A preserved old value of event_count. */
static unsigned int saved_count;
/* The counter of wakeup events being processed. */
static atomic_t events_in_progress = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(events_lock);

static void pm_wakeup_timer_fn(unsigned long data);

static LIST_HEAD(wakeup_sources);

/**
 * wakeup_source_create - Create a struct wakeup_source object.
 * @name: Name of the new wakeup source.
 */
struct wakeup_source *wakeup_source_create(const char *name)
{
        struct wakeup_source *ws;

        ws = kzalloc(sizeof(*ws), GFP_KERNEL);
        if (!ws)
                return NULL;

        spin_lock_init(&ws->lock);
        if (name)
                ws->name = kstrdup(name, GFP_KERNEL);

        return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);

/**
 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 * @ws: Wakeup source to destroy.
 */
void wakeup_source_destroy(struct wakeup_source *ws)
{
        if (!ws)
                return;

        spin_lock_irq(&ws->lock);
        while (ws->active) {
                spin_unlock_irq(&ws->lock);

                schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));

                spin_lock_irq(&ws->lock);
        }
        spin_unlock_irq(&ws->lock);

        kfree(ws->name);
        kfree(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);

/**
 * wakeup_source_add - Add given object to the list of wakeup sources.
 * @ws: Wakeup source object to add to the list.
 */
void wakeup_source_add(struct wakeup_source *ws)
{
        if (WARN_ON(!ws))
                return;

        setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
        ws->active = false;

        spin_lock_irq(&events_lock);
        list_add_rcu(&ws->entry, &wakeup_sources);
        spin_unlock_irq(&events_lock);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_add);

/**
 * wakeup_source_remove - Remove given object from the wakeup sources list.
 * @ws: Wakeup source object to remove from the list.
 */
void wakeup_source_remove(struct wakeup_source *ws)
{
        if (WARN_ON(!ws))
                return;

        spin_lock_irq(&events_lock);
        list_del_rcu(&ws->entry);
        spin_unlock_irq(&events_lock);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @name: Name of the wakeup source to register.
 */
struct wakeup_source *wakeup_source_register(const char *name)
{
        struct wakeup_source *ws;

        ws = wakeup_source_create(name);
        if (ws)
                wakeup_source_add(ws);

        return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);

/**
 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
 * @ws: Wakeup source object to unregister.
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
        wakeup_source_remove(ws);
        wakeup_source_destroy(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);

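/*
 * Illustrative sketch (not part of this file): typical life cycle of a wakeup
 * source used directly, i.e. without attaching it to a device. The identifier
 * my_ws and the name string are hypothetical.
 *
 *	struct wakeup_source *my_ws;
 *
 *	my_ws = wakeup_source_register("my_event_source");
 *	if (!my_ws)
 *		return -ENOMEM;
 *
 *	// On every event that should keep the system awake:
 *	__pm_stay_awake(my_ws);
 *	// ... process the event ...
 *	__pm_relax(my_ws);
 *
 *	// When the source is no longer needed:
 *	wakeup_source_unregister(my_ws);
 */
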
/**
 * device_wakeup_attach - Attach a wakeup source object to a device object.
 * @dev: Device to handle.
 * @ws: Wakeup source object to attach to @dev.
 *
 * This causes @dev to be treated as a wakeup device.
 */
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                spin_unlock_irq(&dev->power.lock);
                return -EEXIST;
        }
        dev->power.wakeup = ws;
        spin_unlock_irq(&dev->power.lock);
        return 0;
}

/**
 * device_wakeup_enable - Enable given device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Create a wakeup source object, register it and attach it to @dev.
 */
int device_wakeup_enable(struct device *dev)
{
        struct wakeup_source *ws;
        int ret;

        if (!dev || !dev->power.can_wakeup)
                return -EINVAL;

        ws = wakeup_source_register(dev_name(dev));
        if (!ws)
                return -ENOMEM;

        ret = device_wakeup_attach(dev, ws);
        if (ret)
                wakeup_source_unregister(ws);

        return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);

/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
 * After it returns, @dev will not be treated as a wakeup device any more.
 */
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
        struct wakeup_source *ws;

        spin_lock_irq(&dev->power.lock);
        ws = dev->power.wakeup;
        dev->power.wakeup = NULL;
        spin_unlock_irq(&dev->power.lock);
        return ws;
}

/**
 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 * @dev: Device to handle.
 *
 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 * object and destroy it.
 */
int device_wakeup_disable(struct device *dev)
{
        struct wakeup_source *ws;

        if (!dev || !dev->power.can_wakeup)
                return -EINVAL;

        ws = device_wakeup_detach(dev);
        if (ws)
                wakeup_source_unregister(ws);

        return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);

/**
 * device_init_wakeup - Device wakeup initialization.
 * @dev: Device to handle.
 * @enable: Whether or not to enable @dev as a wakeup device.
 *
 * By default, most devices should leave wakeup disabled. The exceptions are
 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 * possibly network interfaces, etc.
 */
int device_init_wakeup(struct device *dev, bool enable)
{
        int ret = 0;

        if (enable) {
                device_set_wakeup_capable(dev, true);
                ret = device_wakeup_enable(dev);
        } else {
                device_set_wakeup_capable(dev, false);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(device_init_wakeup);

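/*
 * Illustrative sketch (not part of this file): a driver that wants its device
 * to be able to wake up the system typically calls device_init_wakeup() from
 * its probe path. The function and device names below are hypothetical.
 *
 *	static int my_button_probe(struct platform_device *pdev)
 *	{
 *		int error;
 *
 *		// ... set up the hardware ...
 *
 *		error = device_init_wakeup(&pdev->dev, true);
 *		if (error)
 *			return error;
 *
 *		return 0;
 *	}
 *
 * Passing false instead disables wakeup and marks the device as not
 * wakeup-capable again.
 */
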
/**
 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 * @dev: Device to handle.
 */
int device_set_wakeup_enable(struct device *dev, bool enable)
{
        if (!dev || !dev->power.can_wakeup)
                return -EINVAL;

        return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);

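/*
 * Illustrative sketch (not part of this file): device_set_wakeup_enable() is
 * the runtime toggle, suitable for paths such as a sysfs "wakeup" attribute
 * handler or a wake-on-LAN setting. The helper name below is hypothetical.
 *
 *	static int my_set_wol(struct device *dev, bool on)
 *	{
 *		return device_set_wakeup_enable(dev, on);
 *	}
 */
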
/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended. The moment this period
 * ends depends on how the wakeup event is going to be processed after being
 * detected, and all of the possible cases can be divided into two distinct
 * groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user space
 * for further processing. In that case the functional unit that has detected
 * the event may later "close" the "no suspend" period associated with it
 * directly as soon as it has been dealt with. The pair of pm_stay_awake() and
 * pm_relax(), balanced with each other, is supposed to be used in such
 * situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one. In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing. This
 * knowledge, however, may not be available to it, so it can simply specify time
 * to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 *
 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 * function executed when the timer expires, whichever comes first.
 */

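/*
 * Illustrative sketch (not part of this file) of the two patterns described
 * above. All driver-side identifiers (my_dev, my_detect_irq) are hypothetical.
 *
 * Pattern 1 - the same unit detects and finishes processing the event:
 *
 *	pm_stay_awake(my_dev);
 *	// ... fully handle the event, possibly handing it to user space ...
 *	pm_relax(my_dev);
 *
 * Pattern 2 - the detecting unit does not know when processing will end, so
 * it only guarantees a bounded "no suspend" period (here 250 ms):
 *
 *	static irqreturn_t my_detect_irq(int irq, void *dev_id)
 *	{
 *		struct device *my_dev = dev_id;
 *
 *		pm_wakeup_event(my_dev, 250);
 *		return IRQ_WAKE_THREAD;	// another unit continues the work
 *	}
 */
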
/**
 * wakeup_source_activate - Mark given wakeup source as active.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 * core of the event by incrementing the counter of wakeup events being
 * processed.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
{
        ws->active = true;
        ws->active_count++;
        ws->timer_expires = jiffies;
        ws->last_time = ktime_get();

        atomic_inc(&events_in_progress);
}

/**
 * __pm_stay_awake - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_stay_awake(struct wakeup_source *ws)
{
        unsigned long flags;

        if (!ws)
                return;

        spin_lock_irqsave(&ws->lock, flags);
        ws->event_count++;
        if (!ws->active)
                wakeup_source_activate(ws);
        spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 * __pm_stay_awake for the @dev's wakeup source object.
 *
 * Call this function after detecting a wakeup event if pm_relax() is going
 * to be called directly after processing the event (and possibly passing it to
 * user space for further processing).
 */
void pm_stay_awake(struct device *dev)
{
        unsigned long flags;

        if (!dev)
                return;

        spin_lock_irqsave(&dev->power.lock, flags);
        __pm_stay_awake(dev->power.wakeup);
        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);

/**
 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and notify the PM core that the wakeup source has
 * become inactive by decrementing the counter of wakeup events being processed
 * and incrementing the counter of registered wakeup events.
 */
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
        ktime_t duration;
        ktime_t now;

        ws->relax_count++;
        /*
         * __pm_relax() may be called directly or from a timer function.
         * If it is called directly right after the timer function has been
         * started, but before the timer function calls __pm_relax(), it is
         * possible that __pm_stay_awake() will be called in the meantime and
         * will set ws->active. Then, ws->active may be cleared immediately
         * by the __pm_relax() called from the timer function, but in such a
         * case ws->relax_count will be different from ws->active_count.
         */
        if (ws->relax_count != ws->active_count) {
                ws->relax_count--;
                return;
        }

        ws->active = false;

        now = ktime_get();
        duration = ktime_sub(now, ws->last_time);
        ws->total_time = ktime_add(ws->total_time, duration);
        if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
                ws->max_time = duration;

        del_timer(&ws->timer);

        /*
         * event_count has to be incremented before events_in_progress is
         * modified, so that the callers of pm_wakeup_pending() and
         * pm_save_wakeup_count() don't see the old value of event_count and
         * events_in_progress equal to zero at the same time.
         */
        atomic_inc(&event_count);
        smp_mb__before_atomic_dec();
        atomic_dec(&events_in_progress);
}

/**
 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * Call this function for wakeup events whose processing started with calling
 * __pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void __pm_relax(struct wakeup_source *ws)
{
        unsigned long flags;

        if (!ws)
                return;

        spin_lock_irqsave(&ws->lock, flags);
        if (ws->active)
                wakeup_source_deactivate(ws);
        spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @dev: Device that signaled the event.
 *
 * Execute __pm_relax() for the @dev's wakeup source object.
 */
void pm_relax(struct device *dev)
{
        unsigned long flags;

        if (!dev)
                return;

        spin_lock_irqsave(&dev->power.lock, flags);
        __pm_relax(dev->power.wakeup);
        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_relax);

/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @data: Address of the wakeup source object associated with the event source.
 *
 * Call __pm_relax() for the wakeup source whose address is stored in @data.
 */
static void pm_wakeup_timer_fn(unsigned long data)
{
        __pm_relax((struct wakeup_source *)data);
}

/**
 * __pm_wakeup_event - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the event source.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Notify the PM core of a wakeup event whose source is @ws that will take
 * approximately @msec milliseconds to be processed by the kernel. If @ws is
 * not active, activate it. If @msec is nonzero, set up the @ws' timer to
 * execute pm_wakeup_timer_fn() in the future.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
        unsigned long flags;
        unsigned long expires;

        if (!ws)
                return;

        spin_lock_irqsave(&ws->lock, flags);

        ws->event_count++;
        if (!ws->active)
                wakeup_source_activate(ws);

        if (!msec) {
                wakeup_source_deactivate(ws);
                goto unlock;
        }

        expires = jiffies + msecs_to_jiffies(msec);
        if (!expires)
                expires = 1;

        if (time_after(expires, ws->timer_expires)) {
                mod_timer(&ws->timer, expires);
                ws->timer_expires = expires;
        }

 unlock:
        spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_wakeup_event);

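/*
 * Illustrative sketch (not part of this file): __pm_wakeup_event() used with a
 * wakeup source that is not attached to a device. The identifier my_ws is
 * hypothetical.
 *
 *	// Give the rest of the system up to 500 ms to pick up the event
 *	// before a suspend may be attempted again:
 *	__pm_wakeup_event(my_ws, 500);
 *
 *	// With msec == 0 the event is counted, but the source is deactivated
 *	// again immediately, so no extra "no suspend" period is started:
 *	__pm_wakeup_event(my_ws, 0);
 */
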

/**
 * pm_wakeup_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Call __pm_wakeup_event() for the @dev's wakeup source object.
 */
void pm_wakeup_event(struct device *dev, unsigned int msec)
{
        unsigned long flags;

        if (!dev)
                return;

        spin_lock_irqsave(&dev->power.lock, flags);
        __pm_wakeup_event(dev->power.wakeup, msec);
        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);

/**
 * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
 */
static void pm_wakeup_update_hit_counts(void)
{
        unsigned long flags;
        struct wakeup_source *ws;

        rcu_read_lock();
        list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
                spin_lock_irqsave(&ws->lock, flags);
                if (ws->active)
                        ws->hit_count++;
                spin_unlock_irqrestore(&ws->lock, flags);
        }
        rcu_read_unlock();
}

/**
 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past and return true if new wakeup events have been registered
 * since the old value was stored. Also return true if the current number of
 * wakeup events being processed is different from zero.
 */
bool pm_wakeup_pending(void)
{
        unsigned long flags;
        bool ret = false;

        spin_lock_irqsave(&events_lock, flags);
        if (events_check_enabled) {
                ret = ((unsigned int)atomic_read(&event_count) != saved_count)
                        || atomic_read(&events_in_progress);
                events_check_enabled = !ret;
        }
        spin_unlock_irqrestore(&events_lock, flags);
        if (ret)
                pm_wakeup_update_hit_counts();
        return ret;
}

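/*
 * Illustrative sketch (not part of this file): a suspend control path can use
 * pm_wakeup_pending() to bail out of an in-progress transition. The
 * surrounding loop, list and label below are hypothetical.
 *
 *	list_for_each_entry(dev, &my_device_list, entry) {
 *		if (pm_wakeup_pending()) {
 *			error = -EBUSY;
 *			goto abort_suspend;
 *		}
 *		error = suspend_one_device(dev);
 *		// ...
 *	}
 */
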
/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 *
 * Store the number of registered wakeup events at the address in @count. Block
 * if the current number of wakeup events being processed is nonzero.
 *
 * Return false if the wait for the number of wakeup events being processed to
 * drop down to zero has been interrupted by a signal (and the current number
 * of wakeup events being processed is still nonzero). Otherwise return true.
 */
bool pm_get_wakeup_count(unsigned int *count)
{
        bool ret;

        if (capable(CAP_SYS_ADMIN))
                events_check_enabled = false;

        while (atomic_read(&events_in_progress) && !signal_pending(current)) {
                pm_wakeup_update_hit_counts();
                schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
        }

        ret = !atomic_read(&events_in_progress);
        *count = atomic_read(&event_count);
        return ret;
}

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events to be used by pm_wakeup_pending()
 * and return true. Otherwise return false.
 */
bool pm_save_wakeup_count(unsigned int count)
{
        bool ret = false;

        spin_lock_irq(&events_lock);
        if (count == (unsigned int)atomic_read(&event_count)
            && !atomic_read(&events_in_progress)) {
                saved_count = count;
                events_check_enabled = true;
                ret = true;
        }
        spin_unlock_irq(&events_lock);
        if (!ret)
                pm_wakeup_update_hit_counts();
        return ret;
}
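
/*
 * Illustrative sketch (not part of this file): pm_get_wakeup_count() and
 * pm_save_wakeup_count() back the /sys/power/wakeup_count interface, which a
 * user space power manager can use to avoid racing with wakeup events when
 * initiating suspend. A minimal user space sequence, assuming the usual sysfs
 * paths, might look like this:
 *
 *	char buf[32];
 *	int fd = open("/sys/power/wakeup_count", O_RDWR);
 *	ssize_t len = read(fd, buf, sizeof(buf) - 1);	// pm_get_wakeup_count()
 *
 *	buf[len] = '\0';
 *	// ... check that no wakeup events of interest are pending ...
 *	if (write(fd, buf, len) > 0) {			// pm_save_wakeup_count()
 *		int sfd = open("/sys/power/state", O_WRONLY);
 *		write(sfd, "mem", 3);	// suspend; aborted if events show up
 *		close(sfd);
 *	}
 *	close(fd);
 */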

static struct dentry *wakeup_sources_stats_dentry;

/**
 * print_wakeup_source_stats - Print wakeup source statistics information.
 * @m: seq_file to print the statistics into.
 * @ws: Wakeup source object to print the statistics for.
 */
static int print_wakeup_source_stats(struct seq_file *m,
                                     struct wakeup_source *ws)
{
        unsigned long flags;
        ktime_t total_time;
        ktime_t max_time;
        unsigned long active_count;
        ktime_t active_time;
        int ret;

        spin_lock_irqsave(&ws->lock, flags);

        total_time = ws->total_time;
        max_time = ws->max_time;
        active_count = ws->active_count;
        if (ws->active) {
                active_time = ktime_sub(ktime_get(), ws->last_time);
                total_time = ktime_add(total_time, active_time);
                if (active_time.tv64 > max_time.tv64)
                        max_time = active_time;
        } else {
                active_time = ktime_set(0, 0);
        }

        ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t"
                        "%lld\t\t%lld\t\t%lld\t\t%lld\n",
                        ws->name, active_count, ws->event_count, ws->hit_count,
                        ktime_to_ms(active_time), ktime_to_ms(total_time),
                        ktime_to_ms(max_time), ktime_to_ms(ws->last_time));

        spin_unlock_irqrestore(&ws->lock, flags);

        return ret;
}

/**
 * wakeup_sources_stats_show - Print wakeup sources statistics information.
 * @m: seq_file to print the statistics into.
 */
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
        struct wakeup_source *ws;

        seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t"
                "active_since\ttotal_time\tmax_time\tlast_change\n");

        rcu_read_lock();
        list_for_each_entry_rcu(ws, &wakeup_sources, entry)
                print_wakeup_source_stats(m, ws);
        rcu_read_unlock();

        return 0;
}

static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, wakeup_sources_stats_show, NULL);
}

static const struct file_operations wakeup_sources_stats_fops = {
        .owner = THIS_MODULE,
        .open = wakeup_sources_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init wakeup_sources_debugfs_init(void)
{
        wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
                        S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
        return 0;
}

postcore_initcall(wakeup_sources_debugfs_init);
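
/*
 * Illustrative note (not part of this file): with debugfs mounted in the usual
 * place, the statistics above are readable from
 * /sys/kernel/debug/wakeup_sources, one tab-separated line per wakeup source,
 * for example (source name and values made up):
 *
 *	name		active_count	event_count	hit_count	active_since	total_time	max_time	last_change
 *	my_event_source	12		12		0		0		4380		517		918340
 */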