/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/errno.h>
16 #include <linux/err.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/stat.h>
21 #include <linux/opp.h>
22 #include <linux/devfreq.h>
23 #include <linux/workqueue.h>
24 #include <linux/platform_device.h>
25 #include <linux/list.h>
26 #include <linux/printk.h>
27 #include <linux/hrtimer.h>
/*
 * The devfreq device class; created in devfreq_init().  It is file-private
 * state, so it is declared static to keep it out of the global namespace
 * (nothing outside this file may reference it; all access goes through the
 * devfreq API).
 */
static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
44 * find_device_devfreq() - find devfreq struct using device pointer
45 * @dev: device pointer used to lookup device devfreq.
47 * Search the list of device devfreqs and return the matched device's
48 * devfreq info. devfreq_list_lock should be held by the caller.
50 static struct devfreq
*find_device_devfreq(struct device
*dev
)
52 struct devfreq
*tmp_devfreq
;
54 if (unlikely(IS_ERR_OR_NULL(dev
))) {
55 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__
);
56 return ERR_PTR(-EINVAL
);
58 WARN(!mutex_is_locked(&devfreq_list_lock
),
59 "devfreq_list_lock must be locked.");
61 list_for_each_entry(tmp_devfreq
, &devfreq_list
, node
) {
62 if (tmp_devfreq
->dev
.parent
== dev
)
66 return ERR_PTR(-ENODEV
);
69 /* Load monitoring helper functions for governors use */
72 * update_devfreq() - Reevaluate the device and configure frequency.
73 * @devfreq: the devfreq instance.
75 * Note: Lock devfreq->lock before calling update_devfreq
76 * This function is exported for governors.
78 int update_devfreq(struct devfreq
*devfreq
)
84 if (!mutex_is_locked(&devfreq
->lock
)) {
85 WARN(true, "devfreq->lock must be locked by the caller.\n");
89 /* Reevaluate the proper frequency */
90 err
= devfreq
->governor
->get_target_freq(devfreq
, &freq
);
95 * Adjust the freuqency with user freq and QoS.
97 * List from the highest proiority
98 * max_freq (probably called by thermal when it's too hot)
102 if (devfreq
->min_freq
&& freq
< devfreq
->min_freq
) {
103 freq
= devfreq
->min_freq
;
104 flags
&= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND
; /* Use GLB */
106 if (devfreq
->max_freq
&& freq
> devfreq
->max_freq
) {
107 freq
= devfreq
->max_freq
;
108 flags
|= DEVFREQ_FLAG_LEAST_UPPER_BOUND
; /* Use LUB */
111 err
= devfreq
->profile
->target(devfreq
->dev
.parent
, &freq
, flags
);
115 devfreq
->previous_freq
= freq
;
120 * devfreq_monitor() - Periodically poll devfreq objects.
121 * @work: the work struct used to run devfreq_monitor periodically.
124 static void devfreq_monitor(struct work_struct
*work
)
127 struct devfreq
*devfreq
= container_of(work
,
128 struct devfreq
, work
.work
);
130 mutex_lock(&devfreq
->lock
);
131 err
= update_devfreq(devfreq
);
133 dev_err(&devfreq
->dev
, "dvfs failed with (%d) error\n", err
);
135 queue_delayed_work(devfreq_wq
, &devfreq
->work
,
136 msecs_to_jiffies(devfreq
->profile
->polling_ms
));
137 mutex_unlock(&devfreq
->lock
);
141 * devfreq_monitor_start() - Start load monitoring of devfreq instance
142 * @devfreq: the devfreq instance.
144 * Helper function for starting devfreq device load monitoing. By
145 * default delayed work based monitoring is supported. Function
146 * to be called from governor in response to DEVFREQ_GOV_START
147 * event when device is added to devfreq framework.
149 void devfreq_monitor_start(struct devfreq
*devfreq
)
151 INIT_DEFERRABLE_WORK(&devfreq
->work
, devfreq_monitor
);
152 if (devfreq
->profile
->polling_ms
)
153 queue_delayed_work(devfreq_wq
, &devfreq
->work
,
154 msecs_to_jiffies(devfreq
->profile
->polling_ms
));
158 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
159 * @devfreq: the devfreq instance.
161 * Helper function to stop devfreq device load monitoing. Function
162 * to be called from governor in response to DEVFREQ_GOV_STOP
163 * event when device is removed from devfreq framework.
165 void devfreq_monitor_stop(struct devfreq
*devfreq
)
167 cancel_delayed_work_sync(&devfreq
->work
);
171 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
172 * @devfreq: the devfreq instance.
174 * Helper function to suspend devfreq device load monitoing. Function
175 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
176 * event or when polling interval is set to zero.
178 * Note: Though this function is same as devfreq_monitor_stop(),
179 * intentionally kept separate to provide hooks for collecting
180 * transition statistics.
182 void devfreq_monitor_suspend(struct devfreq
*devfreq
)
184 mutex_lock(&devfreq
->lock
);
185 if (devfreq
->stop_polling
) {
186 mutex_unlock(&devfreq
->lock
);
190 devfreq
->stop_polling
= true;
191 mutex_unlock(&devfreq
->lock
);
192 cancel_delayed_work_sync(&devfreq
->work
);
196 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
197 * @devfreq: the devfreq instance.
199 * Helper function to resume devfreq device load monitoing. Function
200 * to be called from governor in response to DEVFREQ_GOV_RESUME
201 * event or when polling interval is set to non-zero.
203 void devfreq_monitor_resume(struct devfreq
*devfreq
)
205 mutex_lock(&devfreq
->lock
);
206 if (!devfreq
->stop_polling
)
209 if (!delayed_work_pending(&devfreq
->work
) &&
210 devfreq
->profile
->polling_ms
)
211 queue_delayed_work(devfreq_wq
, &devfreq
->work
,
212 msecs_to_jiffies(devfreq
->profile
->polling_ms
));
213 devfreq
->stop_polling
= false;
216 mutex_unlock(&devfreq
->lock
);
220 * devfreq_interval_update() - Update device devfreq monitoring interval
221 * @devfreq: the devfreq instance.
222 * @delay: new polling interval to be set.
224 * Helper function to set new load monitoring polling interval. Function
225 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
227 void devfreq_interval_update(struct devfreq
*devfreq
, unsigned int *delay
)
229 unsigned int cur_delay
= devfreq
->profile
->polling_ms
;
230 unsigned int new_delay
= *delay
;
232 mutex_lock(&devfreq
->lock
);
233 devfreq
->profile
->polling_ms
= new_delay
;
235 if (devfreq
->stop_polling
)
238 /* if new delay is zero, stop polling */
240 mutex_unlock(&devfreq
->lock
);
241 cancel_delayed_work_sync(&devfreq
->work
);
245 /* if current delay is zero, start polling with new delay */
247 queue_delayed_work(devfreq_wq
, &devfreq
->work
,
248 msecs_to_jiffies(devfreq
->profile
->polling_ms
));
252 /* if current delay is greater than new delay, restart polling */
253 if (cur_delay
> new_delay
) {
254 mutex_unlock(&devfreq
->lock
);
255 cancel_delayed_work_sync(&devfreq
->work
);
256 mutex_lock(&devfreq
->lock
);
257 if (!devfreq
->stop_polling
)
258 queue_delayed_work(devfreq_wq
, &devfreq
->work
,
259 msecs_to_jiffies(devfreq
->profile
->polling_ms
));
262 mutex_unlock(&devfreq
->lock
);
266 * devfreq_notifier_call() - Notify that the device frequency requirements
267 * has been changed out of devfreq framework.
268 * @nb: the notifier_block (supposed to be devfreq->nb)
272 * Called by a notifier that uses devfreq->nb.
274 static int devfreq_notifier_call(struct notifier_block
*nb
, unsigned long type
,
277 struct devfreq
*devfreq
= container_of(nb
, struct devfreq
, nb
);
280 mutex_lock(&devfreq
->lock
);
281 ret
= update_devfreq(devfreq
);
282 mutex_unlock(&devfreq
->lock
);
288 * _remove_devfreq() - Remove devfreq from the list and release its resources.
289 * @devfreq: the devfreq struct
290 * @skip: skip calling device_unregister().
292 static void _remove_devfreq(struct devfreq
*devfreq
, bool skip
)
294 mutex_lock(&devfreq_list_lock
);
295 if (IS_ERR(find_device_devfreq(devfreq
->dev
.parent
))) {
296 mutex_unlock(&devfreq_list_lock
);
297 dev_warn(&devfreq
->dev
, "releasing devfreq which doesn't exist\n");
300 list_del(&devfreq
->node
);
301 mutex_unlock(&devfreq_list_lock
);
303 devfreq
->governor
->event_handler(devfreq
, DEVFREQ_GOV_STOP
, NULL
);
305 if (devfreq
->profile
->exit
)
306 devfreq
->profile
->exit(devfreq
->dev
.parent
);
308 if (!skip
&& get_device(&devfreq
->dev
)) {
309 device_unregister(&devfreq
->dev
);
310 put_device(&devfreq
->dev
);
313 mutex_destroy(&devfreq
->lock
);
318 * devfreq_dev_release() - Callback for struct device to release the device.
319 * @dev: the devfreq device
321 * This calls _remove_devfreq() if _remove_devfreq() is not called.
322 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
323 * well as by others unregistering the device.
325 static void devfreq_dev_release(struct device
*dev
)
327 struct devfreq
*devfreq
= to_devfreq(dev
);
329 _remove_devfreq(devfreq
, true);
333 * devfreq_add_device() - Add devfreq feature to the device
334 * @dev: the device to add devfreq feature.
335 * @profile: device-specific profile to run devfreq.
336 * @governor: the policy to choose frequency.
337 * @data: private data for the governor. The devfreq framework does not
340 struct devfreq
*devfreq_add_device(struct device
*dev
,
341 struct devfreq_dev_profile
*profile
,
342 const struct devfreq_governor
*governor
,
345 struct devfreq
*devfreq
;
348 if (!dev
|| !profile
|| !governor
) {
349 dev_err(dev
, "%s: Invalid parameters.\n", __func__
);
350 return ERR_PTR(-EINVAL
);
353 mutex_lock(&devfreq_list_lock
);
354 devfreq
= find_device_devfreq(dev
);
355 mutex_unlock(&devfreq_list_lock
);
356 if (!IS_ERR(devfreq
)) {
357 dev_err(dev
, "%s: Unable to create devfreq for the device. It already has one.\n", __func__
);
362 devfreq
= kzalloc(sizeof(struct devfreq
), GFP_KERNEL
);
364 dev_err(dev
, "%s: Unable to create devfreq for the device\n",
370 mutex_init(&devfreq
->lock
);
371 mutex_lock(&devfreq
->lock
);
372 devfreq
->dev
.parent
= dev
;
373 devfreq
->dev
.class = devfreq_class
;
374 devfreq
->dev
.release
= devfreq_dev_release
;
375 devfreq
->profile
= profile
;
376 devfreq
->governor
= governor
;
377 devfreq
->previous_freq
= profile
->initial_freq
;
378 devfreq
->data
= data
;
379 devfreq
->nb
.notifier_call
= devfreq_notifier_call
;
381 dev_set_name(&devfreq
->dev
, dev_name(dev
));
382 err
= device_register(&devfreq
->dev
);
384 put_device(&devfreq
->dev
);
385 mutex_unlock(&devfreq
->lock
);
389 mutex_unlock(&devfreq
->lock
);
391 mutex_lock(&devfreq_list_lock
);
392 list_add(&devfreq
->node
, &devfreq_list
);
393 mutex_unlock(&devfreq_list_lock
);
395 err
= devfreq
->governor
->event_handler(devfreq
,
396 DEVFREQ_GOV_START
, NULL
);
398 dev_err(dev
, "%s: Unable to start governor for the device\n",
406 list_del(&devfreq
->node
);
407 device_unregister(&devfreq
->dev
);
413 EXPORT_SYMBOL(devfreq_add_device
);
416 * devfreq_remove_device() - Remove devfreq feature from a device.
417 * @devfreq: the devfreq instance to be removed
419 int devfreq_remove_device(struct devfreq
*devfreq
)
424 _remove_devfreq(devfreq
, false);
428 EXPORT_SYMBOL(devfreq_remove_device
);
431 * devfreq_suspend_device() - Suspend devfreq of a device.
432 * @devfreq: the devfreq instance to be suspended
434 int devfreq_suspend_device(struct devfreq
*devfreq
)
439 return devfreq
->governor
->event_handler(devfreq
,
440 DEVFREQ_GOV_SUSPEND
, NULL
);
442 EXPORT_SYMBOL(devfreq_suspend_device
);
445 * devfreq_resume_device() - Resume devfreq of a device.
446 * @devfreq: the devfreq instance to be resumed
448 int devfreq_resume_device(struct devfreq
*devfreq
)
453 return devfreq
->governor
->event_handler(devfreq
,
454 DEVFREQ_GOV_RESUME
, NULL
);
456 EXPORT_SYMBOL(devfreq_resume_device
);
458 static ssize_t
show_governor(struct device
*dev
,
459 struct device_attribute
*attr
, char *buf
)
461 return sprintf(buf
, "%s\n", to_devfreq(dev
)->governor
->name
);
464 static ssize_t
show_freq(struct device
*dev
,
465 struct device_attribute
*attr
, char *buf
)
468 struct devfreq
*devfreq
= to_devfreq(dev
);
470 if (devfreq
->profile
->get_cur_freq
&&
471 !devfreq
->profile
->get_cur_freq(devfreq
->dev
.parent
, &freq
))
472 return sprintf(buf
, "%lu\n", freq
);
474 return sprintf(buf
, "%lu\n", devfreq
->previous_freq
);
477 static ssize_t
show_target_freq(struct device
*dev
,
478 struct device_attribute
*attr
, char *buf
)
480 return sprintf(buf
, "%lu\n", to_devfreq(dev
)->previous_freq
);
483 static ssize_t
show_polling_interval(struct device
*dev
,
484 struct device_attribute
*attr
, char *buf
)
486 return sprintf(buf
, "%d\n", to_devfreq(dev
)->profile
->polling_ms
);
489 static ssize_t
store_polling_interval(struct device
*dev
,
490 struct device_attribute
*attr
,
491 const char *buf
, size_t count
)
493 struct devfreq
*df
= to_devfreq(dev
);
497 ret
= sscanf(buf
, "%u", &value
);
501 df
->governor
->event_handler(df
, DEVFREQ_GOV_INTERVAL
, &value
);
508 static ssize_t
store_min_freq(struct device
*dev
, struct device_attribute
*attr
,
509 const char *buf
, size_t count
)
511 struct devfreq
*df
= to_devfreq(dev
);
516 ret
= sscanf(buf
, "%lu", &value
);
520 mutex_lock(&df
->lock
);
522 if (value
&& max
&& value
> max
) {
527 df
->min_freq
= value
;
531 mutex_unlock(&df
->lock
);
536 static ssize_t
show_min_freq(struct device
*dev
, struct device_attribute
*attr
,
539 return sprintf(buf
, "%lu\n", to_devfreq(dev
)->min_freq
);
542 static ssize_t
store_max_freq(struct device
*dev
, struct device_attribute
*attr
,
543 const char *buf
, size_t count
)
545 struct devfreq
*df
= to_devfreq(dev
);
550 ret
= sscanf(buf
, "%lu", &value
);
554 mutex_lock(&df
->lock
);
556 if (value
&& min
&& value
< min
) {
561 df
->max_freq
= value
;
565 mutex_unlock(&df
->lock
);
570 static ssize_t
show_max_freq(struct device
*dev
, struct device_attribute
*attr
,
573 return sprintf(buf
, "%lu\n", to_devfreq(dev
)->max_freq
);
576 static struct device_attribute devfreq_attrs
[] = {
577 __ATTR(governor
, S_IRUGO
, show_governor
, NULL
),
578 __ATTR(cur_freq
, S_IRUGO
, show_freq
, NULL
),
579 __ATTR(target_freq
, S_IRUGO
, show_target_freq
, NULL
),
580 __ATTR(polling_interval
, S_IRUGO
| S_IWUSR
, show_polling_interval
,
581 store_polling_interval
),
582 __ATTR(min_freq
, S_IRUGO
| S_IWUSR
, show_min_freq
, store_min_freq
),
583 __ATTR(max_freq
, S_IRUGO
| S_IWUSR
, show_max_freq
, store_max_freq
),
587 static int __init
devfreq_init(void)
589 devfreq_class
= class_create(THIS_MODULE
, "devfreq");
590 if (IS_ERR(devfreq_class
)) {
591 pr_err("%s: couldn't create class\n", __FILE__
);
592 return PTR_ERR(devfreq_class
);
595 devfreq_wq
= create_freezable_workqueue("devfreq_wq");
596 if (IS_ERR(devfreq_wq
)) {
597 class_destroy(devfreq_class
);
598 pr_err("%s: couldn't create workqueue\n", __FILE__
);
599 return PTR_ERR(devfreq_wq
);
601 devfreq_class
->dev_attrs
= devfreq_attrs
;
605 subsys_initcall(devfreq_init
);
607 static void __exit
devfreq_exit(void)
609 class_destroy(devfreq_class
);
610 destroy_workqueue(devfreq_wq
);
612 module_exit(devfreq_exit
);
615 * The followings are helper functions for devfreq user device drivers with
620 * devfreq_recommended_opp() - Helper function to get proper OPP for the
621 * freq value given to target callback.
622 * @dev: The devfreq user device. (parent of devfreq)
623 * @freq: The frequency given to target function
624 * @flags: Flags handed from devfreq framework.
627 struct opp
*devfreq_recommended_opp(struct device
*dev
, unsigned long *freq
,
632 if (flags
& DEVFREQ_FLAG_LEAST_UPPER_BOUND
) {
633 /* The freq is an upper bound. opp should be lower */
634 opp
= opp_find_freq_floor(dev
, freq
);
636 /* If not available, use the closest opp */
637 if (opp
== ERR_PTR(-ENODEV
))
638 opp
= opp_find_freq_ceil(dev
, freq
);
640 /* The freq is an lower bound. opp should be higher */
641 opp
= opp_find_freq_ceil(dev
, freq
);
643 /* If not available, use the closest opp */
644 if (opp
== ERR_PTR(-ENODEV
))
645 opp
= opp_find_freq_floor(dev
, freq
);
652 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
653 * for any changes in the OPP availability
655 * @dev: The devfreq user device. (parent of devfreq)
656 * @devfreq: The devfreq object.
658 int devfreq_register_opp_notifier(struct device
*dev
, struct devfreq
*devfreq
)
660 struct srcu_notifier_head
*nh
= opp_get_notifier(dev
);
664 return srcu_notifier_chain_register(nh
, &devfreq
->nb
);
668 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
669 * notified for any changes in the OPP
670 * availability changes anymore.
671 * @dev: The devfreq user device. (parent of devfreq)
672 * @devfreq: The devfreq object.
674 * At exit() callback of devfreq_dev_profile, this must be included if
675 * devfreq_recommended_opp is used.
677 int devfreq_unregister_opp_notifier(struct device
*dev
, struct devfreq
*devfreq
)
679 struct srcu_notifier_head
*nh
= opp_get_notifier(dev
);
683 return srcu_notifier_chain_unregister(nh
, &devfreq
->nb
);
686 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
687 MODULE_DESCRIPTION("devfreq class support");
688 MODULE_LICENSE("GPL");