drivers/base/power/opp.c
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>

/*
 * Internal data structure organization with the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */

/**
 * struct dev_pm_opp - Generic OPP description structure
 * @node: opp list node. The nodes are maintained throughout the lifetime
 *	of boot. It is expected that only an optimal set of OPPs is
 *	added to the library by the SoC framework.
 *	RCU usage: the opp list is traversed under RCU locks. Node
 *	modifications are possible at runtime, hence the modifications
 *	are protected by dev_opp_list_lock for integrity.
 *	IMPORTANT: the opp nodes should be maintained in increasing
 *	order of frequency.
 * @dynamic: true if this OPP was added at runtime, i.e. not created from
 *	static DT entries.
 * @available: true/false - marks whether this OPP is available or not
 * @rate: Frequency in hertz
 * @u_volt: Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp: points back to the device_opp struct this opp belongs to
 * @rcu_head: RCU callback head used for deferred freeing
 *
 * This structure stores the OPP information for a given device.
 */
struct dev_pm_opp {
	struct list_head node;

	bool available;
	bool dynamic;
	unsigned long rate;
	unsigned long u_volt;

	struct device_opp *dev_opp;
	struct rcu_head rcu_head;
};

/**
 * struct device_opp - Device opp structure
 * @node: list node - contains the devices with OPPs that
 *	have been registered. Nodes once added are not modified in this
 *	list.
 *	RCU usage: nodes are not modified in the list of device_opp,
 *	however addition is possible and is secured by dev_opp_list_lock
 * @dev: device pointer
 * @srcu_head: notifier head to notify of OPP availability changes.
 * @rcu_head: RCU callback head used for deferred freeing
 * @opp_list: list of opps
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for bookkeeping and is private to the OPP library.
 *
 * Because the opp structures can be used from both rcu and srcu readers, we
 * need to wait for the grace period of both of them before freeing any
 * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
 */
struct device_opp {
	struct list_head node;

	struct device *dev;
	struct srcu_notifier_head srcu_head;
	struct rcu_head rcu_head;
	struct list_head opp_list;
};

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opps it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

#define opp_rcu_lockdep_assert()					\
do {									\
	rcu_lockdep_assert(rcu_read_lock_held() ||			\
				lockdep_is_held(&dev_opp_list_lock),	\
			   "Missing rcu_read_lock() or "		\
			   "dev_opp_list_lock protection");		\
} while (0)

/**
 * _find_device_opp() - find device_opp struct using device pointer
 * @dev: device pointer used to lookup device OPPs
 *
 * Search the list of device OPPs for one containing the matching device. Does
 * an RCU reader operation to grab the pointer needed.
 *
 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is an RCU-protected pointer. This means that device_opp is valid as long
 * as we are under the RCU lock.
 */
static struct device_opp *_find_device_opp(struct device *dev)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
		if (tmp_dev_opp->dev == dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}

	return dev_opp;
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp: opp for which the voltage has to be returned
 *
 * Return: voltage in microvolts corresponding to the opp, else
 * returns 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an
 * RCU-protected pointer, which means an opp fetched by one of the
 * opp_find_freq_{exact,ceil,floor} functions is valid only as long as we are
 * under the RCU lock. The pointer returned by the opp_find_freq family must
 * therefore be used with this function before unlocking with
 * rcu_read_unlock(), to maintain the integrity of the pointer.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long v = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp: opp for which the frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * returns 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an
 * RCU-protected pointer, which means an opp fetched by one of the
 * opp_find_freq_{exact,ceil,floor} functions is valid only as long as we are
 * under the RCU lock. The pointer returned by the opp_find_freq family must
 * therefore be used with this function before unlocking with
 * rcu_read_unlock(), to maintain the integrity of the pointer.
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long f = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
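
/*
 * Example (illustrative sketch only, not part of the OPP core): a typical
 * consumer reads an OPP's frequency and voltage inside a single RCU read-side
 * critical section, since the opp pointer is only guaranteed valid until
 * rcu_read_unlock(). The device pointer "dev" and the target frequency are
 * placeholders for whatever the caller already has.
 *
 *	unsigned long freq = 800000000, volt;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
 *	if (!IS_ERR(opp)) {
 *		volt = dev_pm_opp_get_voltage(opp);
 *		freq = dev_pm_opp_get_freq(opp);
 *	}
 *	rcu_read_unlock();
 */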

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
 * @dev: device for which we do this operation
 *
 * Return: the number of available opps if there are any, 0 if there are none,
 * or the corresponding error value.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp;
	int count = 0;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		count = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n",
			__func__, count);
		goto out_unlock;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

out_unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for an exact match in the opp list and returns a pointer
 * to the matching opp if found, else returns ERR_PTR in case of error, which
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available == true, then
 * the match is for an exact frequency which is available in the stored OPP
 * table. If false, the match is for an exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an
 * RCU-protected pointer. The reason for this is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);

		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp, and updates *freq accordingly; else returns
 * ERR_PTR in case of error, which should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an
 * RCU-protected pointer. The reason for this is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
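
/*
 * Example (illustrative sketch only, not part of the OPP core): walking every
 * available OPP in ascending frequency order, the way a cpufreq driver might
 * when building its frequency table. "dev" is assumed to be a device that
 * already has OPPs registered.
 *
 *	unsigned long freq = 0;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 *		pr_info("OPP: %lu Hz at %lu uV\n",
 *			freq, dev_pm_opp_get_voltage(opp));
 *		freq++;		// search strictly above the OPP just found
 *	}
 *	rcu_read_unlock();
 */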

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp, and updates *freq accordingly; else returns
 * ERR_PTR in case of error, which should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an
 * RCU-protected pointer. The reason for this is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/**
 * _add_device_opp() - Allocate a new device OPP table
 * @dev: device for which we do this operation
 *
 * Allocates a new device node for a device that uses OPPs - used when
 * multiple devices with OPP tables are maintained.
 *
 * Return: valid device_opp pointer on success, else NULL.
 */
static struct device_opp *_add_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	/*
	 * Allocate a new device OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
	if (!dev_opp)
		return NULL;

	dev_opp->dev = dev;
	srcu_init_notifier_head(&dev_opp->srcu_head);
	INIT_LIST_HEAD(&dev_opp->opp_list);

	/* Secure the device list modification */
	list_add_rcu(&dev_opp->node, &dev_opp_list);
	return dev_opp;
}

/**
 * _opp_add_dynamic() - Allocate a dynamic OPP.
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 * @dynamic: true for dynamically added OPPs, false for OPPs created from
 *	static DT entries.
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: The "dynamic" parameter distinguishes dynamically added OPPs from the
 * static ones created by of_init_opp_table() and freed by of_free_opp_table().
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success, OR
 *		duplicate OPPs (both freq and volt are the same) and opp->available
 * -EEXIST	Freq is the same but volt is different, OR
 *		duplicate OPPs (both freq and volt are the same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
static int _opp_add_dynamic(struct device *dev, unsigned long freq,
			    long u_volt, bool dynamic)
{
	struct device_opp *dev_opp = NULL;
	struct dev_pm_opp *opp, *new_opp;
	struct list_head *head;
	int ret;

	/* allocate new OPP node */
	new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* populate the opp table */
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	/* Check for existing list for 'dev' */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		dev_opp = _add_device_opp(dev);
		if (!dev_opp) {
			ret = -ENOMEM;
			goto free_opp;
		}

		head = &dev_opp->opp_list;
		goto list_add;
	}

	/*
	 * Insert new OPP in order of increasing frequency
	 * and discard if already present
	 */
	head = &dev_opp->opp_list;
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate <= opp->rate)
			break;
		else
			head = &opp->node;
	}

	/* Duplicate OPPs ? */
	if (new_opp->rate == opp->rate) {
		ret = opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;

		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);
		goto free_opp;
	}

list_add:
	new_opp->dev_opp = dev_opp;
	list_add_rcu(&new_opp->node, head);
	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return ret;
}


/**
 * dev_pm_opp_add() - Add an OPP table entry from a table definition
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success, OR
 *		duplicate OPPs (both freq and volt are the same) and opp->available
 * -EEXIST	Freq is the same but volt is different, OR
 *		duplicate OPPs (both freq and volt are the same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	return _opp_add_dynamic(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
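
/*
 * Example (illustrative sketch only, not part of the OPP core): a platform or
 * SoC driver that knows its frequency/voltage pairs can register them at
 * probe time. The frequencies and voltages below are made-up values; "dev" is
 * the driver's own struct device.
 *
 *	int ret;
 *
 *	ret = dev_pm_opp_add(dev, 600000000, 1000000);	// 600 MHz at 1.0 V
 *	if (!ret)
 *		ret = dev_pm_opp_add(dev, 800000000, 1100000);	// 800 MHz at 1.1 V
 *	if (ret)
 *		dev_err(dev, "failed to register OPPs: %d\n", ret);
 */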

/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head: RCU head
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}

/**
 * _kfree_device_rcu() - Free device_opp RCU handler
 * @head: RCU head
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);

	kfree_rcu(device_opp, rcu_head);
}

/**
 * _opp_remove() - Remove an OPP from a table definition
 * @dev_opp: points back to the device_opp struct this opp belongs to
 * @opp: pointer to the OPP to remove
 *
 * This function removes an opp definition from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
static void _opp_remove(struct device_opp *dev_opp,
			struct dev_pm_opp *opp)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
	list_del_rcu(&opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	if (list_empty(&dev_opp->opp_list)) {
		list_del_rcu(&dev_opp->node);
		call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
			  _kfree_device_rcu);
	}
}


/**
 * dev_pm_opp_remove() - Remove an OPP from OPP list
 * @dev: device for which we do this operation
 * @freq: OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_opp *dev_opp;
	bool found = false;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		goto unlock;

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

	_opp_remove(dev_opp, opp);
unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
 * share the common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, and 0 if no modification was needed or the modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return r;
}


/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, and 0 if no modification was needed or the modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, and 0 if no modification was needed or the modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
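
/*
 * Example (illustrative sketch only, not part of the OPP core): a governor or
 * thermal driver may temporarily mask its highest OPP and restore it later.
 * The 1 GHz frequency is a made-up value; "dev" is the affected device.
 *
 *	dev_pm_opp_disable(dev, 1000000000);	// too hot: hide the 1 GHz OPP
 *
 *	... once the thermal condition clears ...
 *
 *	dev_pm_opp_enable(dev, 1000000000);	// make it searchable again
 */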

/**
 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
 * @dev: device pointer used to lookup device OPPs.
 *
 * Return: pointer to notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error, cast as a pointer. The value must be checked
 * with IS_ERR to determine a valid pointer or error result.
 *
 * Locking: This function must be called under rcu_read_lock(). dev_opp is an
 * RCU-protected pointer. The reason for this is that the dev_opp pointer which
 * is returned will remain valid only while under the locked area. The pointer
 * returned must be used prior to unlocking with rcu_read_unlock() to maintain
 * the integrity of the pointer.
 */
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = _find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
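
/*
 * Example (illustrative sketch only, not part of the OPP core): a consumer
 * that wants to react to OPP availability changes registers a notifier block
 * on the head returned here. The callback and block names are placeholders.
 *
 *	static int my_opp_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		// event is one of OPP_EVENT_ADD/REMOVE/ENABLE/DISABLE,
 *		// data is the affected struct dev_pm_opp
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_opp_nb = {
 *		.notifier_call = my_opp_notify,
 *	};
 *
 *	struct srcu_notifier_head *nh;
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	rcu_read_unlock();
 *	if (!IS_ERR(nh))
 *		srcu_notifier_chain_register(nh, &my_opp_nb);
 */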

#ifdef CONFIG_OF
/**
 * of_init_opp_table() - Initialize opp table from device tree
 * @dev: device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success, OR
 *		duplicate OPPs (both freq and volt are the same) and opp->available
 * -EEXIST	Freq is the same but volt is different, OR
 *		duplicate OPPs (both freq and volt are the same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when the 'operating-points' property is not found in the device node
 * -ENODATA	when an empty 'operating-points' property is found
 * -EINVAL	when the 'operating-points' property contains invalid data
 */
int of_init_opp_table(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz volt-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		if (_opp_add_dynamic(dev, freq, volt, false))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(of_init_opp_table);
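
/*
 * Example (illustrative only, not taken from a real board file): a device
 * tree node using the legacy "operating-points" binding parsed above. Each
 * tuple is <frequency-kHz voltage-uV>; the values here are made up, and the
 * node name and other properties are placeholders.
 *
 *	cpu@0 {
 *		...
 *		operating-points = <
 *			792000 1100000
 *			396000  950000
 *			198000  850000
 *		>;
 *	};
 */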

/**
 * of_free_opp_table() - Free OPP table entries created from static DT entries
 * @dev: device pointer used to lookup device OPPs.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void of_free_opp_table(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *opp, *tmp;

	/* Check for existing list for 'dev' */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int error = PTR_ERR(dev_opp);

		if (error != -ENODEV)
			WARN(1, "%s: dev_opp: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		return;
	}

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Free static OPPs */
	list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
		if (!opp->dynamic)
			_opp_remove(dev_opp, opp);
	}

	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(of_free_opp_table);
#endif