Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[deliverable/linux.git] / kernel / pm_qos_params.c
1 /*
2 * This module exposes the interface to kernel space for specifying
3 * QoS dependencies. It provides infrastructure for registration of:
4 *
5 * Dependents on a QoS value : register requests
6 * Watchers of QoS value : get notified when target QoS value changes
7 *
8 * This QoS design is best effort based. Dependents register their QoS needs.
9 * Watchers register to keep track of the current QoS needs of the system.
10 *
11 * There are 3 basic classes of QoS parameter: latency, timeout, throughput
12 * each have defined units:
13 * latency: usec
14 * timeout: usec <-- currently not used.
15 * throughput: kbs (kilo byte / sec)
16 *
17 * There are lists of pm_qos_objects each one wrapping requests, notifiers
18 *
 * User mode requests on a QOS parameter register themselves to the
 * subsystem by opening the device node /dev/... and writing their request
 * to the node. As long as the process holds a file handle open to the node
 * the client continues to be accounted for. Upon file release the usermode
 * request is removed and a new qos target is computed. This way, when the
 * application that made the request closes the file pointer or exits, the
 * pm_qos_object will get an opportunity to clean up.
26 *
27 * Mark Gross <mgross@linux.intel.com>
28 */
29
30 /*#define DEBUG*/
31
32 #include <linux/pm_qos_params.h>
33 #include <linux/sched.h>
34 #include <linux/spinlock.h>
35 #include <linux/slab.h>
36 #include <linux/time.h>
37 #include <linux/fs.h>
38 #include <linux/device.h>
39 #include <linux/miscdevice.h>
40 #include <linux/string.h>
41 #include <linux/platform_device.h>
42 #include <linux/init.h>
43 #include <linux/kernel.h>
44
45 #include <linux/uaccess.h>
46
/*
 * locking rule: all changes to requests or notifiers lists
 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
 * held, taken with _irqsave. One lock to rule them all
 */
/* aggregation rule for a qos class: which end of the plist is the target */
enum pm_qos_type {
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN		/* return the smallest value */
};

/* per-class state: active requests plus the user-space device node */
struct pm_qos_object {
	struct plist_head requests;	/* prio-sorted list of active requests */
	struct blocking_notifier_head *notifiers; /* called on target changes */
	struct miscdevice pm_qos_power_miscdev;	/* backs /dev/<name> */
	char *name;			/* class name; also the miscdev name */
	s32 default_value;		/* target when no requests are queued */
	enum pm_qos_type type;		/* MIN or MAX aggregation */
};
65
/* the one lock named by the locking rule above */
static DEFINE_SPINLOCK(pm_qos_lock);

/*
 * Slot 0 of pm_qos_array. Never registered as a misc device; a request
 * handle whose pm_qos_class is 0 is treated as inactive (see
 * pm_qos_request_active()).
 */
static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
	.notifiers = &cpu_dma_lat_notifier,
	.name = "cpu_dma_latency",
	/* 2000 s expressed in usec: effectively "no latency constraint" */
	.default_value = 2000 * USEC_PER_SEC,
	.type = PM_QOS_MIN,		/* tightest (smallest) latency wins */
};

static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
	.notifiers = &network_lat_notifier,
	.name = "network_latency",
	.default_value = 2000 * USEC_PER_SEC,
	.type = PM_QOS_MIN		/* tightest (smallest) latency wins */
};


static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
	.notifiers = &network_throughput_notifier,
	.name = "network_throughput",
	.default_value = 0,
	.type = PM_QOS_MAX,		/* highest requested throughput wins */
};


/*
 * Indexed by pm_qos class id. NOTE(review): order presumably matches the
 * PM_QOS_* class constants in <linux/pm_qos_params.h> — confirm before
 * changing either side.
 */
static struct pm_qos_object *pm_qos_array[] = {
	&null_pm_qos,
	&cpu_dma_pm_qos,
	&network_lat_pm_qos,
	&network_throughput_pm_qos
};
104
/* file operations for the per-class /dev/<name> misc devices */
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos);
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
		size_t count, loff_t *f_pos);
static int pm_qos_power_open(struct inode *inode, struct file *filp);
static int pm_qos_power_release(struct inode *inode, struct file *filp);

static const struct file_operations pm_qos_power_fops = {
	.write = pm_qos_power_write,
	.read = pm_qos_power_read,
	.open = pm_qos_power_open,
	.release = pm_qos_power_release,
	.llseek = noop_llseek,	/* seeking is meaningless on these nodes */
};
119
120 /* unlocked internal variant */
121 static inline int pm_qos_get_value(struct pm_qos_object *o)
122 {
123 if (plist_head_empty(&o->requests))
124 return o->default_value;
125
126 switch (o->type) {
127 case PM_QOS_MIN:
128 return plist_first(&o->requests)->prio;
129
130 case PM_QOS_MAX:
131 return plist_last(&o->requests)->prio;
132
133 default:
134 /* runtime check for not using enum */
135 BUG();
136 }
137 }
138
139 static void update_target(struct pm_qos_object *o, struct plist_node *node,
140 int del, int value)
141 {
142 unsigned long flags;
143 int prev_value, curr_value;
144
145 spin_lock_irqsave(&pm_qos_lock, flags);
146 prev_value = pm_qos_get_value(o);
147 /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */
148 if (value != PM_QOS_DEFAULT_VALUE) {
149 /*
150 * to change the list, we atomically remove, reinit
151 * with new value and add, then see if the extremal
152 * changed
153 */
154 plist_del(node, &o->requests);
155 plist_node_init(node, value);
156 plist_add(node, &o->requests);
157 } else if (del) {
158 plist_del(node, &o->requests);
159 } else {
160 plist_add(node, &o->requests);
161 }
162 curr_value = pm_qos_get_value(o);
163 spin_unlock_irqrestore(&pm_qos_lock, flags);
164
165 if (prev_value != curr_value)
166 blocking_notifier_call_chain(o->notifiers,
167 (unsigned long)curr_value,
168 NULL);
169 }
170
171 static int register_pm_qos_misc(struct pm_qos_object *qos)
172 {
173 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
174 qos->pm_qos_power_miscdev.name = qos->name;
175 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
176
177 return misc_register(&qos->pm_qos_power_miscdev);
178 }
179
180 static int find_pm_qos_object_by_minor(int minor)
181 {
182 int pm_qos_class;
183
184 for (pm_qos_class = 0;
185 pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
186 if (minor ==
187 pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
188 return pm_qos_class;
189 }
190 return -1;
191 }
192
193 /**
194 * pm_qos_request - returns current system wide qos expectation
195 * @pm_qos_class: identification of which qos value is requested
196 *
197 * This function returns the current target value in an atomic manner.
198 */
199 int pm_qos_request(int pm_qos_class)
200 {
201 unsigned long flags;
202 int value;
203
204 spin_lock_irqsave(&pm_qos_lock, flags);
205 value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
206 spin_unlock_irqrestore(&pm_qos_lock, flags);
207
208 return value;
209 }
210 EXPORT_SYMBOL_GPL(pm_qos_request);
211
212 int pm_qos_request_active(struct pm_qos_request_list *req)
213 {
214 return req->pm_qos_class != 0;
215 }
216 EXPORT_SYMBOL_GPL(pm_qos_request_active);
217
218 /**
219 * pm_qos_add_request - inserts new qos request into the list
220 * @dep: pointer to a preallocated handle
221 * @pm_qos_class: identifies which list of qos request to use
222 * @value: defines the qos request
223 *
224 * This function inserts a new entry in the pm_qos_class list of requested qos
225 * performance characteristics. It recomputes the aggregate QoS expectations
226 * for the pm_qos_class of parameters and initializes the pm_qos_request_list
227 * handle. Caller needs to save this handle for later use in updates and
228 * removal.
229 */
230
231 void pm_qos_add_request(struct pm_qos_request_list *dep,
232 int pm_qos_class, s32 value)
233 {
234 struct pm_qos_object *o = pm_qos_array[pm_qos_class];
235 int new_value;
236
237 if (pm_qos_request_active(dep)) {
238 WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
239 return;
240 }
241 if (value == PM_QOS_DEFAULT_VALUE)
242 new_value = o->default_value;
243 else
244 new_value = value;
245 plist_node_init(&dep->list, new_value);
246 dep->pm_qos_class = pm_qos_class;
247 update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE);
248 }
249 EXPORT_SYMBOL_GPL(pm_qos_add_request);
250
251 /**
252 * pm_qos_update_request - modifies an existing qos request
253 * @pm_qos_req : handle to list element holding a pm_qos request to use
254 * @value: defines the qos request
255 *
256 * Updates an existing qos request for the pm_qos_class of parameters along
257 * with updating the target pm_qos_class value.
258 *
259 * Attempts are made to make this code callable on hot code paths.
260 */
261 void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
262 s32 new_value)
263 {
264 s32 temp;
265 struct pm_qos_object *o;
266
267 if (!pm_qos_req) /*guard against callers passing in null */
268 return;
269
270 if (!pm_qos_request_active(pm_qos_req)) {
271 WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
272 return;
273 }
274
275 o = pm_qos_array[pm_qos_req->pm_qos_class];
276
277 if (new_value == PM_QOS_DEFAULT_VALUE)
278 temp = o->default_value;
279 else
280 temp = new_value;
281
282 if (temp != pm_qos_req->list.prio)
283 update_target(o, &pm_qos_req->list, 0, temp);
284 }
285 EXPORT_SYMBOL_GPL(pm_qos_update_request);
286
287 /**
288 * pm_qos_remove_request - modifies an existing qos request
289 * @pm_qos_req: handle to request list element
290 *
291 * Will remove pm qos request from the list of requests and
292 * recompute the current target value for the pm_qos_class. Call this
293 * on slow code paths.
294 */
295 void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
296 {
297 struct pm_qos_object *o;
298
299 if (pm_qos_req == NULL)
300 return;
301 /* silent return to keep pcm code cleaner */
302
303 if (!pm_qos_request_active(pm_qos_req)) {
304 WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
305 return;
306 }
307
308 o = pm_qos_array[pm_qos_req->pm_qos_class];
309 update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE);
310 memset(pm_qos_req, 0, sizeof(*pm_qos_req));
311 }
312 EXPORT_SYMBOL_GPL(pm_qos_remove_request);
313
314 /**
315 * pm_qos_add_notifier - sets notification entry for changes to target value
316 * @pm_qos_class: identifies which qos target changes should be notified.
317 * @notifier: notifier block managed by caller.
318 *
319 * will register the notifier into a notification chain that gets called
320 * upon changes to the pm_qos_class target value.
321 */
322 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
323 {
324 int retval;
325
326 retval = blocking_notifier_chain_register(
327 pm_qos_array[pm_qos_class]->notifiers, notifier);
328
329 return retval;
330 }
331 EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
332
333 /**
334 * pm_qos_remove_notifier - deletes notification entry from chain.
335 * @pm_qos_class: identifies which qos target changes are notified.
336 * @notifier: notifier block to be removed.
337 *
338 * will remove the notifier from the notification chain that gets called
339 * upon changes to the pm_qos_class target value.
340 */
341 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
342 {
343 int retval;
344
345 retval = blocking_notifier_chain_unregister(
346 pm_qos_array[pm_qos_class]->notifiers, notifier);
347
348 return retval;
349 }
350 EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
351
352 static int pm_qos_power_open(struct inode *inode, struct file *filp)
353 {
354 long pm_qos_class;
355
356 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
357 if (pm_qos_class >= 0) {
358 struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
359 if (!req)
360 return -ENOMEM;
361
362 pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
363 filp->private_data = req;
364
365 if (filp->private_data)
366 return 0;
367 }
368 return -EPERM;
369 }
370
371 static int pm_qos_power_release(struct inode *inode, struct file *filp)
372 {
373 struct pm_qos_request_list *req;
374
375 req = filp->private_data;
376 pm_qos_remove_request(req);
377 kfree(req);
378
379 return 0;
380 }
381
382
383 static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
384 size_t count, loff_t *f_pos)
385 {
386 s32 value;
387 unsigned long flags;
388 struct pm_qos_object *o;
389 struct pm_qos_request_list *pm_qos_req = filp->private_data;
390
391 if (!pm_qos_req)
392 return -EINVAL;
393 if (!pm_qos_request_active(pm_qos_req))
394 return -EINVAL;
395
396 o = pm_qos_array[pm_qos_req->pm_qos_class];
397 spin_lock_irqsave(&pm_qos_lock, flags);
398 value = pm_qos_get_value(o);
399 spin_unlock_irqrestore(&pm_qos_lock, flags);
400
401 return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
402 }
403
/*
 * write() on /dev/<class>: update this file handle's qos request.
 * Accepts either exactly sizeof(s32) raw binary bytes, or an ASCII
 * number of up to 10 digits (11 bytes if the last byte is '\n').
 */
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	s32 value;
	struct pm_qos_request_list *pm_qos_req;

	if (count == sizeof(s32)) {
		/* exactly 4 bytes: treat as a raw binary s32 */
		if (copy_from_user(&value, buf, sizeof(s32)))
			return -EFAULT;
	} else if (count <= 11) { /* ASCII perhaps? */
		char ascii_value[11];
		unsigned long int ulval;
		int ret;

		if (copy_from_user(ascii_value, buf, count))
			return -EFAULT;

		if (count > 10) {
			/* an 11th byte is only allowed if it is a trailing
			 * newline, which we strip */
			if (ascii_value[10] == '\n')
				ascii_value[10] = '\0';
			else
				return -EINVAL;
		} else {
			/* count <= 10, so the terminator fits in [count] */
			ascii_value[count] = '\0';
		}
		/* NOTE(review): parses as base 16 — presumably historical
		 * ABI; confirm before changing */
		ret = strict_strtoul(ascii_value, 16, &ulval);
		if (ret) {
			pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
			return -EINVAL;
		}
		value = (s32)lower_32_bits(ulval);
	} else {
		return -EINVAL;
	}

	pm_qos_req = filp->private_data;
	pm_qos_update_request(pm_qos_req, value);

	return count;
}
444
445
446 static int __init pm_qos_power_init(void)
447 {
448 int ret = 0;
449
450 ret = register_pm_qos_misc(&cpu_dma_pm_qos);
451 if (ret < 0) {
452 printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
453 return ret;
454 }
455 ret = register_pm_qos_misc(&network_lat_pm_qos);
456 if (ret < 0) {
457 printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
458 return ret;
459 }
460 ret = register_pm_qos_misc(&network_throughput_pm_qos);
461 if (ret < 0)
462 printk(KERN_ERR
463 "pm_qos_param: network_throughput setup failed\n");
464
465 return ret;
466 }
467
468 late_initcall(pm_qos_power_init);
This page took 0.053958 seconds and 5 git commands to generate.