/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
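
/*
 * Sanity check of the overflow note above, using the default values:
 * (MAX_INTERESTING - 1) * RESOLUTION = 49999 * 1024 = 51,198,976, which
 * is well below UINT_MAX (4,294,967,295), so the plain int RESOLUTION
 * is safe here.
 */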


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time, the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding or
 * not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: we track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
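 *
 * For example (an invented illustration): a device raising an interrupt
 * every 8ms produces 8 recorded intervals of roughly 8000us each with a
 * tiny standard deviation, so their ~8000us average is used as the
 * prediction even when the next timer event is much further away.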
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 * The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 20 is added for each point of "per cpu load average" we have,
 * and a value of 10 is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
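 *
 * Worked example (hypothetical numbers): with a per-cpu load average of
 * 1.00 and two tasks waiting on IO, the multiplier is 1 + 20 + 20 = 41,
 * so a C state with a 100us exit latency only becomes eligible once the
 * predicted idle duration exceeds about 4100us.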
 *
 */

struct menu_device {
        int             last_state_idx;
        int             needs_update;

        unsigned int    next_timer_us;
        unsigned int    predicted_us;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
        int             interval_ptr;
};
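
/*
 * Note on scaling, derived from the code below: correction_factor[] is a
 * decaying average kept in fixed point, where RESOLUTION * DECAY (8192)
 * represents a ratio of 1.0; menu_enable_device() initializes every
 * bucket to that unity value.
 */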


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static inline int get_loadavg(unsigned long load)
{
        return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}
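
/*
 * Illustration (values invented): the load average is a fixed-point
 * number with FSHIFT fractional bits, so a load of 1.50 has
 * LOAD_INT() == 1 and LOAD_FRAC() == 50, and get_loadavg() returns
 * 1 * 10 + 50 / 10 = 15, i.e. roughly ten "points" per unit of load.
 */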

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
        int bucket = 0;

        /*
         * We keep two groups of stats; one with IO
         * pending, one without.
         * This allows us to calculate
         * E(duration)|iowait
         */
        if (nr_iowaiters)
                bucket = BUCKETS/2;

        if (duration < 10)
                return bucket;
        if (duration < 100)
                return bucket + 1;
        if (duration < 1000)
                return bucket + 2;
        if (duration < 10000)
                return bucket + 3;
        if (duration < 100000)
                return bucket + 4;
        return bucket + 5;
}
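
/*
 * Example (illustrative): an expected duration of 250us with IO pending
 * selects bucket BUCKETS/2 + 2 == 8; the same duration with no IO
 * pending selects bucket 2.
 */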

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
        int mult = 1;

        /* for higher loadavg, we are more reluctant */

        mult += 2 * get_loadavg(load);

        /* for IO wait tasks (per cpu!) we add 10x each */
        mult += 10 * nr_iowaiters;

        return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void get_typical_interval(struct menu_device *data)
{
        int i, divisor;
        unsigned int max, thresh, avg;
        uint64_t sum, variance;

        thresh = UINT_MAX; /* Discard outliers above this value */

again:

        /* First calculate the average of past intervals */
        max = 0;
        sum = 0;
        divisor = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        sum += value;
                        divisor++;
                        if (value > max)
                                max = value;
                }
        }
        if (divisor == INTERVALS)
                avg = sum >> INTERVAL_SHIFT;
        else
                avg = div_u64(sum, divisor);

        /* Then try to determine variance */
        variance = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        int64_t diff = (int64_t)value - avg;
                        variance += diff * diff;
                }
        }
        if (divisor == INTERVALS)
                variance >>= INTERVAL_SHIFT;
        else
                do_div(variance, divisor);

        /*
         * The typical interval is obtained when standard deviation is
         * small (stddev <= 20 us, variance <= 400 us^2) or standard
         * deviation is small compared to the average interval (avg >
         * 6*stddev, avg^2 > 36*variance). The average is smaller than
         * UINT_MAX aka U32_MAX, so computing its square does not
         * overflow a u64. We simply reject this candidate average if
         * the standard deviation is greater than 715 s (which is
         * rather unlikely).
         *
         * Use this result only if there is no timer to wake us up sooner.
         */
        if (likely(variance <= U64_MAX/36)) {
                if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
                                                        || variance <= 400) {
                        if (data->next_timer_us > avg)
                                data->predicted_us = avg;
                        return;
                }
        }

        /*
         * If we have outliers to the upside in our distribution, discard
         * those by setting the threshold to exclude these outliers, then
         * calculate the average and standard deviation again. Once we get
         * down to the bottom 3/4 of our samples, stop excluding samples.
         *
         * This can deal with workloads that have long pauses interspersed
         * with sporadic activity with a bunch of short pauses.
         */
        if ((divisor * 4) <= INTERVALS * 3)
                return;

        thresh = max - 1;
        goto again;
}
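
/*
 * Worked example of the outlier rejection above (invented numbers): for
 * intervals {100, 100, 100, 100, 100, 100, 100, 5000} (all in us), the
 * first pass fails the stddev test, so thresh becomes 4999 and the 5000
 * sample is dropped; the second pass sees 7 samples with avg == 100 and
 * variance == 0, so 100us is used as the prediction (provided the next
 * timer event is further away than that).
 */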

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
        unsigned int interactivity_req;
        unsigned long nr_iowaiters, cpu_load;

        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }

        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
                return 0;

        /* determine the expected residency time, round up */
        data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());

        get_iowait_load(&nr_iowaiters, &cpu_load);
        data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

        /*
         * Force the result of multiplication to be 64 bits even if both
         * operands are 32 bits.
         * Make sure to round up for half microseconds.
         */
        data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
                                                   data->correction_factor[data->bucket],
                                                   RESOLUTION * DECAY);
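        /*
         * Illustrative numbers: next_timer_us == 10000 with a learned
         * factor of 4096 (half of the 8192 unity value) predicts
         * 10000 * 4096 / 8192 == 5000us of idle time.
         */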

        get_typical_interval(data);

        /*
         * Performance multiplier defines a minimum predicted idle
         * duration / latency ratio. Adjust the latency limit if
         * necessary.
         */
        interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
        if (latency_req > interactivity_req)
                latency_req = interactivity_req;
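        /*
         * E.g. (made up numbers) predicted_us == 4100 with a multiplier
         * of 41 caps latency_req at 100us, even if PM QoS allows more.
         */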

        if (CPUIDLE_DRIVER_STATE_START > 0) {
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
                /*
                 * We want to default to C1 (hlt), not to busy polling
                 * unless the timer is happening really really soon.
                 */
                if (interactivity_req > 20 &&
                    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
                    dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
                        data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
        } else {
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
        }

        /*
         * Find the idle state with the lowest power while satisfying
         * our constraints.
         */
        for (i = data->last_state_idx + 1; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable)
                        continue;
                if (s->target_residency > data->predicted_us)
                        continue;
                if (s->exit_latency > latency_req)
                        continue;

                data->last_state_idx = i;
        }

        return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of the entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);

        data->last_state_idx = index;
        data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        int last_idx = data->last_state_idx;
        struct cpuidle_state *target = &drv->states[last_idx];
        unsigned int measured_us;
        unsigned int new_factor;

        /*
         * Try to figure out how much time passed between entry to low
         * power state and occurrence of the wakeup event.
         *
         * If the entered idle state didn't support residency measurements,
         * we use the measured time anyway if it is short, and if long,
         * truncate it to the whole expected sleep time.
         *
         * Any measured amount of time will include the exit latency.
         * Since we are interested in when the wakeup began, not when it
         * was completed, we must subtract the exit latency. However, if
         * the measured amount of time is less than the exit latency,
         * assume the state was never reached and the exit latency is 0.
         */

        /* measured value */
        measured_us = cpuidle_get_last_residency(dev);

        /* Deduct exit latency */
        if (measured_us > 2 * target->exit_latency)
                measured_us -= target->exit_latency;
        else
                measured_us /= 2;

        /* Make sure our coefficients do not exceed unity */
        if (measured_us > data->next_timer_us)
                measured_us = data->next_timer_us;

        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;

        if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
                new_factor += RESOLUTION * measured_us / data->next_timer_us;
        else
                /*
                 * we were idle so long that we count it as a perfect
                 * prediction
                 */
                new_factor += RESOLUTION;

        /*
         * We don't want 0 as factor; we always want at least
         * a tiny bit of estimated time. Fortunately, due to rounding,
         * new_factor will stay nonzero regardless of measured_us values
         * and the compiler can eliminate this test as long as DECAY > 1.
         */
        if (DECAY == 1 && unlikely(new_factor == 0))
                new_factor = 1;

        data->correction_factor[data->bucket] = new_factor;

        /* update the repeating-pattern data */
        data->intervals[data->interval_ptr++] = measured_us;
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
}
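
/*
 * Worked example of the decaying average above (invented numbers): with
 * a unity factor of 8192 (RESOLUTION * DECAY), measured_us == 500 and
 * next_timer_us == 1000, one update yields 8192 - 8192/8 + 1024*500/1000
 * = 7168 + 512 = 7680. Repeated wakeups at 50% of the timer distance
 * converge the factor towards 8192/2 == 4096, i.e. a learned ratio of
 * 0.5.
 */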

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev)
{
        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
        int i;

        memset(data, 0, sizeof(struct menu_device));

        /*
         * if the correction factor is 0 (eg first time init or cpu hotplug
         * etc), we actually want to start out with a unity factor.
         */
        for (i = 0; i < BUCKETS; i++)
                data->correction_factor[i] = RESOLUTION * DECAY;

        return 0;
}

static struct cpuidle_governor menu_governor = {
        .name =         "menu",
        .rating =       20,
        .enable =       menu_enable_device,
        .select =       menu_select,
        .reflect =      menu_reflect,
        .owner =        THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
        return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);