drm/radeon/kms: add support for gui idle interrupts (v4)
drivers/gpu/drm/radeon/radeon_pm.c
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
23#include "drmP.h"
24#include "radeon.h"
f735261b 25#include "avivod.h"
7433874e 26
c913e23a
RM
27#define RADEON_IDLE_LOOP_MS 100
28#define RADEON_RECLOCK_DELAY_MS 200
73a6d3fc 29#define RADEON_WAIT_VBLANK_TIMEOUT 200
2031f77c 30#define RADEON_WAIT_IDLE_TIMEOUT 200
c913e23a 31
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
static void radeon_pm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);

static const char *pm_state_names[4] = {
	"PM_STATE_DISABLED",
	"PM_STATE_MINIMUM",
	"PM_STATE_PAUSED",
	"PM_STATE_ACTIVE"
};

static const char *pm_state_types[5] = {
	"Default",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

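/* Log the available power states and their clock modes via DRM_INFO. */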
static void radeon_print_power_mode_info(struct radeon_device *rdev)
{
	int i, j;
	bool is_default;

	DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
			is_default = true;
		else
			is_default = false;
		DRM_INFO("State %d %s %s\n", i,
			 pm_state_types[rdev->pm.power_state[i].type],
			 is_default ? "(default)" : "");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
		DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
		for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
			if (rdev->flags & RADEON_IS_IGP)
				DRM_INFO("\t\t%d engine: %d\n",
					 j,
					 rdev->pm.power_state[i].clock_info[j].sclk * 10);
			else
				DRM_INFO("\t\t%d engine/memory: %d/%d\n",
					 j,
					 rdev->pm.power_state[i].clock_info[j].sclk * 10,
					 rdev->pm.power_state[i].clock_info[j].mclk * 10);
		}
	}
}

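/*
 * Map a requested power state type to the best matching state the board
 * provides, falling back to the default state when nothing matches.
 */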
static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
							    enum radeon_pm_state_type type)
{
	int i, j;
	enum radeon_pm_state_type wanted_types[2];
	int wanted_count;

	switch (type) {
	case POWER_STATE_TYPE_DEFAULT:
	default:
		return rdev->pm.default_power_state;
	case POWER_STATE_TYPE_POWERSAVE:
		if (rdev->flags & RADEON_IS_MOBILITY) {
			wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
			wanted_types[1] = POWER_STATE_TYPE_BATTERY;
			wanted_count = 2;
		} else {
			wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
			wanted_count = 1;
		}
		break;
	case POWER_STATE_TYPE_BATTERY:
		if (rdev->flags & RADEON_IS_MOBILITY) {
			wanted_types[0] = POWER_STATE_TYPE_BATTERY;
			wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
			wanted_count = 2;
		} else {
			wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
			wanted_count = 1;
		}
		break;
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_PERFORMANCE:
		wanted_types[0] = type;
		wanted_count = 1;
		break;
	}

	for (i = 0; i < wanted_count; i++) {
		for (j = 0; j < rdev->pm.num_power_states; j++) {
			if (rdev->pm.power_state[j].type == wanted_types[i])
				return &rdev->pm.power_state[j];
		}
	}

	return rdev->pm.default_power_state;
}

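/* Pick the low, mid, high or default clock mode within a given power state. */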
static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
							     struct radeon_power_state *power_state,
							     enum radeon_pm_clock_mode_type type)
{
	switch (type) {
	case POWER_MODE_TYPE_DEFAULT:
	default:
		return power_state->default_clock_mode;
	case POWER_MODE_TYPE_LOW:
		return &power_state->clock_info[0];
	case POWER_MODE_TYPE_MID:
		if (power_state->num_clock_modes > 2)
			return &power_state->clock_info[1];
		else
			return &power_state->clock_info[0];
		break;
	case POWER_MODE_TYPE_HIGH:
		return &power_state->clock_info[power_state->num_clock_modes - 1];
	}
}

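/*
 * Translate the planned dynamic PM action into a requested power state
 * and clock mode.
 */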
static void radeon_get_power_state(struct radeon_device *rdev,
				   enum radeon_pm_action action)
{
	switch (action) {
	case PM_ACTION_MINIMUM:
		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
		rdev->pm.requested_clock_mode =
			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
		break;
	case PM_ACTION_DOWNCLOCK:
		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
		rdev->pm.requested_clock_mode =
			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
		break;
	case PM_ACTION_UPCLOCK:
		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
		rdev->pm.requested_clock_mode =
			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
		break;
	case PM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined action\n");
		return;
	}
	DRM_INFO("Requested: e: %d m: %d p: %d\n",
		 rdev->pm.requested_clock_mode->sclk,
		 rdev->pm.requested_clock_mode->mclk,
		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
}

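/* Wait (with timeout) for a vblank on any active CRTC before reclocking. */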
static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}

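/* Program the requested power state and clock mode into the hardware. */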
static void radeon_set_power_state(struct radeon_device *rdev)
{
	/* if *_clock_mode are the same, *_power_state are as well */
	if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
		return;

	DRM_INFO("Setting: e: %d m: %d p: %d\n",
		 rdev->pm.requested_clock_mode->sclk,
		 rdev->pm.requested_clock_mode->mclk,
		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);

	/* set pcie lanes */
	/* TODO */

	/* set voltage */
	/* TODO */

	/* set engine clock */
	radeon_sync_with_vblank(rdev);
	radeon_pm_debug_check_in_vbl(rdev, false);
	radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
	radeon_pm_debug_check_in_vbl(rdev, true);

#if 0
	/* set memory clock */
	if (rdev->asic->set_memory_clock) {
		radeon_sync_with_vblank(rdev);
		radeon_pm_debug_check_in_vbl(rdev, false);
		radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
		radeon_pm_debug_check_in_vbl(rdev, true);
	}
#endif

	rdev->pm.current_power_state = rdev->pm.requested_power_state;
	rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
}

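/*
 * Initialize power management: query the BIOS power tables, register the
 * debugfs file and set up the dynamic PM idle work handler.
 */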
int radeon_pm_init(struct radeon_device *rdev)
{
	rdev->pm.state = PM_STATE_DISABLED;
	rdev->pm.planned_action = PM_ACTION_NONE;
	rdev->pm.downclocked = false;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_print_power_mode_info(rdev);
	}

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for PM!\n");
	}

	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

	if (radeon_dynpm != -1 && radeon_dynpm) {
		rdev->pm.state = PM_STATE_PAUSED;
		DRM_INFO("radeon: dynamic power management enabled\n");
	}

	DRM_INFO("radeon: power management initialized\n");

	return 0;
}

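/* Tear down PM resources; currently just the PM i2c bus, if one was created. */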
void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.i2c_bus)
		radeon_i2c_destroy(rdev->pm.i2c_bus);
}

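/*
 * Re-evaluate the dynamic PM state when the set of active CRTCs changes:
 * multiple active heads pause reclocking, a single head enables it, and
 * no active heads drop the GPU to its minimum clocks.
 */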
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_connector *connector;
	struct radeon_crtc *radeon_crtc;
	int count = 0;

	if (rdev->pm.state == PM_STATE_DISABLED)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	list_for_each_entry(connector,
			    &ddev->mode_config.connector_list, head) {
		if (connector->encoder &&
		    connector->encoder->crtc &&
		    connector->dpms != DRM_MODE_DPMS_OFF) {
			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			++count;
		}
	}

	if (count > 1) {
		if (rdev->pm.state == PM_STATE_ACTIVE) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_PAUSED;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			if (rdev->pm.downclocked)
				radeon_pm_set_clocks(rdev);

			DRM_DEBUG("radeon: dynamic power management deactivated\n");
		}
	} else if (count == 1) {
		/* TODO: Increase clocks if needed for current mode */

		if (rdev->pm.state == PM_STATE_MINIMUM) {
			rdev->pm.state = PM_STATE_ACTIVE;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks(rdev);

			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
		} else if (rdev->pm.state == PM_STATE_PAUSED) {
			rdev->pm.state = PM_STATE_ACTIVE;
			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
			DRM_DEBUG("radeon: dynamic power management activated\n");
		}
	} else { /* count == 0 */
		if (rdev->pm.state != PM_STATE_MINIMUM) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_MINIMUM;
			rdev->pm.planned_action = PM_ACTION_MINIMUM;
			radeon_pm_set_clocks(rdev);
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}

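/* Debug helper: report when a reclock happens outside the vblank interval. */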
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc1 = 0, stat_crtc2 = 0;
	bool in_vbl = true;

	if (ASIC_IS_AVIVO(rdev)) {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			stat_crtc1 = RREG32(D1CRTC_STATUS);
			if (!(stat_crtc1 & 1))
				in_vbl = false;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			stat_crtc2 = RREG32(D2CRTC_STATUS);
			if (!(stat_crtc2 & 1))
				in_vbl = false;
		}
	}
	if (in_vbl == false)
		DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
			 stat_crtc2, finish ? "exit" : "entry");
	return in_vbl;
}
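
/* Apply the planned PM action; the caller holds the required locks. */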
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
{
	/*radeon_fence_wait_last(rdev);*/
	switch (rdev->pm.planned_action) {
	case PM_ACTION_UPCLOCK:
		rdev->pm.downclocked = false;
		break;
	case PM_ACTION_DOWNCLOCK:
		rdev->pm.downclocked = true;
		break;
	case PM_ACTION_MINIMUM:
		break;
	case PM_ACTION_NONE:
		DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
		break;
	}

	radeon_set_power_state(rdev);
	rdev->pm.planned_action = PM_ACTION_NONE;
}

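/*
 * Resolve the planned action into a power state, hold vblank references
 * for the active CRTCs and reclock under the CP mutex.
 */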
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	radeon_get_power_state(rdev, rdev->pm.planned_action);
	mutex_lock(&rdev->cp.mutex);

	if (rdev->pm.active_crtcs & (1 << 0)) {
		rdev->pm.req_vblank |= (1 << 0);
		drm_vblank_get(rdev->ddev, 0);
	}
	if (rdev->pm.active_crtcs & (1 << 1)) {
		rdev->pm.req_vblank |= (1 << 1);
		drm_vblank_get(rdev->ddev, 1);
	}
	radeon_pm_set_clocks_locked(rdev);
	if (rdev->pm.req_vblank & (1 << 0)) {
		rdev->pm.req_vblank &= ~(1 << 0);
		drm_vblank_put(rdev->ddev, 0);
	}
	if (rdev->pm.req_vblank & (1 << 1)) {
		rdev->pm.req_vblank &= ~(1 << 1);
		drm_vblank_put(rdev->ddev, 1);
	}

	mutex_unlock(&rdev->cp.mutex);
}

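/*
 * Periodic idle work: sample the number of unprocessed fences and plan an
 * upclock when the GPU is busy or a downclock when it is idle.
 */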
static void radeon_pm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	rdev = container_of(work, struct radeon_device,
			    pm.idle_work.work);

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.state == PM_STATE_ACTIVE) {
		unsigned long irq_flags;
		int not_processed = 0;

		read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		if (!list_empty(&rdev->fence_drv.emited)) {
			struct list_head *ptr;
			list_for_each(ptr, &rdev->fence_drv.emited) {
				/* count up to 3, that's enough info */
				if (++not_processed >= 3)
					break;
			}
		}
		read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   rdev->pm.downclocked) {
				rdev->pm.planned_action =
					PM_ACTION_UPCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   !rdev->pm.downclocked) {
				rdev->pm.planned_action =
					PM_ACTION_DOWNCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		if (rdev->pm.planned_action != PM_ACTION_NONE &&
		    jiffies > rdev->pm.action_timeout) {
			radeon_pm_set_clocks(rdev);
		}
	}
	mutex_unlock(&rdev->pm.mutex);

	queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
			   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->asic->get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}