drm/radeon/kms: add functions to get current pcie lanes
drivers/gpu/drm/radeon/radeon_pm.c
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 */
#include "drmP.h"
#include "radeon.h"

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200

static void radeon_pm_check_limits(struct radeon_device *rdev);
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
static void radeon_pm_reclock_work_handler(struct work_struct *work);
static void radeon_pm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);

static const char *pm_state_names[4] = {
	"PM_STATE_DISABLED",
	"PM_STATE_MINIMUM",
	"PM_STATE_PAUSED",
	"PM_STATE_ACTIVE"
};
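
/*
 * Rough overview, derived from the code in this file: dynamic power
 * management moves between the four states above.  DISABLED means
 * reclocking is off entirely; MINIMUM pins the GPU at minimum clocks
 * while no CRTC is active; PAUSED holds full clocks while more than one
 * CRTC is active; ACTIVE lets the idle worker alternate between up- and
 * downclocked operation based on fence traffic, with the actual switch
 * deferred to a vertical blank.
 */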

int radeon_pm_init(struct radeon_device *rdev)
{
	rdev->pm.state = PM_STATE_DISABLED;
	rdev->pm.planned_action = PM_ACTION_NONE;
	rdev->pm.downclocked = false;
	rdev->pm.vblank_callback = false;

	radeon_pm_check_limits(rdev);

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for PM!\n");
	}

	INIT_WORK(&rdev->pm.reclock_work, radeon_pm_reclock_work_handler);
	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

	if (radeon_dynpm != -1 && radeon_dynpm) {
		rdev->pm.state = PM_STATE_PAUSED;
		DRM_INFO("radeon: dynamic power management enabled\n");
	}

	DRM_INFO("radeon: power management initialized\n");

	return 0;
}

static void radeon_pm_check_limits(struct radeon_device *rdev)
{
	/* Clock values are stored in units of 10 kHz (see the "%u0 kHz"
	 * debugfs output below), so 5000 here is a 50 MHz margin below
	 * the default clocks. */
	rdev->pm.min_gpu_engine_clock = rdev->clock.default_sclk - 5000;
	rdev->pm.min_gpu_memory_clock = rdev->clock.default_mclk - 5000;
}

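/*
 * Policy, as implemented below: the number of DPMS-on connectors picks
 * the PM state.  More than one active head pauses dynamic reclocking at
 * full clocks, exactly one enables it, and zero drops the GPU to its
 * minimum clocks.
 */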
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_connector *connector;
	struct radeon_crtc *radeon_crtc;
	int count = 0;

	if (rdev->pm.state == PM_STATE_DISABLED)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	list_for_each_entry(connector,
			    &ddev->mode_config.connector_list, head) {
		if (connector->encoder &&
		    connector->dpms != DRM_MODE_DPMS_OFF) {
			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			++count;
		}
	}

	if (count > 1) {
		if (rdev->pm.state == PM_STATE_ACTIVE) {
			wait_queue_head_t wait;
			init_waitqueue_head(&wait);

			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_PAUSED;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			rdev->pm.vblank_callback = true;

			mutex_unlock(&rdev->pm.mutex);

			wait_event_timeout(wait, !rdev->pm.downclocked,
					   msecs_to_jiffies(300));
			if (!rdev->pm.downclocked)
				radeon_pm_set_clocks(rdev);

			DRM_DEBUG("radeon: dynamic power management deactivated\n");
		} else {
			mutex_unlock(&rdev->pm.mutex);
		}
	} else if (count == 1) {
		rdev->pm.min_mode_engine_clock = rdev->pm.min_gpu_engine_clock;
		rdev->pm.min_mode_memory_clock = rdev->pm.min_gpu_memory_clock;
		/* TODO: Increase clocks if needed for current mode */

		if (rdev->pm.state == PM_STATE_MINIMUM) {
			rdev->pm.state = PM_STATE_ACTIVE;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks_locked(rdev);

			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
		} else if (rdev->pm.state == PM_STATE_PAUSED) {
			rdev->pm.state = PM_STATE_ACTIVE;
			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
			DRM_DEBUG("radeon: dynamic power management activated\n");
		}

		mutex_unlock(&rdev->pm.mutex);
	} else { /* count == 0 */
		if (rdev->pm.state != PM_STATE_MINIMUM) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_MINIMUM;
			rdev->pm.planned_action = PM_ACTION_MINIMUM;
			radeon_pm_set_clocks_locked(rdev);
		}

		mutex_unlock(&rdev->pm.mutex);
	}
}

static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
{
	/*radeon_fence_wait_last(rdev);*/
	switch (rdev->pm.planned_action) {
	case PM_ACTION_UPCLOCK:
		radeon_set_engine_clock(rdev, rdev->clock.default_sclk);
		rdev->pm.downclocked = false;
		break;
	case PM_ACTION_DOWNCLOCK:
		radeon_set_engine_clock(rdev,
					rdev->pm.min_mode_engine_clock);
		rdev->pm.downclocked = true;
		break;
	case PM_ACTION_MINIMUM:
		radeon_set_engine_clock(rdev,
					rdev->pm.min_gpu_engine_clock);
		break;
	case PM_ACTION_NONE:
		DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
		break;
	}

	rdev->pm.planned_action = PM_ACTION_NONE;
}

static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	/* new VBLANK irq may come before handling previous one */
	if (rdev->pm.vblank_callback) {
		mutex_lock(&rdev->cp.mutex);
		if (rdev->pm.req_vblank & (1 << 0)) {
			rdev->pm.req_vblank &= ~(1 << 0);
			drm_vblank_put(rdev->ddev, 0);
		}
		if (rdev->pm.req_vblank & (1 << 1)) {
			rdev->pm.req_vblank &= ~(1 << 1);
			drm_vblank_put(rdev->ddev, 1);
		}
		rdev->pm.vblank_callback = false;
		radeon_pm_set_clocks_locked(rdev);
		mutex_unlock(&rdev->cp.mutex);
	}
	mutex_unlock(&rdev->pm.mutex);
}

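/*
 * The actual reclock is synchronised with vertical blank: when a clock
 * change is due, the idle worker grabs a vblank reference for each active
 * CRTC and sets pm.vblank_callback; the vblank interrupt path (not shown
 * in this file) then queues this work, so the change lands during the
 * blanking period rather than mid-scanout.
 */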
static void radeon_pm_reclock_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	rdev = container_of(work, struct radeon_device,
				pm.reclock_work);
	radeon_pm_set_clocks(rdev);
}

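/*
 * Idle heuristic, as implemented below: every RADEON_IDLE_LOOP_MS the
 * worker samples the emitted-fence list.  Three or more unprocessed
 * fences mean the GPU is busy, so a pending downclock is cancelled or an
 * upclock is planned; an empty list means it is idle, so the opposite
 * applies.  A plan only takes effect after RADEON_RECLOCK_DELAY_MS of
 * hysteresis, and the switch itself is deferred to a vertical blank.
 */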
static void radeon_pm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	rdev = container_of(work, struct radeon_device,
				pm.idle_work.work);

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.state == PM_STATE_ACTIVE &&
	    !rdev->pm.vblank_callback) {
		unsigned long irq_flags;
		int not_processed = 0;

		read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		if (!list_empty(&rdev->fence_drv.emited)) {
			struct list_head *ptr;
			list_for_each(ptr, &rdev->fence_drv.emited) {
				/* count up to 3, that's enough info */
				if (++not_processed >= 3)
					break;
			}
		}
		read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   rdev->pm.downclocked) {
				rdev->pm.planned_action =
					PM_ACTION_UPCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   !rdev->pm.downclocked) {
				rdev->pm.planned_action =
					PM_ACTION_DOWNCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* time_after() handles jiffies wraparound; a plain ">"
		 * comparison does not */
		if (rdev->pm.planned_action != PM_ACTION_NONE &&
		    time_after(jiffies, rdev->pm.action_timeout)) {
			if (rdev->pm.active_crtcs & (1 << 0)) {
				rdev->pm.req_vblank |= (1 << 0);
				drm_vblank_get(rdev->ddev, 0);
			}
			if (rdev->pm.active_crtcs & (1 << 1)) {
				rdev->pm.req_vblank |= (1 << 1);
				drm_vblank_get(rdev->ddev, 1);
			}
			rdev->pm.vblank_callback = true;
		}
	}
	mutex_unlock(&rdev->pm.mutex);

	queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
			   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
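	/* Clock values are stored in units of 10 kHz, hence the "%u" plus
	 * a literal '0' below to render them in kHz. */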
	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

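/*
 * With debugfs mounted, the file registered here typically appears as
 * /sys/kernel/debug/dri/<minor>/radeon_pm_info (the exact path depends on
 * the mount point and the DRM minor number).
 */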
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}