/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}
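/* CG_THERMAL_STATUS reports the on-die sensor as a 9-bit signed Celsius
 * value: bit 8 is the sign bit and the low byte the magnitude, so a raw
 * field of 0x41 decodes to 65 degrees C, returned below as 65000
 * millidegrees.
 */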
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
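/* Power profile setup: each profile entry (default, low/mid/high, for
 * single-head "SH" and multi-head "MH" configurations) names the power
 * state index (ps_idx) and clock mode index (cm_idx) to use while
 * displays are off (dpms_off_*) and while they are on (dpms_on_*).
 */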
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
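/* Hot plug detect: DCE3 and newer parts expose per-pad DC_HPDn registers,
 * while older asics use the DC_HOT_PLUG_DETECTn set, hence the two
 * register families switched on throughout the hpd helpers below.
 */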
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS avoid breaking the
			 * aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}
/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
			   EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
	      ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}
static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
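/* RS780/RS880 MC registers are indirect: each access programs MC_INDEX
 * with the target address (plus a write-enable bit for stores), moves the
 * payload through MC_DATA, then resets the index.
 */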
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	return r;
}

void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
}
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same place in the GPU address space as it
 * occupies in the CPU (PCI) address space, as some GPUs seem to have
 * issues when we reprogram it to a different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture then we limit the VRAM size to the aperture.
 *
 * If we are using AGP then we place VRAM adjacent to the AGP aperture,
 * as we need them to be contiguous from the GPU's point of view so that
 * we can program the GPU to catch accesses outside of them (weird GPU
 * policy, see ??).
 *
 * This function will never fail; the worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on an AGP platform.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;

		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
				 * memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						(unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}
void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}
void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}
static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
		 RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
		 RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
		 RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		 RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		 RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		 RREG32(DMA_STATUS_REG));
}
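/* Display hang check: sample the HV counter of every enabled CRTC, then
 * re-read it several times and drop each CRTC whose counter advanced;
 * whatever is still marked at the end is considered hung.
 */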
static bool r600_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[2];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	mdelay(50);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		if (rdev->family >= CHIP_RV770)
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
		else
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
				S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SMX(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
			S_008020_SOFT_RESET_VGT(1);

		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
	}

	if (reset_mask & RADEON_RESET_DMA) {
		if (rdev->family >= CHIP_RV770)
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
		else
			srbm_soft_reset |= SOFT_RESET_DMA;
	}

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
	}

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);

	if (grbm_soft_reset) {
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	mdelay(1);

	rv515_mc_resume(rdev, &save);
	udelay(50);

	r600_print_gpu_status_regs(rdev);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	r600_gpu_soft_reset(rdev, reset_mask);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}
/**
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
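/* Backend remap: the returned word packs, for every rendering pipe, the
 * index of the render backend that should service it (2 bits per entry
 * on r6xx/r7xx, 4 bits on later asics), skipping backends that are
 * disabled and spreading any remainder pipes across the survivors.
 */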
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
			      u32 tiling_pipe_num,
			      u32 max_rb_num,
			      u32 total_max_rb_num,
			      u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
	u32 data = 0, mask = 1 << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on that asic */
	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
	/* make sure at least one RB is available */
	if ((tmp & 0xff) != 0xff)
		disabled_rb_mask = tmp;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {
		/* r6xx/r7xx */
		rb_num_width = 2;
	} else {
		/* eg+ */
		rb_num_width = 4;
	}

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}

	return data;
}
int r600_count_pipe_bits(uint32_t val)
{
	return hweight32(val);
}
static void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;
	u32 disabled_rb_mask;

	rdev->config.r600.tiling_group_size = 256;
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);

	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	tmp = R6XX_MAX_BACKENDS -
		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
	if (tmp < rdev->config.r600.max_backends) {
		rdev->config.r600.max_backends = tmp;
	}

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
	tmp = R6XX_MAX_PIPES -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
	if (tmp < rdev->config.r600.max_pipes) {
		rdev->config.r600.max_pipes = tmp;
	}
	tmp = R6XX_MAX_SIMDS -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
	if (tmp < rdev->config.r600.max_simds) {
		rdev->config.r600.max_simds = tmp;
	}

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
					R6XX_MAX_BACKENDS, disabled_rb_mask);
	tiling_config |= tmp << 16;
	rdev->config.r600.backend_map = tmp;

	rdev->config.r600.tile_config = tiling_config;
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);
	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}
	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}
	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));
	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV710:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256)
		tmp = 256;
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);
	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);
	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
	WREG32(VC_ENHANCE, 0);
}
/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
void r600_cp_stop(struct radeon_device *rdev)
{
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
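/* Fetch the PFP/ME/RLC (and, where applicable, SMC) microcode images
 * via request_firmware() and sanity-check their sizes against the
 * expected per-family ucode sizes.
 */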
int r600_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	const char *smc_chip_name = "RV770";
	size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		smc_chip_name = "RV770";
		smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_RV730:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		smc_chip_name = "RV730";
		smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		smc_chip_name = "RV710";
		smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		smc_chip_name = "RV740";
		smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		smc_chip_name = "CEDAR";
		smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		smc_chip_name = "REDWOOD";
		smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		smc_chip_name = "JUNIPER";
		smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		smc_chip_name = "CYPRESS";
		smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_PALM:
		chip_name = "PALM";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO:
		chip_name = "SUMO";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO2:
		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";
		break;
	default:
		BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = R600_PFP_UCODE_SIZE * 4;
		me_req_size = R600_PM4_UCODE_SIZE * 12;
		rlc_req_size = R600_RLC_UCODE_SIZE * 4;
	}
	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			printk(KERN_ERR
			       "smc: error loading firmware \"%s\"\n",
			       fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
			err = 0;
		} else if (rdev->smc_fw->size != smc_req_size) {
			printk(KERN_ERR
			       "smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	}
	return err;
}
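/* Upload the PFP and ME microcode into the CP internal RAM; the CP is
 * halted and soft-reset first, and the write pointers are rewound when done.
 */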
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
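/* Emit the ME_INITIALIZE packet that brings the micro engine out of
 * halt; the context count programmed differs between r6xx and r7xx parts.
 */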
int r600_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(ring, 0x0);
		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(ring, 0x3);
		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
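/* Program the CP ring buffer (size, base, rptr/wptr and the optional
 * writeback address) and run a ring test before declaring it ready.
 */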
int r600_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	r600_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}
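/* Round the requested ring size up to a power of two and reserve an
 * optional scratch register used to save the rptr on IB execution.
 */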
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
	u32 rb_bufsz;
	int r;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = 16 - 1;

	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
}
void r600_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}
/*
 * GPU scratch registers helpers function.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
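/* Sanity-check the ring by writing a magic value to a scratch register
 * through the CP and polling until it lands or the timeout expires.
 */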
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
/*
 * CP fences/semaphores
 */

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	if (rdev->wb.use_event) {
		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(ring, addr & 0xffffffff);
		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(ring, fence->seq);
		radeon_ring_write(ring, 0);
	} else {
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(ring, RB_INT_STAT);
	}
}
void r600_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	if (rdev->family < CHIP_CAYMAN)
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
/**
 * r600_copy_cpdma - copy pages using the CP DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU paging using the CP DMA engine (r6xx+).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int r600_copy_cpdma(struct radeon_device *rdev,
		    uint64_t src_offset, uint64_t dst_offset,
		    unsigned num_gpu_pages,
		    struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.blit_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes, tmp;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	if (radeon_fence_need_sync(*fence, ring->idx)) {
		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
					    ring->idx);
		radeon_fence_note_sync(*fence, ring->idx);
	} else {
		radeon_semaphore_free(rdev, &sem, NULL);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		tmp = upper_32_bits(src_offset) & 0xff;
		if (size_in_bytes == 0)
			tmp |= PACKET3_CP_DMA_CP_SYNC;
		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
		radeon_ring_write(ring, src_offset & 0xffffffff);
		radeon_ring_write(ring, tmp);
		radeon_ring_write(ring, dst_offset & 0xffffffff);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, cur_size_in_bytes);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
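/* Bring the asic up: program the MC, load microcode, enable the GART,
 * start the CP/DMA rings and hook up interrupts, fences and the IB pool.
 */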
static int r600_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	r600_mc_program(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR, DMA_RB_WPTR,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	r = r600_dma_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}
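/* Enable or disable the legacy VGA render path via CONFIG_CNTL. */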
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}
int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}
int r600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_cp_stop(rdev);
	r600_dma_stop(rdev);
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);

	return 0;
}
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 * like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_dma_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}
void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_cp_fini(rdev);
	r600_dma_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}
/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the
 * CPU writing to the ring and the GPU consuming, the GPU writes to the
 * ring and the host consumes.  As the host irq handler processes
 * interrupts, it increments the rptr.  When the rptr catches up with
 * the wptr, all the current interrupts have been processed.
 */

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
}
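/* Back the IH ring with a pinned, kmapped buffer object in GTT. */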
int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		mdelay(15);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}
static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
static int r600_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}
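/* Flip the master enable bits for the IH controller and its ring. */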
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}
void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		} else {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
	}
}
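/* One-time IH setup: allocate the ring, resume the RLC, program the IH
 * ring registers and leave everything masked until r600_irq_set().
 */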
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	if (rdev->family >= CHIP_CEDAR)
		ret = evergreen_rlc_resume(rdev);
	else
		ret = r600_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* at this point everything should be setup correctly to enable master */
	pci_set_master(rdev->pdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
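/* Translate the interrupt bookkeeping in rdev->irq into the per-block
 * enable bits and write them out to the hardware in one pass.
 */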
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi0, hdmi1;
	u32 d1grph = 0, d2grph = 0;
	u32 dma_cntl;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		thermal_int = RREG32(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	} else if (rdev->family >= CHIP_RV770) {
		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	}
	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}
	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		WREG32(CG_THERMAL_INT, thermal_int);
	} else if (rdev->family >= CHIP_RV770) {
		WREG32(RV770_CG_THERMAL_INT, thermal_int);
	}

	return 0;
}
static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}
void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing from
		 * the last vector that was not overwritten (wptr + 16).
		 * Hopefully this lets us catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/*
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
3674 int r600_irq_process(struct radeon_device
*rdev
)
3678 u32 src_id
, src_data
;
3680 bool queue_hotplug
= false;
3681 bool queue_hdmi
= false;
3682 bool queue_thermal
= false;
3684 if (!rdev
->ih
.enabled
|| rdev
->shutdown
)
3687 /* No MSIs, need a dummy read to flush PCI DMAs */
3688 if (!rdev
->msi_enabled
)
3691 wptr
= r600_get_ih_wptr(rdev
);
3694 /* is somebody else already processing irqs? */
3695 if (atomic_xchg(&rdev
->ih
.lock
, 1))
3698 rptr
= rdev
->ih
.rptr
;
3699 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr
, wptr
);
3701 /* Order reading of wptr vs. reading of IH ring data */
3704 /* display interrupts */
3707 while (rptr
!= wptr
) {
3708 /* wptr/rptr are in bytes! */
3709 ring_index
= rptr
/ 4;
3710 src_id
= le32_to_cpu(rdev
->ih
.ring
[ring_index
]) & 0xff;
3711 src_data
= le32_to_cpu(rdev
->ih
.ring
[ring_index
+ 1]) & 0xfffffff;
3714 case 1: /* D1 vblank/vline */
3716 case 0: /* D1 vblank */
3717 if (rdev
->irq
.stat_regs
.r600
.disp_int
& LB_D1_VBLANK_INTERRUPT
) {
3718 if (rdev
->irq
.crtc_vblank_int
[0]) {
3719 drm_handle_vblank(rdev
->ddev
, 0);
3720 rdev
->pm
.vblank_sync
= true;
3721 wake_up(&rdev
->irq
.vblank_queue
);
3723 if (atomic_read(&rdev
->irq
.pflip
[0]))
3724 radeon_crtc_handle_flip(rdev
, 0);
3725 rdev
->irq
.stat_regs
.r600
.disp_int
&= ~LB_D1_VBLANK_INTERRUPT
;
3726 DRM_DEBUG("IH: D1 vblank\n");
3729 case 1: /* D1 vline */
3730 if (rdev
->irq
.stat_regs
.r600
.disp_int
& LB_D1_VLINE_INTERRUPT
) {
3731 rdev
->irq
.stat_regs
.r600
.disp_int
&= ~LB_D1_VLINE_INTERRUPT
;
3732 DRM_DEBUG("IH: D1 vline\n");
3736 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id
, src_data
);
3740 case 5: /* D2 vblank/vline */
3742 case 0: /* D2 vblank */
3743 if (rdev
->irq
.stat_regs
.r600
.disp_int
& LB_D2_VBLANK_INTERRUPT
) {
3744 if (rdev
->irq
.crtc_vblank_int
[1]) {
3745 drm_handle_vblank(rdev
->ddev
, 1);
3746 rdev
->pm
.vblank_sync
= true;
3747 wake_up(&rdev
->irq
.vblank_queue
);
3749 if (atomic_read(&rdev
->irq
.pflip
[1]))
3750 radeon_crtc_handle_flip(rdev
, 1);
3751 rdev
->irq
.stat_regs
.r600
.disp_int
&= ~LB_D2_VBLANK_INTERRUPT
;
3752 DRM_DEBUG("IH: D2 vblank\n");
3755 case 1: /* D1 vline */
3756 if (rdev
->irq
.stat_regs
.r600
.disp_int
& LB_D2_VLINE_INTERRUPT
) {
3757 rdev
->irq
.stat_regs
.r600
.disp_int
&= ~LB_D2_VLINE_INTERRUPT
;
3758 DRM_DEBUG("IH: D2 vline\n");
3762 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id
, src_data
);
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}
		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);
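	/* The atomic_xchg()/atomic_set() pair above acts as a hand-rolled
	 * trylock: whoever swaps the lock from 0 to 1 drains the ring and
	 * everyone else backs off with IRQ_NONE. Re-reading the write
	 * pointer below catches vectors that landed while the ring was
	 * being drained.
	 */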
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif
int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
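/* Once registered, the entry is readable from userspace through debugfs
 * (typically /sys/kernel/debug/dri/<minor>/r600_mc_info) and dumps the two
 * status registers above.
 */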
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to honor an HDP flush performed through
 * the ring buffer, which leads to rendering corruption; see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this, perform
 * the HDP flush directly by writing the register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void __iomem *)rdev->vram_scratch.ptr;
		u32 tmp;
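		/* The readl() below is the "fb read" half of the workaround:
		 * it forces the HDP_DEBUG1 write to post and the host data
		 * path cache to flush. The value read back is deliberately
		 * unused.
		 */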
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}
	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
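/* Try to bring the PCIe link up to gen2 (5.0 GT/s) speeds at init time.
 * Bails out on IGPs, non-PCIe parts, X2 boards, anything older than RV6xx,
 * and bridges that only train to 2.5 GT/s; can be disabled with the
 * radeon.pcie_gen2=0 module parameter.
 */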
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
	    (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}
	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
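
		/* Register 0x4088 is reached through the MM_CFGREGS window;
		 * judging by the TARGET_LINK_SPEED and SELECTABLE_DEEMPHASIS
		 * fields used on it, it appears to mirror the PCIe LINK_CNTL2
		 * config register, with 0x2 requesting a 5.0 GT/s target link
		 * speed. The 0x541c poke below is otherwise undocumented.
		 */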
		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}
/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
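	/* The capture write above latches the free-running 64-bit counter so
	 * the two 32-bit halves read below form a consistent snapshot; the
	 * mutex keeps concurrent callers from racing on the latch registers.
	 */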
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);